[dragonfly.git] / sys / dev / netif / bge / if_bge.c
1/*
2 * Copyright (c) 2001 Wind River Systems
3 * Copyright (c) 1997, 1998, 1999, 2001
4 * Bill Paul <wpaul@windriver.com>. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Bill Paul.
17 * 4. Neither the name of the author nor the names of any co-contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.29 2003/12/01 21:06:59 ambrisko Exp $
34 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.31 2005/05/21 09:05:05 joerg Exp $
35 *
36 */
37
38/*
39 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
40 *
41 * Written by Bill Paul <wpaul@windriver.com>
42 * Senior Engineer, Wind River Systems
43 */
44
45/*
46 * The Broadcom BCM5700 is based on technology originally developed by
47 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
48 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
49 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
50 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
51 * frames, highly configurable RX filtering, and 16 RX and TX queues
52 * (which, along with RX filter rules, can be used for QOS applications).
53 * Other features, such as TCP segmentation, may be available as part
54 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
55 * firmware images can be stored in hardware and need not be compiled
56 * into the driver.
57 *
58 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
59 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
60 *
61 * The BCM5701 is a single-chip solution incorporating both the BCM5700
62 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
63 * does not support external SSRAM.
64 *
65 * Broadcom also produces a variation of the BCM5700 under the "Altima"
66 * brand name, which is functionally similar but lacks PCI-X support.
67 *
68 * Without external SSRAM, you can only have at most 4 TX rings,
69 * and the use of the mini RX ring is disabled. This seems to imply
70 * that these features are simply not available on the BCM5701. As a
71 * result, this driver does not implement any support for the mini RX
72 * ring.
73 */
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/sockio.h>
78#include <sys/mbuf.h>
79#include <sys/malloc.h>
80#include <sys/kernel.h>
81#include <sys/socket.h>
82#include <sys/queue.h>
83
84#include <net/if.h>
85#include <net/ifq_var.h>
86#include <net/if_arp.h>
87#include <net/ethernet.h>
88#include <net/if_dl.h>
89#include <net/if_media.h>
90
91#include <net/bpf.h>
92
93#include <net/if_types.h>
94#include <net/vlan/if_vlan_var.h>
95
96#include <netinet/in_systm.h>
97#include <netinet/in.h>
98#include <netinet/ip.h>
99
100#include <vm/vm.h> /* for vtophys */
101#include <vm/pmap.h> /* for vtophys */
102#include <machine/resource.h>
103#include <sys/bus.h>
104#include <sys/rman.h>
105
106#include <dev/netif/mii_layer/mii.h>
107#include <dev/netif/mii_layer/miivar.h>
108#include <dev/netif/mii_layer/miidevs.h>
109#include <dev/netif/mii_layer/brgphyreg.h>
110
111#include <bus/pci/pcidevs.h>
112#include <bus/pci/pcireg.h>
113#include <bus/pci/pcivar.h>
114
115#include "if_bgereg.h"
116
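/*
 * Checksum offloads we can hand to the chip; advertised to the stack
 * via if_hwassist in bge_attach().
 */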
117#define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
118
119/* "controller miibus0" required. See GENERIC if you get errors here. */
120#include "miibus_if.h"
121
122/*
123 * Various supported device vendors/types and their names. Note: the
124 * spec seems to indicate that the hardware still has Alteon's vendor
125 * ID burned into it, though it will always be overridden by the vendor
126 * ID in the EEPROM. Just to be safe, we cover all possibilities.
127 */
128#define BGE_DEVDESC_MAX 64 /* Maximum device description length */
129
130static struct bge_type bge_devs[] = {
131 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5700,
132 "Broadcom BCM5700 Gigabit Ethernet" },
133 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_BCM5701,
134 "Broadcom BCM5701 Gigabit Ethernet" },
135 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5700,
136 "Broadcom BCM5700 Gigabit Ethernet" },
137 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5701,
138 "Broadcom BCM5701 Gigabit Ethernet" },
139 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5702X,
140 "Broadcom BCM5702X Gigabit Ethernet" },
141 { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5702X,
142 "Broadcom BCM5702X Gigabit Ethernet" },
143 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5703X,
144 "Broadcom BCM5703X Gigabit Ethernet" },
145 { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5703X,
146 "Broadcom BCM5703X Gigabit Ethernet" },
147 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704C,
148 "Broadcom BCM5704C Dual Gigabit Ethernet" },
149 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5704S,
150 "Broadcom BCM5704S Dual Gigabit Ethernet" },
151 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705,
152 "Broadcom BCM5705 Gigabit Ethernet" },
153 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705M,
154 "Broadcom BCM5705M Gigabit Ethernet" },
155 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5705_ALT,
156 "Broadcom BCM5705M Gigabit Ethernet" },
157 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5782,
158 "Broadcom BCM5782 Gigabit Ethernet" },
159 { PCI_VENDOR_BROADCOM, BCOM_DEVICEID_BCM5788,
160 "Broadcom BCM5788 Gigabit Ethernet" },
161 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901,
162 "Broadcom BCM5901 Fast Ethernet" },
163 { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM5901A2,
164 "Broadcom BCM5901A2 Fast Ethernet" },
165 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
166 "SysKonnect Gigabit Ethernet" },
167 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1000,
168 "Altima AC1000 Gigabit Ethernet" },
169 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC1001,
170 "Altima AC1002 Gigabit Ethernet" },
171 { PCI_VENDOR_ALTIMA, PCI_PRODUCT_ALTIMA_AC9100,
172 "Altima AC9100 Gigabit Ethernet" },
173 { 0, 0, NULL }
174};
175
176static int bge_probe(device_t);
177static int bge_attach(device_t);
178static int bge_detach(device_t);
179static void bge_release_resources(struct bge_softc *);
180static void bge_txeof(struct bge_softc *);
181static void bge_rxeof(struct bge_softc *);
182
183static void bge_tick(void *);
184static void bge_stats_update(struct bge_softc *);
185static void bge_stats_update_regs(struct bge_softc *);
186static int bge_encap(struct bge_softc *, struct mbuf *, uint32_t *);
187
188static void bge_intr(void *);
189static void bge_start(struct ifnet *);
190static int bge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
191static void bge_init(void *);
192static void bge_stop(struct bge_softc *);
193static void bge_watchdog(struct ifnet *);
194static void bge_shutdown(device_t);
195static int bge_ifmedia_upd(struct ifnet *);
196static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
197
198static uint8_t bge_eeprom_getbyte(struct bge_softc *, uint32_t, uint8_t *);
199static int bge_read_eeprom(struct bge_softc *, caddr_t, uint32_t, size_t);
200
201static uint32_t bge_crc(caddr_t);
202static void bge_setmulti(struct bge_softc *);
203
204static void bge_handle_events(struct bge_softc *);
205static int bge_alloc_jumbo_mem(struct bge_softc *);
206static void bge_free_jumbo_mem(struct bge_softc *);
207static void *bge_jalloc(struct bge_softc *);
208static void bge_jfree(caddr_t, u_int);
209static void bge_jref(caddr_t, u_int);
210static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *);
211static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
212static int bge_init_rx_ring_std(struct bge_softc *);
213static void bge_free_rx_ring_std(struct bge_softc *);
214static int bge_init_rx_ring_jumbo(struct bge_softc *);
215static void bge_free_rx_ring_jumbo(struct bge_softc *);
216static void bge_free_tx_ring(struct bge_softc *);
217static int bge_init_tx_ring(struct bge_softc *);
218
219static int bge_chipinit(struct bge_softc *);
220static int bge_blockinit(struct bge_softc *);
221
222#ifdef notdef
223static uint8_t bge_vpd_readbyte(struct bge_softc *, uint32_t);
224static void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, uint32_t);
225static void bge_vpd_read(struct bge_softc *);
226#endif
227
228static uint32_t bge_readmem_ind(struct bge_softc *, uint32_t);
229static void bge_writemem_ind(struct bge_softc *, uint32_t, uint32_t);
230#ifdef notdef
231static uint32_t bge_readreg_ind(struct bge_softc *, uint32_t);
232#endif
233static void bge_writereg_ind(struct bge_softc *, uint32_t, uint32_t);
234
235static int bge_miibus_readreg(device_t, int, int);
236static int bge_miibus_writereg(device_t, int, int, int);
237static void bge_miibus_statchg(device_t);
238
239static void bge_reset(struct bge_softc *);
240
241static device_method_t bge_methods[] = {
242 /* Device interface */
243 DEVMETHOD(device_probe, bge_probe),
244 DEVMETHOD(device_attach, bge_attach),
245 DEVMETHOD(device_detach, bge_detach),
246 DEVMETHOD(device_shutdown, bge_shutdown),
247
248 /* bus interface */
249 DEVMETHOD(bus_print_child, bus_generic_print_child),
250 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
251
252 /* MII interface */
253 DEVMETHOD(miibus_readreg, bge_miibus_readreg),
254 DEVMETHOD(miibus_writereg, bge_miibus_writereg),
255 DEVMETHOD(miibus_statchg, bge_miibus_statchg),
256
257 { 0, 0 }
258};
259
260static DEFINE_CLASS_0(bge, bge_driver, bge_methods, sizeof(struct bge_softc));
261static devclass_t bge_devclass;
262
263DECLARE_DUMMY_MODULE(if_bge);
264DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
265DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);
266
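/*
 * Indirect access to chip internal memory: writing an offset to the
 * BGE_PCI_MEMWIN_BASEADDR config register moves a window over that
 * region, and the dword there is then accessed through
 * BGE_PCI_MEMWIN_DATA. The register variants below work the same way
 * via BGE_PCI_REG_BASEADDR/BGE_PCI_REG_DATA.
 */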
267static uint32_t
268bge_readmem_ind(struct bge_softc *sc, uint32_t off)
269{
270 device_t dev = sc->bge_dev;
271
272 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
273 return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
274}
275
276static void
277bge_writemem_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
278{
279 device_t dev = sc->bge_dev;
280
281 pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
282 pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);
283}
284
285#ifdef notdef
286static uint32_t
287bge_readreg_ind(struct bge_softc *sc, uint32_t off)
288{
289 device_t dev = sc->bge_dev;
290
291 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
292 return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
293}
294#endif
295
296static void
297bge_writereg_ind(struct bge_softc *sc, uint32_t off, uint32_t val)
298{
299 device_t dev = sc->bge_dev;
300
301 pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
302 pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);
303}
304
305#ifdef notdef
306static uint8_t
307bge_vpd_readbyte(struct bge_softc *sc, uint32_t addr)
308{
309 device_t dev = sc->bge_dev;
310 uint32_t val;
311 int i;
312
313 pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
314 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
315 DELAY(10);
316 if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
317 break;
318 }
319
320 if (i == BGE_TIMEOUT * 10) {
321 device_printf(sc->bge_dev, "VPD read timed out\n");
322 return(0);
323 }
324
325 val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);
326
327 return((val >> ((addr % 4) * 8)) & 0xFF);
328}
329
330static void
331bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, uint32_t addr)
332{
333 size_t i;
334 uint8_t *ptr;
335
336 ptr = (uint8_t *)res;
337 for (i = 0; i < sizeof(struct vpd_res); i++)
338 ptr[i] = bge_vpd_readbyte(sc, i + addr);
339
340 return;
341}
342
343static void
344bge_vpd_read(struct bge_softc *sc)
345{
346 int pos = 0, i;
347 struct vpd_res res;
348
349 if (sc->bge_vpd_prodname != NULL)
350 free(sc->bge_vpd_prodname, M_DEVBUF);
351 if (sc->bge_vpd_readonly != NULL)
352 free(sc->bge_vpd_readonly, M_DEVBUF);
353 sc->bge_vpd_prodname = NULL;
354 sc->bge_vpd_readonly = NULL;
355
356 bge_vpd_read_res(sc, &res, pos);
357
358 if (res.vr_id != VPD_RES_ID) {
359 device_printf(sc->bge_dev,
360 "bad VPD resource id: expected %x got %x\n",
361 VPD_RES_ID, res.vr_id);
362 return;
363 }
364
365 pos += sizeof(res);
366 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT);
367 for (i = 0; i < res.vr_len; i++)
368 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
369 sc->bge_vpd_prodname[i] = '\0';
370 pos += i;
371
372 bge_vpd_read_res(sc, &res, pos);
373
374 if (res.vr_id != VPD_RES_READ) {
375 device_printf(sc->bge_dev,
376 "bad VPD resource id: expected %x got %x\n",
377 VPD_RES_READ, res.vr_id);
378 return;
379 }
380
381 pos += sizeof(res);
382 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_INTWAIT);
383 for (i = 0; i < res.vr_len; i++)
384 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
385}
386#endif
387
388/*
389 * Read a byte of data stored in the EEPROM at address 'addr.' The
390 * BCM570x supports both the traditional bitbang interface and an
391 * auto access interface for reading the EEPROM. We use the auto
392 * access method.
393 */
394static uint8_t
395bge_eeprom_getbyte(struct bge_softc *sc, uint32_t addr, uint8_t *dest)
396{
397 int i;
398 uint32_t byte = 0;
399
400 /*
401 * Enable use of auto EEPROM access so we can avoid
402 * having to use the bitbang method.
403 */
404 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
405
406 /* Reset the EEPROM, load the clock period. */
407 CSR_WRITE_4(sc, BGE_EE_ADDR,
408 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
409 DELAY(20);
410
411 /* Issue the read EEPROM command. */
412 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
413
414 /* Wait for completion */
415 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
416 DELAY(10);
417 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
418 break;
419 }
420
421 if (i == BGE_TIMEOUT * 10) {
422 if_printf(&sc->arpcom.ac_if, "eeprom read timed out\n");
423 return(1);
424 }
425
426 /* Get result. */
427 byte = CSR_READ_4(sc, BGE_EE_DATA);
428
429 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
430
431 return(0);
432}
433
434/*
435 * Read a sequence of bytes from the EEPROM.
436 */
437static int
438bge_read_eeprom(struct bge_softc *sc, caddr_t dest, uint32_t off, size_t len)
439{
440 size_t i;
441 int err;
442 uint8_t byte;
443
444 for (byte = 0, err = 0, i = 0; i < len; i++) {
445 err = bge_eeprom_getbyte(sc, off + i, &byte);
446 if (err)
447 break;
448 *(dest + i) = byte;
449 }
450
451 return(err ? 1 : 0);
452}
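/*
 * Usage sketch: a hypothetical helper (not part of the driver, never
 * compiled) showing how bge_read_eeprom() is used. It mirrors what
 * bge_attach() does to fetch the hardware config word when it isn't
 * present in NIC shared memory.
 */
#ifdef notdef
static uint32_t
bge_read_hwcfg(struct bge_softc *sc)
{
	uint32_t hwcfg = 0;

	if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET,
	    sizeof(hwcfg)) == 0)
		hwcfg = ntohl(hwcfg);	/* EEPROM contents are big-endian */
	return(hwcfg);
}
#endif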
453
454static int
455bge_miibus_readreg(device_t dev, int phy, int reg)
456{
457 struct bge_softc *sc;
458 struct ifnet *ifp;
459 uint32_t val, autopoll;
460 int i;
461
462 sc = device_get_softc(dev);
463 ifp = &sc->arpcom.ac_if;
464
465 /*
466 * Broadcom's own driver always assumes the internal
467 * PHY is at GMII address 1. On some chips, the PHY responds
468 * to accesses at all addresses, which could cause us to
469 * bogusly attach the PHY 32 times at probe time. Always
470 * restricting the lookup to address 1 is simpler than
471 * trying to figure out which chip revisions should be
472 * special-cased.
473 */
474 if (phy != 1)
475 return(0);
476
477 /* Reading with autopolling on may trigger PCI errors */
478 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
479 if (autopoll & BGE_MIMODE_AUTOPOLL) {
480 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
481 DELAY(40);
482 }
483
484 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
485 BGE_MIPHY(phy)|BGE_MIREG(reg));
486
487 for (i = 0; i < BGE_TIMEOUT; i++) {
488 val = CSR_READ_4(sc, BGE_MI_COMM);
489 if (!(val & BGE_MICOMM_BUSY))
490 break;
491 }
492
493 if (i == BGE_TIMEOUT) {
494 if_printf(ifp, "PHY read timed out\n");
495 val = 0;
496 goto done;
497 }
498
499 val = CSR_READ_4(sc, BGE_MI_COMM);
500
501done:
502 if (autopoll & BGE_MIMODE_AUTOPOLL) {
503 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
504 DELAY(40);
505 }
506
507 if (val & BGE_MICOMM_READFAIL)
508 return(0);
509
510 return(val & 0xFFFF);
511}
512
513static int
514bge_miibus_writereg(device_t dev, int phy, int reg, int val)
515{
516 struct bge_softc *sc;
517 uint32_t autopoll;
518 int i;
519
520 sc = device_get_softc(dev);
521
522 /* Reading with autopolling on may trigger PCI errors */
523 autopoll = CSR_READ_4(sc, BGE_MI_MODE);
524 if (autopoll & BGE_MIMODE_AUTOPOLL) {
525 BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
526 DELAY(40);
527 }
528
529 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
530 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
531
532 for (i = 0; i < BGE_TIMEOUT; i++) {
533 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
534 break;
535 }
536
537 if (autopoll & BGE_MIMODE_AUTOPOLL) {
538 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
539 DELAY(40);
540 }
541
542 if (i == BGE_TIMEOUT) {
543 if_printf(&sc->arpcom.ac_if, "PHY write timed out\n");
544 return(0);
545 }
546
547 return(0);
548}
549
550static void
551bge_miibus_statchg(device_t dev)
552{
553 struct bge_softc *sc;
554 struct mii_data *mii;
555
556 sc = device_get_softc(dev);
557 mii = device_get_softc(sc->bge_miibus);
558
559 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
560 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
561 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
562 } else {
563 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
564 }
565
566 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
567 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
568 } else {
569 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
570 }
571}
572
573/*
574 * Handle events that have triggered interrupts.
575 */
576static void
577bge_handle_events(struct bge_softc *sc)
578{
579}
580
581/*
582 * Memory management for jumbo frames.
583 */
584static int
585bge_alloc_jumbo_mem(struct bge_softc *sc)
586{
587 struct bge_jpool_entry *entry;
588 caddr_t ptr;
589 int i;
590
591 /* Grab a big chunk o' storage. */
592 sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
593 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
594
595 if (sc->bge_cdata.bge_jumbo_buf == NULL) {
596 if_printf(&sc->arpcom.ac_if, "no memory for jumbo buffers!\n");
597 return(ENOBUFS);
598 }
599
600 SLIST_INIT(&sc->bge_jfree_listhead);
601 SLIST_INIT(&sc->bge_jinuse_listhead);
602
603 /*
604 * Now divide it up into 9K pieces and save the addresses
605 * in an array. Note that we play an evil trick here by using
606 * the first few bytes in the buffer to hold the address
607 * of the softc structure for this interface. This is because
608 * bge_jfree() needs it, but it is called by the mbuf management
609 * code which will not pass it to us explicitly.
610 */
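	/*
	 * Resulting layout of each BGE_JLEN slot:
	 *
	 *	+--------------------+------------------------------+
	 *	| softc back-pointer | jumbo buffer (bge_buf)       |
	 *	| sizeof(uint64_t)   | BGE_JLEN - sizeof(uint64_t)  |
	 *	+--------------------+------------------------------+
	 *
	 * bge_jref() and bge_jfree() step back sizeof(uint64_t) bytes
	 * from the buffer address to recover the softc pointer.
	 */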
611 ptr = sc->bge_cdata.bge_jumbo_buf;
612 for (i = 0; i < BGE_JSLOTS; i++) {
613 uint64_t **aptr;
614
615 aptr = (uint64_t **)ptr;
616 aptr[0] = (uint64_t *)sc;
617 ptr += sizeof(uint64_t);
618 sc->bge_cdata.bge_jslots[i].bge_buf = ptr;
619 sc->bge_cdata.bge_jslots[i].bge_inuse = 0;
620 ptr += (BGE_JLEN - sizeof(uint64_t));
621 entry = malloc(sizeof(struct bge_jpool_entry),
622 M_DEVBUF, M_INTWAIT);
623 entry->slot = i;
624 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
625 entry, jpool_entries);
626 }
627
628 return(0);
629}
630
631static void
632bge_free_jumbo_mem(struct bge_softc *sc)
633{
634 struct bge_jpool_entry *entry;
635 int i;
636
637 for (i = 0; i < BGE_JSLOTS; i++) {
638 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
639 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
640 free(entry, M_DEVBUF);
641 }
642
643 contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);
644}
645
646/*
647 * Allocate a jumbo buffer.
648 */
649static void *
650bge_jalloc(struct bge_softc *sc)
651{
652 struct bge_jpool_entry *entry;
653
654 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
655
656 if (entry == NULL) {
657 if_printf(&sc->arpcom.ac_if, "no free jumbo buffers\n");
658 return(NULL);
659 }
660
661 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
662 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
663 sc->bge_cdata.bge_jslots[entry->slot].bge_inuse = 1;
664 return(sc->bge_cdata.bge_jslots[entry->slot].bge_buf);
665}
666
667/*
668 * Adjust usage count on a jumbo buffer.
669 */
670static void
671bge_jref(caddr_t buf, u_int size)
672{
673 struct bge_softc *sc;
674 uint64_t **aptr;
675 int i;
676
677 /* Extract the softc struct pointer. */
678 aptr = (uint64_t **)(buf - sizeof(uint64_t));
679 sc = (struct bge_softc *)(aptr[0]);
680
681 if (sc == NULL)
682 panic("bge_jref: can't find softc pointer!");
683
684 if (size != BGE_JUMBO_FRAMELEN)
685 panic("bge_jref: adjusting refcount of buf of wrong size!");
686
687 /* calculate the slot this buffer belongs to */
688
689 i = ((vm_offset_t)aptr
690 - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
691
692 if ((i < 0) || (i >= BGE_JSLOTS))
693 panic("bge_jref: asked to reference buffer "
694 "that we don't manage!");
695 else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
696 panic("bge_jref: buffer already free!");
697 else
698 sc->bge_cdata.bge_jslots[i].bge_inuse++;
699}
700
701/*
702 * Release a jumbo buffer.
703 */
704static void
705bge_jfree(caddr_t buf, u_int size)
706{
707 struct bge_softc *sc;
708 uint64_t **aptr;
709 struct bge_jpool_entry *entry;
710 int i;
711
712 /* Extract the softc struct pointer. */
713 aptr = (uint64_t **)(buf - sizeof(uint64_t));
714 sc = (struct bge_softc *)(aptr[0]);
715
716 if (sc == NULL)
717 panic("bge_jfree: can't find softc pointer!");
718
719 if (size != BGE_JUMBO_FRAMELEN)
720 panic("bge_jfree: freeing buffer of wrong size!");
721
722 /* calculate the slot this buffer belongs to */
723
724 i = ((vm_offset_t)aptr
725 - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
726
727 if ((i < 0) || (i >= BGE_JSLOTS))
728 panic("bge_jfree: asked to free buffer that we don't manage!");
729 else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
730 panic("bge_jfree: buffer already free!");
731 else {
732 sc->bge_cdata.bge_jslots[i].bge_inuse--;
733 if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0) {
734 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
735 if (entry == NULL)
736 panic("bge_jfree: buffer not in use!");
737 entry->slot = i;
738 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead,
739 jpool_entries);
740 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
741 entry, jpool_entries);
742 }
743 }
744}
745
746
747/*
748 * Initialize a standard receive ring descriptor.
749 */
750static int
751bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
752{
753 struct mbuf *m_new = NULL;
754 struct bge_rx_bd *r;
755
756 if (m == NULL) {
757 MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
758 if (m_new == NULL)
759 return(ENOBUFS);
760
761 MCLGET(m_new, MB_DONTWAIT);
762 if (!(m_new->m_flags & M_EXT)) {
763 m_freem(m_new);
764 return(ENOBUFS);
765 }
766 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
767 } else {
768 m_new = m;
769 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
770 m_new->m_data = m_new->m_ext.ext_buf;
771 }
772
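	/*
	 * Shifting the payload by ETHER_ALIGN (2 bytes) puts the IP
	 * header on a longword boundary after the 14-byte Ethernet
	 * header. This is skipped on chips with the PCI-X alignment
	 * bug; see the BCM5701 workaround in bge_attach().
	 */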
773 if (!sc->bge_rx_alignment_bug)
774 m_adj(m_new, ETHER_ALIGN);
775 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
776 r = &sc->bge_rdata->bge_rx_std_ring[i];
777 BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
778 r->bge_flags = BGE_RXBDFLAG_END;
779 r->bge_len = m_new->m_len;
780 r->bge_idx = i;
781
782 return(0);
783}
784
785/*
786 * Initialize a jumbo receive ring descriptor. This allocates
787 * a jumbo buffer from the pool managed internally by the driver.
788 */
789static int
790bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
791{
792 struct mbuf *m_new = NULL;
793 struct bge_rx_bd *r;
794
795 if (m == NULL) {
796 caddr_t *buf = NULL;
797
798 /* Allocate the mbuf. */
799 MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
800 if (m_new == NULL)
801 return(ENOBUFS);
802
803 /* Allocate the jumbo buffer */
804 buf = bge_jalloc(sc);
805 if (buf == NULL) {
806 m_freem(m_new);
807 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
808 "-- packet dropped!\n");
809 return(ENOBUFS);
810 }
811
812 /* Attach the buffer to the mbuf. */
813 m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
814 m_new->m_flags |= M_EXT | M_EXT_OLD;
815 m_new->m_len = m_new->m_pkthdr.len =
816 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
817 m_new->m_ext.ext_nfree.old = bge_jfree;
818 m_new->m_ext.ext_nref.old = bge_jref;
819 } else {
820 m_new = m;
821 m_new->m_data = m_new->m_ext.ext_buf;
822 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
823 }
824
825 if (!sc->bge_rx_alignment_bug)
826 m_adj(m_new, ETHER_ALIGN);
827 /* Set up the descriptor. */
828 r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
829 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
830 BGE_HOSTADDR(r->bge_addr, vtophys(mtod(m_new, caddr_t)));
831 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
832 r->bge_len = m_new->m_len;
833 r->bge_idx = i;
834
835 return(0);
836}
837
838/*
839 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
840 * that's 1MB of memory, which is a lot. For now, we fill only the first
841 * 256 ring entries and hope that our CPU is fast enough to keep up with
842 * the NIC.
843 */
844static int
845bge_init_rx_ring_std(struct bge_softc *sc)
846{
847 int i;
848
849 for (i = 0; i < BGE_SSLOTS; i++) {
850 if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
851 return(ENOBUFS);
852 }
853
854 sc->bge_std = i - 1;
855 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
856
857 return(0);
858}
859
860static void
861bge_free_rx_ring_std(struct bge_softc *sc)
862{
863 int i;
864
865 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
866 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
867 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
868 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
869 }
870 bzero(&sc->bge_rdata->bge_rx_std_ring[i],
871 sizeof(struct bge_rx_bd));
872 }
873}
874
875static int
876bge_init_rx_ring_jumbo(struct bge_softc *sc)
877{
878 int i;
879 struct bge_rcb *rcb;
880
881 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
882 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
883 return(ENOBUFS);
884 }
885
886 sc->bge_jumbo = i - 1;
887
888 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
889 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
890 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
891
892 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
893
894 return(0);
895}
896
897static void
898bge_free_rx_ring_jumbo(struct bge_softc *sc)
899{
900 int i;
901
902 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
903 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
904 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
905 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
906 }
907 bzero(&sc->bge_rdata->bge_rx_jumbo_ring[i],
908 sizeof(struct bge_rx_bd));
909 }
910}
911
912static void
913bge_free_tx_ring(struct bge_softc *sc)
914{
915 int i;
916
917 if (sc->bge_rdata->bge_tx_ring == NULL)
918 return;
919
920 for (i = 0; i < BGE_TX_RING_CNT; i++) {
921 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
922 m_freem(sc->bge_cdata.bge_tx_chain[i]);
923 sc->bge_cdata.bge_tx_chain[i] = NULL;
924 }
925 bzero(&sc->bge_rdata->bge_tx_ring[i],
926 sizeof(struct bge_tx_bd));
927 }
928}
929
930static int
931bge_init_tx_ring(struct bge_softc *sc)
932{
933 sc->bge_txcnt = 0;
934 sc->bge_tx_saved_considx = 0;
935
936 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
937 /* 5700 b2 errata */
938 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
939 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
940
941 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
942 /* 5700 b2 errata */
943 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
944 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
945
946 return(0);
947}
948
949#define BGE_POLY 0xEDB88320
950
951static uint32_t
952bge_crc(caddr_t addr)
953{
955 uint32_t idx, bit, data, crc;
956
957 /* Compute CRC for the address value. */
958 crc = 0xFFFFFFFF; /* initial value */
959
960 for (idx = 0; idx < 6; idx++) {
961 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
962 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
963 }
964
965 return(crc & 0x7F);
966}
967
968static void
969bge_setmulti(struct bge_softc *sc)
970{
971 struct ifnet *ifp;
972 struct ifmultiaddr *ifma;
973 uint32_t hashes[4] = { 0, 0, 0, 0 };
974 int h, i;
975
976 ifp = &sc->arpcom.ac_if;
977
978 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
979 for (i = 0; i < 4; i++)
980 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
981 return;
982 }
983
984 /* First, zot all the existing filters. */
985 for (i = 0; i < 4; i++)
986 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
987
988 /* Now program new ones. */
989 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
990 if (ifma->ifma_addr->sa_family != AF_LINK)
991 continue;
992 h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
993 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
994 }
995
996 for (i = 0; i < 4; i++)
997 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
998}
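/*
 * Illustrative sketch (never compiled; the function name is made up):
 * how a single multicast address maps to a filter bit. The 7-bit hash
 * from bge_crc() selects one of 128 bits spread across the four
 * 32-bit BGE_MAR registers.
 */
#ifdef notdef
static void
bge_hash_example(void)
{
	/* 01:00:5e:00:00:01 is the IPv4 all-hosts multicast group. */
	uint8_t addr[ETHER_ADDR_LEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int h, reg, bit;

	h = bge_crc((caddr_t)addr);	/* 7-bit hash, 0..127 */
	reg = (h & 0x60) >> 5;		/* which of BGE_MAR0..BGE_MAR3 */
	bit = h & 0x1F;			/* which bit within that register */
}
#endif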
999
1000/*
1001 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1002 * self-test results.
1003 */
1004static int
1005bge_chipinit(struct bge_softc *sc)
1006{
1007 int i;
1008 uint32_t dma_rw_ctl;
1009
1010 /* Set endianness before we access any non-PCI registers. */
1011#if BYTE_ORDER == BIG_ENDIAN
1012 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1013 BGE_BIGENDIAN_INIT, 4);
1014#else
1015 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
1016 BGE_LITTLEENDIAN_INIT, 4);
1017#endif
1018
1019 /*
1020 * Check the 'ROM failed' bit on the RX CPU to see if
1021 * self-tests passed.
1022 */
1023 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1024 if_printf(&sc->arpcom.ac_if,
1025 "RX CPU self-diagnostics failed!\n");
1026 return(ENODEV);
1027 }
1028
1029 /* Clear the MAC control register */
1030 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1031
1032 /*
1033 * Clear the MAC statistics block in the NIC's
1034 * internal memory.
1035 */
1036 for (i = BGE_STATS_BLOCK;
1037 i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t))
1038 BGE_MEMWIN_WRITE(sc, i, 0);
1039
1040 for (i = BGE_STATUS_BLOCK;
1041 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t))
1042 BGE_MEMWIN_WRITE(sc, i, 0);
1043
1044 /* Set up the PCI DMA control register. */
1045 if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
1046 BGE_PCISTATE_PCI_BUSMODE) {
1047 /* Conventional PCI bus */
1048 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1049 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1050 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1051 (0x0F);
1052 } else {
1053 /* PCI-X bus */
1054 /*
1055 * The 5704 uses a different encoding of read/write
1056 * watermarks.
1057 */
1058 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1059 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1060 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1061 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1062 else
1063 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1064 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1065 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1066 (0x0F);
1067
1068 /*
1069 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1070 * for hardware bugs.
1071 */
1072 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1073 sc->bge_asicrev == BGE_ASICREV_BCM5704) {
1074 uint32_t tmp;
1075
1076 tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
1077 if (tmp == 0x6 || tmp == 0x7)
1078 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1079 }
1080 }
1081
1082 if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
1083 sc->bge_asicrev == BGE_ASICREV_BCM5704 ||
1084 sc->bge_asicrev == BGE_ASICREV_BCM5705)
1085 dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
1086 pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);
1087
1088 /*
1089 * Set up general mode register.
1090 */
1091 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
1092 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1093 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1094 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1095
1096 /*
1097 * Disable memory write invalidate. Apparently it is not supported
1098 * properly by these devices.
1099 */
1100 PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);
1101
1102 /* Set the timer prescaler (always 66MHz) */
1103 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1104
1105 return(0);
1106}
1107
1108static int
1109bge_blockinit(struct bge_softc *sc)
1110{
1111 struct bge_rcb *rcb;
1112 volatile struct bge_rcb *vrcb;
1113 int i;
1114
1115 /*
1116 * Initialize the memory window pointer register so that
1117 * we can access the first 32K of internal NIC RAM. This will
1118 * allow us to set up the TX send ring RCBs and the RX return
1119 * ring RCBs, plus other things which live in NIC memory.
1120 */
1121 CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);
1122
1123 /* Note: the BCM5704 has a smaller mbuf space than other chips. */
1124
1125 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1126 /* Configure mbuf memory pool */
1127 if (sc->bge_extram) {
1128 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1129 BGE_EXT_SSRAM);
1130 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1131 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1132 else
1133 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1134 } else {
1135 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1136 BGE_BUFFPOOL_1);
1137 if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
1138 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1139 else
1140 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1141 }
1142
1143 /* Configure DMA resource pool */
1144 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1145 BGE_DMA_DESCRIPTORS);
1146 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1147 }
1148
1149 /* Configure mbuf pool watermarks */
1150 if (sc->bge_asicrev == BGE_ASICREV_BCM5705) {
1151 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1152 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1153 } else {
1154 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1155 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1156 }
1157 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1158
1159 /* Configure DMA resource watermarks */
1160 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1161 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1162
1163 /* Enable buffer manager */
1164 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1165 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1166 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1167
1168 /* Poll for buffer manager start indication */
1169 for (i = 0; i < BGE_TIMEOUT; i++) {
1170 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1171 break;
1172 DELAY(10);
1173 }
1174
1175 if (i == BGE_TIMEOUT) {
1176 if_printf(&sc->arpcom.ac_if,
1177 "buffer manager failed to start\n");
1178 return(ENXIO);
1179 }
1180 }
1181
1182 /* Enable flow-through queues */
1183 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1184 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1185
1186 /* Wait until queue initialization is complete */
1187 for (i = 0; i < BGE_TIMEOUT; i++) {
1188 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1189 break;
1190 DELAY(10);
1191 }
1192
1193 if (i == BGE_TIMEOUT) {
1194 if_printf(&sc->arpcom.ac_if,
1195 "flow-through queue init failed\n");
1196 return(ENXIO);
1197 }
1198
1199 /* Initialize the standard RX ring control block */
1200 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1201 BGE_HOSTADDR(rcb->bge_hostaddr,
1202 vtophys(&sc->bge_rdata->bge_rx_std_ring));
1203 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1204 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1205 else
1206 rcb->bge_maxlen_flags =
1207 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1208 if (sc->bge_extram)
1209 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1210 else
1211 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1212 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1213 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1214 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1215 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1216
1217 /*
1218 * Initialize the jumbo RX ring control block
1219 * We set the 'ring disabled' bit in the flags
1220 * field until we're actually ready to start
1221 * using this ring (i.e. once we set the MTU
1222 * high enough to require it).
1223 */
1224 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1225 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1226 BGE_HOSTADDR(rcb->bge_hostaddr,
1227 vtophys(&sc->bge_rdata->bge_rx_jumbo_ring));
1228 rcb->bge_maxlen_flags =
1229 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1230 BGE_RCB_FLAG_RING_DISABLED);
1231 if (sc->bge_extram)
1232 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1233 else
1234 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1235 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1236 rcb->bge_hostaddr.bge_addr_hi);
1237 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1238 rcb->bge_hostaddr.bge_addr_lo);
1239 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1240 rcb->bge_maxlen_flags);
1241 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1242
1243 /* Set up dummy disabled mini ring RCB */
1244 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1245 rcb->bge_maxlen_flags =
1246 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1247 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1248 rcb->bge_maxlen_flags);
1249 }
1250
1251 /*
1252 * Set the BD ring replenish thresholds. The recommended
1253 * values are 1/8th the number of descriptors allocated to
1254 * each ring.
1255 */
1256 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1257 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1258
1259 /*
1260 * Disable all unused send rings by setting the 'ring disabled'
1261 * bit in the flags field of all the TX send ring control blocks.
1262 * These are located in NIC memory.
1263 */
1264 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1265 BGE_SEND_RING_RCB);
1266 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1267 vrcb->bge_maxlen_flags =
1268 BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
1269 vrcb->bge_nicaddr = 0;
1270 vrcb++;
1271 }
1272
1273 /* Configure TX RCB 0 (we use only the first ring) */
1274 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1275 BGE_SEND_RING_RCB);
1276 vrcb->bge_hostaddr.bge_addr_hi = 0;
1277 BGE_HOSTADDR(vrcb->bge_hostaddr, vtophys(&sc->bge_rdata->bge_tx_ring));
1278 vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
1279 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1280 vrcb->bge_maxlen_flags =
1281 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);
1282
1283 /* Disable all unused RX return rings */
1284 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1285 BGE_RX_RETURN_RING_RCB);
1286 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1287 vrcb->bge_hostaddr.bge_addr_hi = 0;
1288 vrcb->bge_hostaddr.bge_addr_lo = 0;
1289 vrcb->bge_maxlen_flags =
1290 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1291 BGE_RCB_FLAG_RING_DISABLED);
1292 vrcb->bge_nicaddr = 0;
1293 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1294 (i * (sizeof(uint64_t))), 0);
1295 vrcb++;
1296 }
1297
1298 /* Initialize RX ring indexes */
1299 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1300 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1301 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1302
1303 /*
1304 * Set up RX return ring 0
1305 * Note that the NIC address for RX return rings is 0x00000000.
1306 * The return rings live entirely within the host, so the
1307 * nicaddr field in the RCB isn't used.
1308 */
1309 vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
1310 BGE_RX_RETURN_RING_RCB);
1311 vrcb->bge_hostaddr.bge_addr_hi = 0;
1312 BGE_HOSTADDR(vrcb->bge_hostaddr,
1313 vtophys(&sc->bge_rdata->bge_rx_return_ring));
1314 vrcb->bge_nicaddr = 0x00000000;
1315 vrcb->bge_maxlen_flags =
1316 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0);
1317
1318 /* Set random backoff seed for TX */
1319 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1320 sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
1321 sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
1322 sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
1323 BGE_TX_BACKOFF_SEED_MASK);
1324
1325 /* Set inter-packet gap */
1326 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1327
1328 /*
1329 * Specify which ring to use for packets that don't match
1330 * any RX rules.
1331 */
1332 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1333
1334 /*
1335 * Configure number of RX lists. One interrupt distribution
1336 * list, eight active lists, one bad frames class.
1337 */
1338 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1339
1340 /* Initialize RX list placement stats mask. */
1341 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1342 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1343
1344 /* Disable host coalescing until we get it set up */
1345 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1346
1347 /* Poll to make sure it's shut down. */
1348 for (i = 0; i < BGE_TIMEOUT; i++) {
1349 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1350 break;
1351 DELAY(10);
1352 }
1353
1354 if (i == BGE_TIMEOUT) {
1355 if_printf(&sc->arpcom.ac_if,
1356 "host coalescing engine failed to idle\n");
1357 return(ENXIO);
1358 }
1359
1360 /* Set up host coalescing defaults */
1361 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1362 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1363 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1364 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1365 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1366 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1367 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1368 }
1369 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1370 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1371
1372 /* Set up address of statistics block */
1373 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1374 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
1375 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
1376 vtophys(&sc->bge_rdata->bge_info.bge_stats));
1377
1378 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1379 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1380 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1381 }
1382
1383 /* Set up address of status block */
1384 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
1385 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
1386 vtophys(&sc->bge_rdata->bge_status_block));
1387
1388 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1389 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1390
1391 /* Turn on host coalescing state machine */
1392 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1393
1394 /* Turn on RX BD completion state machine and enable attentions */
1395 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1396 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1397
1398 /* Turn on RX list placement state machine */
1399 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1400
1401 /* Turn on RX list selector state machine. */
1402 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1403 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1404
1405 /* Turn on DMA, clear stats */
1406 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1407 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1408 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1409 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1410 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1411
1412 /* Set misc. local control, enable interrupts on attentions */
1413 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);
1414
1415#ifdef notdef
1416 /* Assert GPIO pins for PHY reset */
1417 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1418 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1419 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1420 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1421#endif
1422
1423 /* Turn on DMA completion state machine */
1424 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1425 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1426
1427 /* Turn on write DMA state machine */
1428 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1429 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1430
1431 /* Turn on read DMA state machine */
1432 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1433 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1434
1435 /* Turn on RX data completion state machine */
1436 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1437
1438 /* Turn on RX BD initiator state machine */
1439 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1440
1441 /* Turn on RX data and RX BD initiator state machine */
1442 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1443
1444 /* Turn on Mbuf cluster free state machine */
1445 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1446 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1447
1448 /* Turn on send BD completion state machine */
1449 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1450
1451 /* Turn on send data completion state machine */
1452 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1453
1454 /* Turn on send data initiator state machine */
1455 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1456
1457 /* Turn on send BD initiator state machine */
1458 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1459
1460 /* Turn on send BD selector state machine */
1461 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1462
1463 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1464 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1465 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1466
1467 /* ack/clear link change events */
1468 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1469 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
1470 BGE_MACSTAT_LINK_CHANGED);
1471
1472 /* Enable PHY auto polling (for MII/GMII only) */
1473 if (sc->bge_tbi) {
1474 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1475 } else {
1476 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1477 if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
1478 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1479 BGE_EVTENB_MI_INTERRUPT);
1480 }
1481
1482 /* Enable link state change attentions. */
1483 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1484
1485 return(0);
1486}
1487
1488/*
1489 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
1490 * against our list and return its name if we find a match. Note
1491 * that since the Broadcom controller contains VPD support, we
1492 * can get the device name string from the controller itself instead
1493 * of the compiled-in string. This is a little slow, but it guarantees
1494 * we'll always announce the right product name.
1495 */
1496static int
1497bge_probe(device_t dev)
1498{
1499 struct bge_softc *sc;
1500 struct bge_type *t;
1501 char *descbuf;
1502 uint16_t product, vendor;
1503
1504 product = pci_get_device(dev);
1505 vendor = pci_get_vendor(dev);
1506
1507 for (t = bge_devs; t->bge_name != NULL; t++) {
1508 if (vendor == t->bge_vid && product == t->bge_did)
1509 break;
1510 }
1511
1512 if (t->bge_name == NULL)
1513 return(ENXIO);
1514
1515 sc = device_get_softc(dev);
1516#ifdef notdef
1517 sc->bge_dev = dev;
1518
1519 bge_vpd_read(sc);
1520 device_set_desc(dev, sc->bge_vpd_prodname);
1521#endif
1522 descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_WAITOK);
1523 snprintf(descbuf, BGE_DEVDESC_MAX, "%s, ASIC rev. %#04x", t->bge_name,
1524 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
1525 device_set_desc_copy(dev, descbuf);
1526 if (pci_get_subvendor(dev) == PCI_VENDOR_DELL)
1527 sc->bge_no_3_led = 1;
1528 free(descbuf, M_TEMP);
1529 return(0);
1530}
1531
1532static int
1533bge_attach(device_t dev)
1534{
1535 int s;
1536 uint32_t command;
1537 struct ifnet *ifp;
1538 struct bge_softc *sc;
1539 uint32_t hwcfg = 0;
1540 uint32_t mac_addr = 0;
1541 int error = 0, rid;
1542 uint8_t ether_addr[ETHER_ADDR_LEN];
1543
1544 s = splimp();
1545
1546 sc = device_get_softc(dev);
1547 sc->bge_dev = dev;
1548 callout_init(&sc->bge_stat_timer);
1549
1550 /*
1551 * Map control/status registers.
1552 */
1553 pci_enable_busmaster(dev);
1554 pci_enable_io(dev, SYS_RES_MEMORY);
1555 command = pci_read_config(dev, PCIR_COMMAND, 4);
1556
1557 if (!(command & PCIM_CMD_MEMEN)) {
1558 device_printf(dev, "failed to enable memory mapping!\n");
1559 error = ENXIO;
1560 goto fail;
1561 }
1562
1563 rid = BGE_PCI_BAR0;
1564 sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1565 RF_ACTIVE);
1566
1567 if (sc->bge_res == NULL) {
1568 device_printf(dev, "couldn't map memory\n");
1569 error = ENXIO;
1570 goto fail;
1571 }
1572
1573 sc->bge_btag = rman_get_bustag(sc->bge_res);
1574 sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
1575 sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);
1576
1577 /* Allocate interrupt */
1578 rid = 0;
1579
1580 sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1581 RF_SHAREABLE | RF_ACTIVE);
1582
1583 if (sc->bge_irq == NULL) {
1584 device_printf(dev, "couldn't map interrupt\n");
1585 error = ENXIO;
1586 goto fail;
1587 }
1588
1589 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
1590 bge_intr, sc, &sc->bge_intrhand);
1591
1592 if (error) {
1593 bge_release_resources(sc);
1594 device_printf(dev, "couldn't set up irq\n");
1595 goto fail;
1596 }
1597
1598 ifp = &sc->arpcom.ac_if;
1599 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1600
1601 /* Try to reset the chip. */
1602 bge_reset(sc);
1603
1604 if (bge_chipinit(sc)) {
1605 device_printf(dev, "chip initialization failed\n");
1606 bge_release_resources(sc);
1607 error = ENXIO;
1608 goto fail;
1609 }
1610
1611 /* Get the station address: from NIC shared memory if it carries
1612 * the ASCII signature 'HK' (0x484b), else fall back to the EEPROM.
1613 */
1614 mac_addr = bge_readmem_ind(sc, 0x0c14);
1615 if ((mac_addr >> 16) == 0x484b) {
1616 ether_addr[0] = (uint8_t)(mac_addr >> 8);
1617 ether_addr[1] = (uint8_t)mac_addr;
1618 mac_addr = bge_readmem_ind(sc, 0x0c18);
1619 ether_addr[2] = (uint8_t)(mac_addr >> 24);
1620 ether_addr[3] = (uint8_t)(mac_addr >> 16);
1621 ether_addr[4] = (uint8_t)(mac_addr >> 8);
1622 ether_addr[5] = (uint8_t)mac_addr;
1623 } else if (bge_read_eeprom(sc, ether_addr,
1624 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
1625 device_printf(dev, "failed to read station address\n");
1626 bge_release_resources(sc);
1627 error = ENXIO;
1628 goto fail;
1629 }
1630
1631 /* Allocate the general information block and ring buffers. */
1632 sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
1633 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1634
1635 if (sc->bge_rdata == NULL) {
1636 bge_release_resources(sc);
1637 error = ENXIO;
1638 device_printf(dev, "no memory for list buffers!\n");
1639 goto fail;
1640 }
1641
1642 bzero(sc->bge_rdata, sizeof(struct bge_ring_data));
1643
1644 /* Save ASIC rev. */
1645
1646 sc->bge_chipid =
1647 pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
1648 BGE_PCIMISCCTL_ASICREV;
1649 sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
1650 sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);
1651
1652 /*
1653 * Try to allocate memory for jumbo buffers.
1654 * The 5705 does not appear to support jumbo frames.
1655 */
1656 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
1657 if (bge_alloc_jumbo_mem(sc)) {
1658 device_printf(dev, "jumbo buffer allocation failed\n");
1659 bge_release_resources(sc);
1660 error = ENXIO;
1661 goto fail;
1662 }
1663 }
1664
1665 /* Set default tuneable values. */
1666 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
1667 sc->bge_rx_coal_ticks = 150;
1668 sc->bge_tx_coal_ticks = 150;
1669 sc->bge_rx_max_coal_bds = 64;
1670 sc->bge_tx_max_coal_bds = 128;
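	/*
	 * Roughly, the chip posts a status update (and interrupt) once
	 * either the coalescing tick timer expires or the max BD count
	 * is reached, whichever comes first. The values above are
	 * programmed into the HCC registers in bge_blockinit().
	 */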
1671
1672 /* 5705 limits RX return ring to 512 entries. */
1673 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
1674 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1675 else
1676 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1677
1678 /* Set up ifnet structure */
1679 ifp->if_softc = sc;
1680 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1681 ifp->if_ioctl = bge_ioctl;
1682 ifp->if_start = bge_start;
1683 ifp->if_watchdog = bge_watchdog;
1684 ifp->if_init = bge_init;
1685 ifp->if_mtu = ETHERMTU;
1686 ifq_set_maxlen(&ifp->if_snd, BGE_TX_RING_CNT - 1);
1687 ifq_set_ready(&ifp->if_snd);
1688 ifp->if_hwassist = BGE_CSUM_FEATURES;
1689 ifp->if_capabilities = IFCAP_HWCSUM;
1690 ifp->if_capenable = ifp->if_capabilities;
1691
1692 /*
1693 * Figure out what sort of media we have by checking the
1694 * hardware config word in the first 32k of NIC internal memory,
1695 * or fall back to examining the EEPROM if necessary.
1696 * Note: on some BCM5700 cards, this value appears to be unset.
1697 * If that's the case, we have to rely on identifying the NIC
1698 * by its PCI subsystem ID, as we do below for the SysKonnect
1699 * SK-9D41.
1700 */
1701 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
1702 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
1703 else {
1704 bge_read_eeprom(sc, (caddr_t)&hwcfg,
1705 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
1706 hwcfg = ntohl(hwcfg);
1707 }
1708
1709 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
1710 sc->bge_tbi = 1;
1711
1712 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
1713 if (pci_get_subdevice(dev) == PCI_PRODUCT_SCHNEIDERKOCH_SK_9D41)
1714 sc->bge_tbi = 1;
1715
1716 if (sc->bge_tbi) {
1717 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
1718 bge_ifmedia_upd, bge_ifmedia_sts);
1719 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
1720 ifmedia_add(&sc->bge_ifmedia,
1721 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
1722 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1723 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
1724 } else {
1725 /*
1726 * Do transceiver setup.
1727 */
1728 if (mii_phy_probe(dev, &sc->bge_miibus,
1729 bge_ifmedia_upd, bge_ifmedia_sts)) {
1730 device_printf(dev, "MII without any PHY!\n");
1731 bge_release_resources(sc);
1732 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
	bge_free_jumbo_mem(sc);
1733 error = ENXIO;
1734 goto fail;
1735 }
1736 }
1737
1738 /*
1739 * When using the BCM5701 in PCI-X mode, data corruption has
1740 * been observed in the first few bytes of some received packets.
1741 * Aligning the packet buffer in memory eliminates the corruption.
1742 * Unfortunately, this misaligns the packet payloads. On platforms
1743 * which do not support unaligned accesses, we will realign the
1744 * payloads by copying the received packets.
1745 */
1746 switch (sc->bge_chipid) {
1747 case BGE_CHIPID_BCM5701_A0:
1748 case BGE_CHIPID_BCM5701_B0:
1749 case BGE_CHIPID_BCM5701_B2:
1750 case BGE_CHIPID_BCM5701_B5:
1751 /* If in PCI-X mode, work around the alignment bug. */
1752 if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
1753 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
1754 BGE_PCISTATE_PCI_BUSSPEED)
1755 sc->bge_rx_alignment_bug = 1;
1756 break;
1757 }
1758
1759 /*
1760 * Call MI attach routine.
1761 */
1762 ether_ifattach(ifp, ether_addr);
1763
1764fail:
1765 splx(s);
1766
1767 return(error);
1768}
1769
1770static int
1771bge_detach(device_t dev)
1772{
1773 struct bge_softc *sc;
1774 struct ifnet *ifp;
1775 int s;
1776
1777 s = splimp();
1778
1779 sc = device_get_softc(dev);
1780 ifp = &sc->arpcom.ac_if;
1781
1782 ether_ifdetach(ifp);
1783 bge_stop(sc);
1784 bge_reset(sc);
1785
1786 if (sc->bge_tbi) {
1787 ifmedia_removeall(&sc->bge_ifmedia);
1788 } else {
1789 bus_generic_detach(dev);
1790 device_delete_child(dev, sc->bge_miibus);
1791 }
1792
1793 bge_release_resources(sc);
1794 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1795 bge_free_jumbo_mem(sc);
1796
1797 splx(s);
1798
1799 return(0);
1800}
1801
1802static void
1803bge_release_resources(struct bge_softc *sc)
1804{
1805 device_t dev;
1806
1807 dev = sc->bge_dev;
1808
1809 if (sc->bge_vpd_prodname != NULL)
1810 free(sc->bge_vpd_prodname, M_DEVBUF);
1811
1812 if (sc->bge_vpd_readonly != NULL)
1813 free(sc->bge_vpd_readonly, M_DEVBUF);
1814
1815 if (sc->bge_intrhand != NULL)
1816 bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);
1817
1818 if (sc->bge_irq != NULL)
1819 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);
1820
1821 if (sc->bge_res != NULL)
1822 bus_release_resource(dev, SYS_RES_MEMORY,
1823 BGE_PCI_BAR0, sc->bge_res);
1824
1825 if (sc->bge_rdata != NULL)
1826 contigfree(sc->bge_rdata, sizeof(struct bge_ring_data),
1827 M_DEVBUF);
1830}
1831
1832static void
1833bge_reset(struct bge_softc *sc)
1834{
1835 device_t dev;
1836 uint32_t cachesize, command, pcistate;
1837 int i, val = 0;
1838
1839 dev = sc->bge_dev;
1840
1841 /* Save some important PCI state. */
1842 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1843 command = pci_read_config(dev, BGE_PCI_CMD, 4);
1844 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1845
1846 pci_write_config(dev, BGE_PCI_MISC_CTL,
1847 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1848 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1849
1850 /* Issue global reset */
1851 bge_writereg_ind(sc, BGE_MISC_CFG,
1852 BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1));
1853
1854 DELAY(1000);
1855
1856 /* Reset some of the PCI state that got zapped by reset */
1857 pci_write_config(dev, BGE_PCI_MISC_CTL,
1858 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1859 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1860 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1861 pci_write_config(dev, BGE_PCI_CMD, command, 4);
1862 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1863
1864 /*
1865 * Prevent PXE restart: write a magic number to the
1866 * general communications memory at 0xB50.
1867 */
1868 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1869 /*
1870 * Poll the value location we just wrote until
1871 * we see the 1's complement of the magic number.
1872 * This indicates that the firmware initialization
1873 * is complete.
1874 */
1875 for (i = 0; i < BGE_TIMEOUT; i++) {
1876 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1877 if (val == ~BGE_MAGIC_NUMBER)
1878 break;
1879 DELAY(10);
1880 }
1881
1882 if (i == BGE_TIMEOUT) {
1883 if_printf(&sc->arpcom.ac_if, "firmware handshake timed out\n");
1884 return;
1885 }
1886
1887 /*
1888 * XXX Wait for the value of the PCISTATE register to
1889 * return to its original pre-reset state. This is a
1890 * fairly good indicator of reset completion. If we don't
1891 * wait for the reset to fully complete, trying to read
1892 * from the device's non-PCI registers may yield garbage
1893 * results.
1894 */
1895 for (i = 0; i < BGE_TIMEOUT; i++) {
1896 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1897 break;
1898 DELAY(10);
1899 }
1900
1901 /* Enable memory arbiter. */
1902 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
1903 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1904
1905 /* Fix up byte swapping */
1906 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
1907 BGE_MODECTL_BYTESWAP_DATA);
1908
1909 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1910
1911 DELAY(10000);
1914}
1915
1916/*
1917 * Frame reception handling. This is called if there's a frame
1918 * on the receive return list.
1919 *
1920 * Note: we have to be able to handle two possibilities here:
1921 * 1) the frame is from the jumbo receive ring
1922 * 2) the frame is from the standard receive ring
1923 */
1924
1925static void
1926bge_rxeof(struct bge_softc *sc)
1927{
1928 struct ifnet *ifp;
1929 int stdcnt = 0, jumbocnt = 0;
1930
1931 ifp = &sc->arpcom.ac_if;
1932
1933 while (sc->bge_rx_saved_considx !=
1934 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
1935 struct bge_rx_bd *cur_rx;
1936 uint32_t rxidx;
1937 struct mbuf *m = NULL;
1938 uint16_t vlan_tag = 0;
1939 int have_tag = 0;
1940
1941 cur_rx =
1942 &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
1943
1944 rxidx = cur_rx->bge_idx;
1945 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
1946
1947 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
1948 have_tag = 1;
1949 vlan_tag = cur_rx->bge_vlan_tag;
1950 }
1951
1952 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
1953 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
1954 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
1955 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
1956 jumbocnt++;
1957 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1958 ifp->if_ierrors++;
1959 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1960 continue;
1961 }
1962 if (bge_newbuf_jumbo(sc,
1963 sc->bge_jumbo, NULL) == ENOBUFS) {
1964 ifp->if_ierrors++;
1965 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
1966 continue;
1967 }
1968 } else {
1969 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
1970 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
1971 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
1972 stdcnt++;
1973 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
1974 ifp->if_ierrors++;
1975 bge_newbuf_std(sc, sc->bge_std, m);
1976 continue;
1977 }
1978 if (bge_newbuf_std(sc, sc->bge_std,
1979 NULL) == ENOBUFS) {
1980 ifp->if_ierrors++;
1981 bge_newbuf_std(sc, sc->bge_std, m);
1982 continue;
1983 }
1984 }
1985
1986 ifp->if_ipackets++;
1987#ifndef __i386__
1988 /*
1989 * The i386 allows unaligned accesses, but for other
1990 * platforms we must make sure the payload is aligned.
1991 */
1992 if (sc->bge_rx_alignment_bug) {
1993 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
1994 cur_rx->bge_len);
1995 m->m_data += ETHER_ALIGN;
1996 }
1997#endif
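		/*
		 * A note on the copy above: ETHER_ALIGN is 2, so shifting
		 * the frame up by two bytes leaves the IP header 32-bit
		 * aligned after the 14-byte Ethernet header. The in-place
		 * copy presumably relies on the receive BD length having
		 * been programmed ETHER_ALIGN bytes short of a full
		 * cluster (note the MCLBYTES - ETHER_ALIGN check in
		 * bge_init()), which leaves room at the tail of the
		 * buffer for the shift.
		 */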
1998 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
1999 m->m_pkthdr.rcvif = ifp;
2000
2001#if 0 /* currently broken for some packets, possibly related to TCP options */
2002 if (ifp->if_hwassist) {
2003 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2004 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2005 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2006 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2007 m->m_pkthdr.csum_data =
2008 cur_rx->bge_tcp_udp_csum;
2009 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2010 }
2011 }
2012#endif
2013
2014 /*
2015 * If we received a packet with a vlan tag, pass it
2016 * to vlan_input() instead of ether_input().
2017 */
2018 if (have_tag) {
2019 VLAN_INPUT_TAG(m, vlan_tag);
2020 have_tag = vlan_tag = 0;
2021 continue;
2022 }
2023
2024 (*ifp->if_input)(ifp, m);
2025 }
2026
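	/*
	 * Let the chip know how far we got: advance the return-ring
	 * consumer index, and bump the standard/jumbo producer indices
	 * only if we actually replenished buffers on those rings.
	 */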
2027 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2028 if (stdcnt)
2029 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2030 if (jumbocnt)
2031 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2032}
2033
2034static void
2035bge_txeof(struct bge_softc *sc)
2036{
2037 struct bge_tx_bd *cur_tx = NULL;
2038 struct ifnet *ifp;
2039
2040 ifp = &sc->arpcom.ac_if;
2041
2042 /*
2043 * Go through our tx ring and free mbufs for those
2044 * frames that have been sent.
2045 */
2046 while (sc->bge_tx_saved_considx !=
2047 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2048 uint32_t idx = 0;
2049
2050 idx = sc->bge_tx_saved_considx;
2051 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2052 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2053 ifp->if_opackets++;
2054 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2055 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2056 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2057 }
2058 sc->bge_txcnt--;
2059 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2060 ifp->if_timer = 0;
2061 }
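	/*
	 * if_timer is cleared above each time a descriptor is reclaimed,
	 * so the watchdog armed in bge_start() only fires if the chip
	 * stops making progress entirely.
	 */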
2062
2063 if (cur_tx != NULL)
2064 ifp->if_flags &= ~IFF_OACTIVE;
2065}
2066
2067static void
2068bge_intr(void *xsc)
2069{
2070 struct bge_softc *sc = xsc;
2071 struct ifnet *ifp = &sc->arpcom.ac_if;
2072 uint32_t status;
2073
2074#ifdef notdef
2075 /* Avoid this for now -- checking this register is expensive. */
2076 /* Make sure this is really our interrupt. */
2077 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2078 return;
2079#endif
2080 /* Ack interrupt and stop others from occurring. */
2081 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
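	/*
	 * Writing 1 to the IRQ0 mailbox masks further interrupts from
	 * the chip until the matching write of 0 near the end of this
	 * handler re-arms them.
	 */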
2082
2083 /*
2084 * Process link state changes.
2085 * Grrr. The link status word in the status block does
2086 * not work correctly on the BCM5700 rev AX and BX chips,
2087 * according to all available information. Hence, we have
2088 * to enable MII interrupts in order to properly obtain
2089 * async link changes. Unfortunately, this also means that
2090 * we have to read the MAC status register to detect link
2091 * changes, thereby adding an additional register access to
2092 * the interrupt handler.
2093 */
2094
2095 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2096 status = CSR_READ_4(sc, BGE_MAC_STS);
2097 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2098 sc->bge_link = 0;
2099 callout_stop(&sc->bge_stat_timer);
2100 bge_tick(sc);
2101 /* Clear the interrupt */
2102 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2103 BGE_EVTENB_MI_INTERRUPT);
2104 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2105 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2106 BRGPHY_INTRS);
2107 }
2108 } else {
2109 if ((sc->bge_rdata->bge_status_block.bge_status &
2110 BGE_STATFLAG_UPDATED) &&
2111 (sc->bge_rdata->bge_status_block.bge_status &
2112 BGE_STATFLAG_LINKSTATE_CHANGED)) {
2113 sc->bge_rdata->bge_status_block.bge_status &=
2114 ~(BGE_STATFLAG_UPDATED|
2115 BGE_STATFLAG_LINKSTATE_CHANGED);
2116 /*
2117 * Sometimes PCS encoding errors are detected in
2118 * TBI mode (on fiber NICs), and for some reason
2119 * the chip will signal them as link changes.
2120 * If we get a link change event, but the 'PCS
2121 * encoding error' bit in the MAC status register
2122 * is set, don't bother doing a link check.
2123 * This avoids spurious "gigabit link up" messages
2124 * that sometimes appear on fiber NICs during
2125 * periods of heavy traffic. (There should be no
2126 * effect on copper NICs.)
2127 */
2128 status = CSR_READ_4(sc, BGE_MAC_STS);
2129 if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR|
2130 BGE_MACSTAT_MI_COMPLETE))) {
2131 sc->bge_link = 0;
2132 callout_stop(&sc->bge_stat_timer);
2133 bge_tick(sc);
2134 }
2138 /* Clear the interrupt */
2139 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2140 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2141 BGE_MACSTAT_LINK_CHANGED);
2142
2143 /* Force flush the status block cached by PCI bridge */
2144 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2145 }
2146 }
2147
2148 if (ifp->if_flags & IFF_RUNNING) {
2149 /* Check RX return ring producer/consumer */
2150 bge_rxeof(sc);
2151
2152 /* Check TX ring producer/consumer */
2153 bge_txeof(sc);
2154 }
2155
2156 bge_handle_events(sc);
2157
2158 /* Re-enable interrupts. */
2159 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2160
2161 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
2162 (*ifp->if_start)(ifp);
2163}
2164
2165static void
2166bge_tick(void *xsc)
2167{
2168 struct bge_softc *sc = xsc;
2169 struct ifnet *ifp = &sc->arpcom.ac_if;
2170 struct mii_data *mii = NULL;
2171 struct ifmedia *ifm = NULL;
2172 int s;
2173
2174 s = splimp();
2175
2176 if (sc->bge_asicrev == BGE_ASICREV_BCM5705)
2177 bge_stats_update_regs(sc);
2178 else
2179 bge_stats_update(sc);
2180 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2181 if (sc->bge_link) {
2182 splx(s);
2183 return;
2184 }
2185
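	/*
	 * On TBI (fiber) cards there is no MII PHY to poll; link state
	 * is taken from the PCS-synchronized bit in the MAC status
	 * register instead.
	 */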
2186 if (sc->bge_tbi) {
2187 ifm = &sc->bge_ifmedia;
2188 if (CSR_READ_4(sc, BGE_MAC_STS) &
2189 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2190 sc->bge_link++;
2191 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2192 if_printf(ifp, "gigabit link up\n");
2193 if (!ifq_is_empty(&ifp->if_snd))
2194 (*ifp->if_start)(ifp);
2195 }
2196 splx(s);
2197 return;
2198 }
2199
2200 mii = device_get_softc(sc->bge_miibus);
2201 mii_tick(mii);
2202
2203 if (!sc->bge_link) {
2204 mii_pollstat(mii);
2205 if (mii->mii_media_status & IFM_ACTIVE &&
2206 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2207 sc->bge_link++;
2208 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2209 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2210 if_printf(ifp, "gigabit link up\n");
2211 if (!ifq_is_empty(&ifp->if_snd))
2212 (*ifp->if_start)(ifp);
2213 }
2214 }
2215
2216 splx(s);
2217}
2218
2219static void
2220bge_stats_update_regs(struct bge_softc *sc)
2221{
2222 struct ifnet *ifp = &sc->arpcom.ac_if;
2223 struct bge_mac_stats_regs stats;
2224 uint32_t *s;
2225 int i;
2226
2227 s = (uint32_t *)&stats;
2228 for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) {
2229 *s = CSR_READ_4(sc, BGE_RX_STATS + i);
2230 s++;
2231 }
2232
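	/*
	 * The hardware counters are cumulative, so subtracting the
	 * previous if_collisions value below effectively replaces the
	 * interface count with the chip's running total rather than
	 * accumulating deltas.
	 */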
2233 ifp->if_collisions +=
2234 (stats.dot3StatsSingleCollisionFrames +
2235 stats.dot3StatsMultipleCollisionFrames +
2236 stats.dot3StatsExcessiveCollisions +
2237 stats.dot3StatsLateCollisions) -
2238 ifp->if_collisions;
2239}
2240
2241static void
2242bge_stats_update(struct bge_softc *sc)
2243{
2244 struct ifnet *ifp = &sc->arpcom.ac_if;
2245 struct bge_stats *stats;
2246
2247 stats = (struct bge_stats *)(sc->bge_vhandle +
2248 BGE_MEMWIN_START + BGE_STATS_BLOCK);
2249
2250 ifp->if_collisions +=
2251 (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo +
2252 stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo +
2253 stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo +
2254 stats->txstats.dot3StatsLateCollisions.bge_addr_lo) -
2255 ifp->if_collisions;
2256
2257#ifdef notdef
2258 ifp->if_collisions +=
2259 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2260 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2261 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2262 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2263 ifp->if_collisions;
2264#endif
2265}
2266
2267/*
2268 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2269 * pointers to descriptors.
2270 */
2271static int
2272bge_encap(struct bge_softc *sc, struct mbuf *m_head, uint32_t *txidx)
2273{
2274 struct bge_tx_bd *f = NULL;
2275 struct mbuf *m;
2276 uint32_t frag, cur, cnt = 0;
2277 uint16_t csum_flags = 0;
2278 struct ifvlan *ifv = NULL;
2279
2280 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2281 m_head->m_pkthdr.rcvif != NULL &&
2282 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2283 ifv = m_head->m_pkthdr.rcvif->if_softc;
2284
2285 m = m_head;
2286 cur = frag = *txidx;
2287
2288 if (m_head->m_pkthdr.csum_flags) {
2289 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2290 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2291 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2292 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2293 if (m_head->m_flags & M_LASTFRAG)
2294 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2295 else if (m_head->m_flags & M_FRAG)
2296 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2297 }
2298 /*
2299 * Start packing the mbufs in this chain into
2300 * the fragment pointers. Stop when we run out
2301 * of fragments or hit the end of the mbuf chain.
2302 */
2303 for (m = m_head; m != NULL; m = m->m_next) {
2304 if (m->m_len != 0) {
2305 f = &sc->bge_rdata->bge_tx_ring[frag];
2306 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2307 break;
2308 BGE_HOSTADDR(f->bge_addr,
2309 vtophys(mtod(m, vm_offset_t)));
2310 f->bge_len = m->m_len;
2311 f->bge_flags = csum_flags;
2312 if (ifv != NULL) {
2313 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2314 f->bge_vlan_tag = ifv->ifv_tag;
2315 } else {
2316 f->bge_vlan_tag = 0;
2317 }
2318 /*
2319 * Sanity check: avoid coming within 16 descriptors
2320 * of the end of the ring.
2321 */
2322 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2323 return(ENOBUFS);
2324 cur = frag;
2325 BGE_INC(frag, BGE_TX_RING_CNT);
2326 cnt++;
2327 }
2328 }
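	/*
	 * At this point 'cur' indexes the last descriptor actually
	 * filled and 'frag' is the first free slot: the END flag is set
	 * on 'cur' below, and 'frag' becomes the new producer index
	 * handed back to the caller.
	 */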
2329
2330 if (m != NULL)
2331 return(ENOBUFS);
2332
2333 if (frag == sc->bge_tx_saved_considx)
2334 return(ENOBUFS);
2335
2336 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2337 sc->bge_cdata.bge_tx_chain[cur] = m_head;
2338 sc->bge_txcnt += cnt;
2339
2340 *txidx = frag;
2341
2342 return(0);
2343}
2344
2345/*
2346 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2347 * to the mbuf data regions directly in the transmit descriptors.
2348 */
2349static void
2350bge_start(struct ifnet *ifp)
2351{
2352 struct bge_softc *sc;
2353 struct mbuf *m_head = NULL;
2354 uint32_t prodidx = 0;
2355
2356 sc = ifp->if_softc;
2357
2358 if (!sc->bge_link)
2359 return;
2360
2361 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
2362
2363 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2364 m_head = ifq_poll(&ifp->if_snd);
2365 if (m_head == NULL)
2366 break;
2367
2368 /*
2369 * XXX
2370 * safety overkill. If this is a fragmented packet chain
2371 * with delayed TCP/UDP checksums, then only encapsulate
2372 * it if we have enough descriptors to handle the entire
2373 * chain at once.
2374 * (paranoia -- may not actually be needed)
2375 */
2376 if (m_head->m_flags & M_FIRSTFRAG &&
2377 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2378 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2379 m_head->m_pkthdr.csum_data + 16) {
2380 ifp->if_flags |= IFF_OACTIVE;
2381 break;
2382 }
2383 }
2384
2385 /*
2386 * Pack the data into the transmit ring. If we
2387 * don't have room, set the OACTIVE flag and wait
2388 * for the NIC to drain the ring.
2389 */
2390 if (bge_encap(sc, m_head, &prodidx)) {
2391 ifp->if_flags |= IFF_OACTIVE;
2392 break;
2393 }
2394 m_head = ifq_dequeue(&ifp->if_snd);
2395
2396 BPF_MTAP(ifp, m_head);
2397 }
2398
2399 /* Transmit */
2400 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2401 /* BCM5700 Bx errata: write the TX producer index twice. */
2402 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2403 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2404
2405 /*
2406 * Set a timeout in case the chip goes out to lunch.
2407 */
2408 ifp->if_timer = 5;
2409}
2410
2411static void
2412bge_init(void *xsc)
2413{
2414 struct bge_softc *sc = xsc;
2415 struct ifnet *ifp = &sc->arpcom.ac_if;
2416 uint16_t *m;
2417 int s;
2418
2419 s = splimp();
2420
2421 if (ifp->if_flags & IFF_RUNNING) {
2422 splx(s);
2423 return;
2424 }
2425
2426 /* Cancel pending I/O and flush buffers. */
2427 bge_stop(sc);
2428 bge_reset(sc);
2429 bge_chipinit(sc);
2430
2431 /*
2432 * Init the various state machines, ring
2433 * control blocks and firmware.
2434 */
2435 if (bge_blockinit(sc)) {
2436 if_printf(ifp, "initialization failure\n");
2437 splx(s);
2438 return;
2439 }
2440
2441 /* Specify MTU. */
2442 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2443 ETHER_HDR_LEN + ETHER_CRC_LEN);
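	/*
	 * The chip wants the largest frame it may see on the wire, so
	 * the Ethernet header and CRC lengths are added on top of the
	 * IP MTU.
	 */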
2444
2445 /* Load our MAC address. */
2446 m = (uint16_t *)&sc->arpcom.ac_enaddr[0];
2447 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2448 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2449
2450 /* Enable or disable promiscuous mode as needed. */
2451 if (ifp->if_flags & IFF_PROMISC) {
2452 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2453 } else {
2454 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2455 }
2456
2457 /* Program multicast filter. */
2458 bge_setmulti(sc);
2459
2460 /* Init RX ring. */
2461 bge_init_rx_ring_std(sc);
2462
2463 /*
2464 * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
2465 * memory to ensure that the chip has in fact read the first
2466 * entry of the ring.
2467 */
2468 if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) {
2469 uint32_t v, i;
2470 for (i = 0; i < 10; i++) {
2471 DELAY(20);
2472 v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8);
2473 if (v == (MCLBYTES - ETHER_ALIGN))
2474 break;
2475 }
2476 if (i == 10)
2477 if_printf(ifp, "5705 A0 chip failed to load RX ring\n");
2478 }
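	/*
	 * The word polled above (BGE_STD_RX_RINGS + 8) is apparently the
	 * length/flags word of the first buffer descriptor as copied
	 * into NIC-local memory; seeing MCLBYTES - ETHER_ALIGN there
	 * confirms the chip has fetched the descriptor we just posted.
	 */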
2479
2480 /* Init jumbo RX ring. */
2481 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2482 bge_init_rx_ring_jumbo(sc);
2483
2484 /* Init our RX return ring index */
2485 sc->bge_rx_saved_considx = 0;
2486
2487 /* Init TX ring. */
2488 bge_init_tx_ring(sc);
2489
2490 /* Turn on transmitter */
2491 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2492
2493 /* Turn on receiver */
2494 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2495
2496 /* Tell firmware we're alive. */
2497 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2498
2499 /* Enable host interrupts. */
2500 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2501 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2502 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2503
2504 bge_ifmedia_upd(ifp);
2505
2506 ifp->if_flags |= IFF_RUNNING;
2507 ifp->if_flags &= ~IFF_OACTIVE;
2508
2509 splx(s);
2510
2511 callout_reset(&sc->bge_stat_timer, hz, bge_tick, sc);
2512}
2513
2514/*
2515 * Set media options.
2516 */
2517static int
2518bge_ifmedia_upd(struct ifnet *ifp)
2519{
2520 struct bge_softc *sc = ifp->if_softc;
2521 struct ifmedia *ifm = &sc->bge_ifmedia;
2522 struct mii_data *mii;
2523
2524 /* If this is a 1000baseX NIC, enable the TBI port. */
2525 if (sc->bge_tbi) {
2526 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2527 return(EINVAL);
2528 switch (IFM_SUBTYPE(ifm->ifm_media)) {
2529 case IFM_AUTO:
2530 break;
2531 case IFM_1000_SX:
2532 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2533 BGE_CLRBIT(sc, BGE_MAC_MODE,
2534 BGE_MACMODE_HALF_DUPLEX);
2535 } else {
2536 BGE_SETBIT(sc, BGE_MAC_MODE,
2537 BGE_MACMODE_HALF_DUPLEX);
2538 }
2539 break;
2540 default:
2541 return(EINVAL);
2542 }
2543 return(0);
2544 }
2545
2546 mii = device_get_softc(sc->bge_miibus);
2547 sc->bge_link = 0;
2548 if (mii->mii_instance) {
2549 struct mii_softc *miisc;
2550 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2551 miisc = LIST_NEXT(miisc, mii_list))
2552 mii_phy_reset(miisc);
2553 }
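	/*
	 * Resetting every PHY instance before the media change gives
	 * autonegotiation a clean restart when the user switches media.
	 */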
2554 mii_mediachg(mii);
2555
2556 return(0);
2557}
2558
2559/*
2560 * Report current media status.
2561 */
2562static void
2563bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2564{
2565 struct bge_softc *sc = ifp->if_softc;
2566 struct mii_data *mii;
2567
2568 if (sc->bge_tbi) {
2569 ifmr->ifm_status = IFM_AVALID;
2570 ifmr->ifm_active = IFM_ETHER;
2571 if (CSR_READ_4(sc, BGE_MAC_STS) &
2572 BGE_MACSTAT_TBI_PCS_SYNCHED)
2573 ifmr->ifm_status |= IFM_ACTIVE;
2574 ifmr->ifm_active |= IFM_1000_SX;
2575 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2576 ifmr->ifm_active |= IFM_HDX;
2577 else
2578 ifmr->ifm_active |= IFM_FDX;
2579 return;
2580 }
2581
2582 mii = device_get_softc(sc->bge_miibus);
2583 mii_pollstat(mii);
2584 ifmr->ifm_active = mii->mii_media_active;
2585 ifmr->ifm_status = mii->mii_media_status;
2586}
2587
2588static int
2589bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2590{
2591 struct bge_softc *sc = ifp->if_softc;
2592 struct ifreq *ifr = (struct ifreq *) data;
2593 int s, mask, error = 0;
2594 struct mii_data *mii;
2595
2596 s = splimp();
2597
2598 switch (command) {
2599 case SIOCSIFADDR:
2600 case SIOCGIFADDR:
2601 error = ether_ioctl(ifp, command, data);
2602 break;
2603 case SIOCSIFMTU:
2604 /* Disallow jumbo frames on 5705. */
2605 if ((sc->bge_asicrev == BGE_ASICREV_BCM5705 &&
2606 ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU)
2607 error = EINVAL;
2608 else {
2609 ifp->if_mtu = ifr->ifr_mtu;
2610 ifp->if_flags &= ~IFF_RUNNING;
2611 bge_init(sc);
2612 }
2613 break;
2614 case SIOCSIFFLAGS:
2615 if (ifp->if_flags & IFF_UP) {
2616 /*
2617 * If only the state of the PROMISC flag changed,
2618 * then just use the 'set promisc mode' command
2619 * instead of reinitializing the entire NIC. Doing
2620 * a full re-init means reloading the firmware and
2621 * waiting for it to start up, which may take a
2622 * second or two.
2623 */
2624 if (ifp->if_flags & IFF_RUNNING &&
2625 ifp->if_flags & IFF_PROMISC &&
2626 !(sc->bge_if_flags & IFF_PROMISC)) {
2627 BGE_SETBIT(sc, BGE_RX_MODE,
2628 BGE_RXMODE_RX_PROMISC);
2629 } else if (ifp->if_flags & IFF_RUNNING &&
2630 !(ifp->if_flags & IFF_PROMISC) &&
2631 sc->bge_if_flags & IFF_PROMISC) {
2632 BGE_CLRBIT(sc, BGE_RX_MODE,
2633 BGE_RXMODE_RX_PROMISC);
2634 } else
2635 bge_init(sc);
2636 } else {
2637 if (ifp->if_flags & IFF_RUNNING) {
2638 bge_stop(sc);
2639 }
2640 }
2641 sc->bge_if_flags = ifp->if_flags;
2642 error = 0;
2643 break;
2644 case SIOCADDMULTI:
2645 case SIOCDELMULTI:
2646 if (ifp->if_flags & IFF_RUNNING) {
2647 bge_setmulti(sc);
2648 error = 0;
2649 }
2650 break;
2651 case SIOCSIFMEDIA:
2652 case SIOCGIFMEDIA:
2653 if (sc->bge_tbi) {
2654 error = ifmedia_ioctl(ifp, ifr,
2655 &sc->bge_ifmedia, command);
2656 } else {
2657 mii = device_get_softc(sc->bge_miibus);
2658 error = ifmedia_ioctl(ifp, ifr,
2659 &mii->mii_media, command);
2660 }
2661 break;
2662 case SIOCSIFCAP:
2663 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2664 if (mask & IFCAP_HWCSUM) {
2665 if (IFCAP_HWCSUM & ifp->if_capenable)
2666 ifp->if_capenable &= ~IFCAP_HWCSUM;
2667 else
2668 ifp->if_capenable |= IFCAP_HWCSUM;
2669 }
2670 error = 0;
2671 break;
2672 default:
2673 error = EINVAL;
2674 break;
2675 }
2676
2677 splx(s);
2678
2679 return(error);
2680}
2681
2682static void
2683bge_watchdog(struct ifnet *ifp)
2684{
2685 struct bge_softc *sc = ifp->if_softc;
2686
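	/*
	 * We only get here when the if_timer armed in bge_start() counts
	 * down to zero without bge_txeof() making any progress, i.e. the
	 * chip has stopped transmitting for roughly five seconds.
	 */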
2687 if_printf(ifp, "watchdog timeout -- resetting\n");
2688
2689 ifp->if_flags &= ~IFF_RUNNING;
2690 bge_init(sc);
2691
2692 ifp->if_oerrors++;
2693}
2694
2695/*
2696 * Stop the adapter and free any mbufs allocated to the
2697 * RX and TX lists.
2698 */
2699static void
2700bge_stop(struct bge_softc *sc)
2701{
2702 struct ifnet *ifp = &sc->arpcom.ac_if;
2703 struct ifmedia_entry *ifm;
2704 struct mii_data *mii = NULL;
2705 int mtmp, itmp;
2706
2707 if (!sc->bge_tbi)
2708 mii = device_get_softc(sc->bge_miibus);
2709
2710 callout_stop(&sc->bge_stat_timer);
2711
2712 /*
2713 * Disable all of the receiver blocks
2714 */
2715 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2716 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2717 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2718 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2719 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2720 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2721 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2722 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2723
2724 /*
2725 * Disable all of the transmit blocks
2726 */
2727 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2728 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2729 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2730 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2731 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2732 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2733 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2734 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2735
2736 /*
2737 * Shut down all of the memory managers and related
2738 * state machines.
2739 */
2740 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2741 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2742 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2743 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2744 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2745 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2746 if (sc->bge_asicrev != BGE_ASICREV_BCM5705) {
2747 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2748 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2749 }
2750
2751 /* Disable host interrupts. */
2752 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2753 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2754
2755 /*
2756 * Tell firmware we're shutting down.
2757 */
2758 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2759
2760 /* Free the RX lists. */
2761 bge_free_rx_ring_std(sc);
2762
2763 /* Free jumbo RX list. */
2764 if (sc->bge_asicrev != BGE_ASICREV_BCM5705)
2765 bge_free_rx_ring_jumbo(sc);
2766
2767 /* Free TX buffers. */
2768 bge_free_tx_ring(sc);
2769
2770 /*
2771 * Isolate/power down the PHY, but leave the media selection
2772 * unchanged so that things will be put back to normal when
2773 * we bring the interface back up.
2774 */
2775 if (!sc->bge_tbi) {
2776 itmp = ifp->if_flags;
2777 ifp->if_flags |= IFF_UP;
2778 ifm = mii->mii_media.ifm_cur;
2779 mtmp = ifm->ifm_media;
2780 ifm->ifm_media = IFM_ETHER|IFM_NONE;
2781 mii_mediachg(mii);
2782 ifm->ifm_media = mtmp;
2783 ifp->if_flags = itmp;
2784 }
2785
2786 sc->bge_link = 0;
2787
2788 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2789
2790 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2791}
2792
2793/*
2794 * Stop all chip I/O so that the kernel's probe routines don't
2795 * get confused by errant DMAs when rebooting.
2796 */
2797static void
2798bge_shutdown(device_t dev)
2799{
2800 struct bge_softc *sc = device_get_softc(dev);
2801
2802 bge_stop(sc);
2803 bge_reset(sc);
2804}