/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.22 2003/05/11 18:00:55 ps Exp $
 * $DragonFly: src/sys/dev/netif/bge/if_bge.c,v 1.13 2004/02/08 07:03:17 hmp Exp $
 *
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/miidevs.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_bgereg.h"

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX		64	/* Maximum device description length */

static struct bge_type bge_devs[] = {
	{ ALT_VENDORID,	ALT_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ ALT_VENDORID,	ALT_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
		"SysKonnect Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
		"Altima AC9100 Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int bge_probe		(device_t);
static int bge_attach		(device_t);
static int bge_detach		(device_t);
static void bge_release_resources
				(struct bge_softc *);
static void bge_txeof		(struct bge_softc *);
static void bge_rxeof		(struct bge_softc *);

static void bge_tick		(void *);
static void bge_stats_update	(struct bge_softc *);
static int bge_encap		(struct bge_softc *, struct mbuf *,
				 u_int32_t *);

static void bge_intr		(void *);
static void bge_start		(struct ifnet *);
static int bge_ioctl		(struct ifnet *, u_long, caddr_t);
static void bge_init		(void *);
static void bge_stop		(struct bge_softc *);
static void bge_watchdog	(struct ifnet *);
static void bge_shutdown	(device_t);
static int bge_ifmedia_upd	(struct ifnet *);
static void bge_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static u_int8_t bge_eeprom_getbyte	(struct bge_softc *,
					 int, u_int8_t *);
static int bge_read_eeprom	(struct bge_softc *, caddr_t, int, int);

static u_int32_t bge_crc	(caddr_t);
static void bge_setmulti	(struct bge_softc *);

static void bge_handle_events	(struct bge_softc *);
static int bge_alloc_jumbo_mem	(struct bge_softc *);
static void bge_free_jumbo_mem	(struct bge_softc *);
static void *bge_jalloc	(struct bge_softc *);
static void bge_jfree		(caddr_t, u_int);
static void bge_jref		(caddr_t, u_int);
static int bge_newbuf_std	(struct bge_softc *, int, struct mbuf *);
static int bge_newbuf_jumbo	(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std	(struct bge_softc *);
static void bge_free_rx_ring_std	(struct bge_softc *);
static int bge_init_rx_ring_jumbo	(struct bge_softc *);
static void bge_free_rx_ring_jumbo	(struct bge_softc *);
static void bge_free_tx_ring	(struct bge_softc *);
static int bge_init_tx_ring	(struct bge_softc *);

static int bge_chipinit	(struct bge_softc *);
static int bge_blockinit	(struct bge_softc *);

#ifdef notdef
static u_int8_t bge_vpd_readbyte (struct bge_softc *, int);
static void bge_vpd_read_res	(struct bge_softc *,
				 struct vpd_res *, int);
static void bge_vpd_read	(struct bge_softc *);
#endif

static u_int32_t bge_readmem_ind
				(struct bge_softc *, int);
static void bge_writemem_ind	(struct bge_softc *, int, int);
#ifdef notdef
static u_int32_t bge_readreg_ind
				(struct bge_softc *, int);
#endif
static void bge_writereg_ind	(struct bge_softc *, int, int);

static int bge_miibus_readreg	(device_t, int, int);
static int bge_miibus_writereg	(device_t, int, int, int);
static void bge_miibus_statchg	(device_t);

static void bge_reset		(struct bge_softc *);

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DECLARE_DUMMY_MODULE(if_bge);
DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

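/*
 * Indirect register access helpers. The BCM570x maps a window into
 * its internal memory through PCI config space: writing a target
 * offset to BGE_PCI_MEMWIN_BASEADDR positions the window, after
 * which the dword at that offset is accessible through
 * BGE_PCI_MEMWIN_DATA. The REG_BASEADDR/REG_DATA pair used further
 * below works the same way for indirect access to the MAC registers.
 */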
static u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

	return;
}

#ifdef notdef
static u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

	return;
}

#ifdef notdef
static u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	device_t dev;
	u_int32_t val;

	dev = sc->bge_dev;
	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: VPD read timed out\n", sc->bge_unit);
		return(0);
	}

	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);

	return;
}

static void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
			sc->bge_unit, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
			sc->bge_unit, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);

	return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

static int
bge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	u_int32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	if (phy != 1)
		switch(sc->bge_chipid) {
		case BGE_CHIPID_BCM5701_B5:
		case BGE_CHIPID_BCM5703_A2:
		case BGE_CHIPID_BCM5704_A0:
			return(0);
		}

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY read timed out\n", sc->bge_unit);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct bge_softc *sc;
	u_int32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY write timed out\n", sc->bge_unit);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr;
	int i;
	struct bge_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
		M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_cdata.bge_jumbo_buf == NULL) {
		printf("bge%d: no memory for jumbo buffers!\n", sc->bge_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
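	/*
	 * Layout of each BGE_JLEN slot carved out below (a sketch):
	 *
	 *  +---------------------+---------------------------------+
	 *  | softc back-pointer  | jumbo buffer (bge_buf)          |
	 *  | sizeof(u_int64_t)   | BGE_JLEN - sizeof(u_int64_t)    |
	 *  +---------------------+---------------------------------+
	 */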
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		u_int64_t **aptr;
		aptr = (u_int64_t **)ptr;
		aptr[0] = (u_int64_t *)sc;
		ptr += sizeof(u_int64_t);
		sc->bge_cdata.bge_jslots[i].bge_buf = ptr;
		sc->bge_cdata.bge_jslots[i].bge_inuse = 0;
		ptr += (BGE_JLEN - sizeof(u_int64_t));
		entry = malloc(sizeof(struct bge_jpool_entry),
			       M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			contigfree(sc->bge_cdata.bge_jumbo_buf,
				   BGE_JMEM, M_DEVBUF);
			sc->bge_cdata.bge_jumbo_buf = NULL;
			printf("bge%d: no memory for jumbo "
			    "buffer queue!\n", sc->bge_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

static void
bge_free_jumbo_mem(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_jpool_entry *entry;

	for (i = 0; i < BGE_JSLOTS; i++) {
		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);

	return;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(sc)
	struct bge_softc *sc;
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	sc->bge_cdata.bge_jslots[entry->slot].bge_inuse = 1;
	return(sc->bge_cdata.bge_jslots[entry->slot].bge_buf);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(buf, size)
	caddr_t buf;
	u_int size;
{
	struct bge_softc *sc;
	u_int64_t **aptr;
	int i;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc = (struct bge_softc *)(aptr[0]);

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (size != BGE_JUMBO_FRAMELEN)
		panic("bge_jref: adjusting refcount of buf of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr
	     - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
		panic("bge_jref: buffer already free!");
	else
		sc->bge_cdata.bge_jslots[i].bge_inuse++;

	return;
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(buf, size)
	caddr_t buf;
	u_int size;
{
	struct bge_softc *sc;
	u_int64_t **aptr;
	int i;
	struct bge_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc = (struct bge_softc *)(aptr[0]);

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (size != BGE_JUMBO_FRAMELEN)
		panic("bge_jfree: freeing buffer of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr
	     - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");
	else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
		panic("bge_jfree: buffer already free!");
	else {
		sc->bge_cdata.bge_jslots[i].bge_inuse--;
		if(sc->bge_cdata.bge_jslots[i].bge_inuse == 0) {
			entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
			if (entry == NULL)
				panic("bge_jfree: buffer not in use!");
			entry->slot = i;
			SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead,
					  jpool_entries);
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
					  entry, jpool_entries);
		}
	}

	return;
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

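	/*
	 * m_adj() by ETHER_ALIGN (2 bytes) offsets the payload so that
	 * the IP header following the 14-byte Ethernet header ends up
	 * longword aligned. It is skipped on chips with the BCM5701
	 * PCI-X alignment bug (see bge_attach()), which need the
	 * receive buffer itself to remain aligned.
	 */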
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("bge%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
		m_new->m_flags |= M_EXT;
		m_new->m_len = m_new->m_pkthdr.len =
		    m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
		m_new->m_ext.ext_free = bge_jfree;
		m_new->m_ext.ext_ref = bge_jref;
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}
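/*
 * Once attached as external storage above, the buffer's lifetime is
 * driven by the mbuf system: duplicating the mbuf bumps the use count
 * via ext_ref (bge_jref), and m_freem() drops it via ext_free
 * (bge_jfree), which returns the slot to bge_jfree_listhead when the
 * count reaches zero.
 */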

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

static void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_rcb *rcb;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

static void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_rdata->bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}

	return;
}

static int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}

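/*
 * BGE_POLY is the standard Ethernet CRC32 polynomial in reflected
 * (bit-reversed) form. bge_crc() runs it over the 6-byte MAC address
 * and keeps only the low 7 bits of the result, selecting one of the
 * 128 bits in the multicast hash filter.
 */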
#define BGE_POLY	0xEDB88320

static u_int32_t
bge_crc(addr)
	caddr_t addr;
{
	u_int32_t idx, bit, data, crc;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
	}

	return(crc & 0x7F);
}

static void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	for (ifma = ifp->if_multiaddrs.lh_first;
	    ifma != NULL; ifma = ifma->ifma_link.le_next) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
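		/*
		 * Hash bits 6:5 select one of the four 32-bit BGE_MAR
		 * registers; bits 4:0 select the bit within it.
		 */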
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);

	return;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	int i;
	u_int32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT, 4);
#else
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT, 4);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("bge%d: RX CPU self-diagnostics failed!\n",
		    sc->bge_unit);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

static int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	volatile struct bge_rcb *vrcb;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if (sc->bge_extram) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	}

	/* Configure DMA resource pool */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);

	/* Configure mbuf pool watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: buffer manager failed to start\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: flow-through queue init failed\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_std_ring);
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring);
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
	    rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
	    rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

	/* Set up dummy disabled mini ring RCB */
	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		vrcb++;
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_tx_ring);
	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
	vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

	/* Disable all unused RX return rings */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		vrcb->bge_hostaddr.bge_addr_hi = 0;
		vrcb->bge_hostaddr.bge_addr_lo = 0;
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,
		    BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		vrcb++;
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_return_ring);
	vrcb->bge_nicaddr = 0x00000000;
	vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT, 0);

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: host coalescing engine failed to idle\n",
		    sc->bge_unit);
		return(ENXIO);
	}

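	/*
	 * The coalescing parameters below batch interrupts: the intent
	 * (a sketch of the semantics; see the Broadcom documentation)
	 * is that an interrupt fires once the tick timer expires or the
	 * max BD count is reached, whichever happens first.
	 */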
	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);

	/* Set up address of statistics block */
	CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_info.bge_stats));

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_status_block));
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(dev)
	device_t dev;
{
	struct bge_type *t;
	struct bge_softc *sc;
	char *descbuf;

	t = bge_devs;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(struct bge_softc));
	sc->bge_unit = device_get_unit(dev);
	sc->bge_dev = dev;

	while(t->bge_name != NULL) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
#ifdef notdef
			bge_vpd_read(sc);
			device_set_desc(dev, sc->bge_vpd_prodname);
#endif
			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
			if (descbuf == NULL)
				return(ENOMEM);
			snprintf(descbuf, BGE_DEVDESC_MAX,
			    "%s, ASIC rev. %#04x", t->bge_name,
			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
			device_set_desc_copy(dev, descbuf);
			free(descbuf, M_TEMP);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

static int
bge_attach(dev)
	device_t dev;
{
	int s;
	u_int32_t command;
	struct ifnet *ifp;
	struct bge_softc *sc;
	u_int32_t hwcfg = 0;
	u_int32_t mac_addr = 0;
	int unit, error = 0, rid;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->bge_dev = dev;
	sc->bge_unit = unit;

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(command & PCIM_CMD_MEMEN)) {
		printf("bge%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->bge_res == NULL) {
		printf("bge%d: couldn't map memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);

	/*
	 * XXX FIXME: rman_get_virtual() on the alpha is currently
	 * broken and returns a physical address instead of a kernel
	 * virtual address. Consequently, we need to do a little
	 * extra mangling of the vhandle on the alpha. This should
	 * eventually be fixed! The whole idea here is to get rid
	 * of platform dependencies.
	 */
#ifdef __alpha__
	if (pci_cvt_to_bwx(sc->bge_vhandle))
		sc->bge_vhandle = pci_cvt_to_bwx(sc->bge_vhandle);
	else
		sc->bge_vhandle = pci_cvt_to_dense(sc->bge_vhandle);
	sc->bge_vhandle = ALPHA_PHYS_TO_K0SEG(sc->bge_vhandle);
#endif

	/* Allocate interrupt */
	rid = 0;

	sc->bge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		printf("bge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
	    bge_intr, sc, &sc->bge_intrhand);

	if (error) {
		bge_release_resources(sc);
		printf("bge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	sc->bge_unit = unit;

	/* Try to reset the chip. */
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		printf("bge%d: chip initialization failed\n", sc->bge_unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
	if ((mac_addr >> 16) == 0x484b) {
		sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
		sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
		sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
	} else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("bge%d: failed to read station address\n", unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * A Broadcom chip was detected. Inform the world.
	 */
	printf("bge%d: Ethernet address: %6D\n", unit,
	    sc->arpcom.ac_enaddr, ":");

	/* Allocate the general information block and ring buffers. */
	sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_rdata == NULL) {
		bge_release_resources(sc);
		error = ENXIO;
		printf("bge%d: no memory for list buffers!\n", sc->bge_unit);
		goto fail;
	}

	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (bge_alloc_jumbo_mem(sc)) {
		printf("bge%d: jumbo buffer allocation "
		    "failed\n", sc->bge_unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* Set up ifnet structure */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, "bge", sc->bge_unit);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = bge_start;
	ifp->if_watchdog = bge_watchdog;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
	ifp->if_hwassist = BGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Save ASIC rev. */

	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else {
		bge_read_eeprom(sc, (caddr_t)&hwcfg,
		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
	} else {
		/*
		 * Do transceiver setup.
		 */
		if (mii_phy_probe(dev, &sc->bge_miibus,
		    bge_ifmedia_upd, bge_ifmedia_sts)) {
			printf("bge%d: MII without any PHY!\n", sc->bge_unit);
			bge_release_resources(sc);
			bge_free_jumbo_mem(sc);
			error = ENXIO;
			goto fail;
		}
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads. On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	switch (sc->bge_chipid) {
	case BGE_CHIPID_BCM5701_A0:
	case BGE_CHIPID_BCM5701_B0:
	case BGE_CHIPID_BCM5701_B2:
	case BGE_CHIPID_BCM5701_B5:
		/* If in PCI-X mode, work around the alignment bug. */
		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
		    BGE_PCISTATE_PCI_BUSSPEED)
			sc->bge_rx_alignment_bug = 1;
		break;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	callout_handle_init(&sc->bge_stat_ch);

fail:
	splx(s);

	return(error);
}

static int
bge_detach(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	int s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	bge_stop(sc);
	bge_reset(sc);

	if (sc->bge_tbi) {
		ifmedia_removeall(&sc->bge_ifmedia);
	} else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}

	bge_release_resources(sc);
	bge_free_jumbo_mem(sc);

	splx(s);

	return(0);
}

static void
bge_release_resources(sc)
	struct bge_softc *sc;
{
	device_t dev;

	dev = sc->bge_dev;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);

	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_rdata != NULL)
		contigfree(sc->bge_rdata,
		    sizeof(struct bge_ring_data), M_DEVBUF);

	return;
}
1897
1898static void
1899bge_reset(sc)
1900 struct bge_softc *sc;
1901{
1902 device_t dev;
1903 u_int32_t cachesize, command, pcistate;
1904 int i, val = 0;
1905
1906 dev = sc->bge_dev;
1907
1908 /* Save some important PCI state. */
1909 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1910 command = pci_read_config(dev, BGE_PCI_CMD, 4);
1911 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1912
1913 pci_write_config(dev, BGE_PCI_MISC_CTL,
1914 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1915 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1916
1917 /* Issue global reset */
1918 bge_writereg_ind(sc, BGE_MISC_CFG,
1919 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
1920
1921 DELAY(1000);
1922
1923 /* Reset some of the PCI state that got zapped by reset */
1924 pci_write_config(dev, BGE_PCI_MISC_CTL,
1925 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1926 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1927 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1928 pci_write_config(dev, BGE_PCI_CMD, command, 4);
1929 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1930
1931 /*
1932 * Prevent PXE restart: write a magic number to the
1933 * general communications memory at 0xB50.
1934 */
1935 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1936 /*
1937 * Poll the value location we just wrote until
1938 * we see the 1's complement of the magic number.
1939 * This indicates that the firmware initialization
1940 * is complete.
1941 */
1942 for (i = 0; i < BGE_TIMEOUT; i++) {
1943 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1944 if (val == ~BGE_MAGIC_NUMBER)
1945 break;
1946 DELAY(10);
1947 }
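	/*
	 * For reference, BGE_MAGIC_NUMBER is defined in if_bgereg.h as
	 * 0x4B657654 (ASCII "KevT"), so a successful handshake reads
	 * back 0xB49A89AB.
	 */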
1948
1949 if (i == BGE_TIMEOUT) {
1950 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
1951 return;
1952 }
1953
1954 /*
1955 * XXX Wait for the value of the PCISTATE register to
1956 * return to its original pre-reset state. This is a
1957 * fairly good indicator of reset completion. If we don't
1958 * wait for the reset to fully complete, trying to read
1959 * from the device's non-PCI registers may yield garbage
1960 * results.
1961 */
1962 for (i = 0; i < BGE_TIMEOUT; i++) {
1963 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1964 break;
1965 DELAY(10);
1966 }
1967
1968 /* Enable memory arbiter. */
1969 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1970
1971 /* Fix up byte swapping */
1972 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
1973 BGE_MODECTL_BYTESWAP_DATA);
1974
1975 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1976
1977 DELAY(10000);
1978
1979 return;
1980}
1981
1982/*
1983 * Frame reception handling. This is called if there's a frame
1984 * on the receive return list.
1985 *
1986 * Note: we have to be able to handle two possibilities here:
1987 * 1) the frame is from the jumbo receive ring
1988 * 2) the frame is from the standard receive ring
1989 */
1990
1991static void
1992bge_rxeof(sc)
1993 struct bge_softc *sc;
1994{
1995 struct ifnet *ifp;
1996 int stdcnt = 0, jumbocnt = 0;
1997
1998 ifp = &sc->arpcom.ac_if;
1999
2000 while(sc->bge_rx_saved_considx !=
2001 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2002 struct bge_rx_bd *cur_rx;
2003 u_int32_t rxidx;
2004 struct ether_header *eh;
2005 struct mbuf *m = NULL;
2006 u_int16_t vlan_tag = 0;
2007 int have_tag = 0;
2008
2009 cur_rx =
2010 &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
2011
2012 rxidx = cur_rx->bge_idx;
2013 BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);
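		/*
		 * BGE_INC() (from if_bgereg.h) advances a ring index
		 * with wraparound; roughly:
		 *
		 *	#define BGE_INC(x, y)	(x) = ((x) + 1) % (y)
		 */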
2014
2015 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2016 have_tag = 1;
2017 vlan_tag = cur_rx->bge_vlan_tag;
2018 }
2019
2020 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2021 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2022 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2023 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2024 jumbocnt++;
2025 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2026 ifp->if_ierrors++;
2027 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2028 continue;
2029 }
2030 if (bge_newbuf_jumbo(sc,
2031 sc->bge_jumbo, NULL) == ENOBUFS) {
2032 ifp->if_ierrors++;
2033 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2034 continue;
2035 }
2036 } else {
2037 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2038 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2039 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2040 stdcnt++;
2041 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2042 ifp->if_ierrors++;
2043 bge_newbuf_std(sc, sc->bge_std, m);
2044 continue;
2045 }
2046 if (bge_newbuf_std(sc, sc->bge_std,
2047 NULL) == ENOBUFS) {
2048 ifp->if_ierrors++;
2049 bge_newbuf_std(sc, sc->bge_std, m);
2050 continue;
2051 }
2052 }
2053
2054 ifp->if_ipackets++;
2055#ifndef __i386__
2056 /*
2057 * The i386 allows unaligned accesses, but for other
2058 * platforms we must make sure the payload is aligned.
2059 */
2060 if (sc->bge_rx_alignment_bug) {
2061 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2062 cur_rx->bge_len);
2063 m->m_data += ETHER_ALIGN;
2064 }
2065#endif
2066 eh = mtod(m, struct ether_header *);
2067 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2068 m->m_pkthdr.rcvif = ifp;
2069
2070 /* Remove header from mbuf and pass it on. */
2071 m_adj(m, sizeof(struct ether_header));
2072
2073#if 0 /* currently broken for some packets, possibly related to TCP options */
2074 if (ifp->if_hwassist) {
2075 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2076 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2077 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2078 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2079 m->m_pkthdr.csum_data =
2080 cur_rx->bge_tcp_udp_csum;
2081 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2082 }
2083 }
2084#endif
2085
2086 /*
2087 * If we received a packet with a vlan tag, pass it
2088 * to vlan_input() instead of ether_input().
2089 */
2090 if (have_tag) {
2091 VLAN_INPUT_TAG(eh, m, vlan_tag);
2092 have_tag = vlan_tag = 0;
2093 continue;
2094 }
2095
2096 ether_input(ifp, eh, m);
2097 }
2098
2099 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2100 if (stdcnt)
2101 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2102 if (jumbocnt)
2103 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2104
2105 return;
2106}
2107
2108static void
2109bge_txeof(sc)
2110 struct bge_softc *sc;
2111{
2112 struct bge_tx_bd *cur_tx = NULL;
2113 struct ifnet *ifp;
2114
2115 ifp = &sc->arpcom.ac_if;
2116
2117 /*
2118 * Go through our tx ring and free mbufs for those
2119 * frames that have been sent.
2120 */
2121 while (sc->bge_tx_saved_considx !=
2122 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2123 u_int32_t idx = 0;
2124
2125 idx = sc->bge_tx_saved_considx;
2126 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2127 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2128 ifp->if_opackets++;
2129 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2130 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2131 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2132 }
2133 sc->bge_txcnt--;
2134 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2135 ifp->if_timer = 0;
2136 }
2137
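	/*
	 * If we freed at least one descriptor, the ring has room
	 * again, so allow bge_start() to queue more frames.
	 */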
2138 if (cur_tx != NULL)
2139 ifp->if_flags &= ~IFF_OACTIVE;
2140
2141 return;
2142}
2143
2144static void
2145bge_intr(xsc)
2146 void *xsc;
2147{
2148 struct bge_softc *sc;
2149 struct ifnet *ifp;
2150 u_int32_t status;
2151
2152 sc = xsc;
2153 ifp = &sc->arpcom.ac_if;
2154
2155#ifdef notdef
2156 /* Avoid this for now -- checking this register is expensive. */
2157 /* Make sure this is really our interrupt. */
2158 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2159 return;
2160#endif
2161 /* Ack interrupt and stop others from occurring. */
2162 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2163
2164 /*
2165 * Process link state changes.
2166 * Grrr. The link status word in the status block does
2167 * not work correctly on the BCM5700 rev AX and BX chips,
2168 * according to all available information. Hence, we have
2169 * to enable MII interrupts in order to properly obtain
2170 * async link changes. Unfortunately, this also means that
2171 * we have to read the MAC status register to detect link
2172 * changes, thereby adding an additional register access to
2173 * the interrupt handler.
2174 */
2175
2176 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2179 status = CSR_READ_4(sc, BGE_MAC_STS);
2180 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2181 sc->bge_link = 0;
2182 untimeout(bge_tick, sc, sc->bge_stat_ch);
2183 bge_tick(sc);
2184 /* Clear the interrupt */
2185 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2186 BGE_EVTENB_MI_INTERRUPT);
2187 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2188 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2189 BRGPHY_INTRS);
2190 }
2191 } else {
2192 if ((sc->bge_rdata->bge_status_block.bge_status &
2193 BGE_STATFLAG_UPDATED) &&
2194 (sc->bge_rdata->bge_status_block.bge_status &
2195 BGE_STATFLAG_LINKSTATE_CHANGED)) {
2196 sc->bge_rdata->bge_status_block.bge_status &=
    ~(BGE_STATFLAG_UPDATED|BGE_STATFLAG_LINKSTATE_CHANGED);
2197 /*
2198 * Sometimes PCS encoding errors are detected in
2199 * TBI mode (on fiber NICs), and for some reason
2200 * the chip will signal them as link changes.
2201 * If we get a link change event, but the 'PCS
2202 * encoding error' bit in the MAC status register
2203 * is set, don't bother doing a link check.
2204 * This avoids spurious "gigabit link up" messages
2205 * that sometimes appear on fiber NICs during
2206 * periods of heavy traffic. (There should be no
2207 * effect on copper NICs.)
2208 */
2209 status = CSR_READ_4(sc, BGE_MAC_STS);
2210 if (!(status & BGE_MACSTAT_PORT_DECODE_ERROR)) {
2211 sc->bge_link = 0;
2212 untimeout(bge_tick, sc, sc->bge_stat_ch);
2213 bge_tick(sc);
2214 }
2218 /* Clear the interrupt */
2219 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2220 BGE_MACSTAT_CFG_CHANGED);
2221
2222 /* Force flush the status block cached by PCI bridge */
2223 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2224 }
2225 }
2226
2227 if (ifp->if_flags & IFF_RUNNING) {
2228 /* Check RX return ring producer/consumer */
2229 bge_rxeof(sc);
2230
2231 /* Check TX ring producer/consumer */
2232 bge_txeof(sc);
2233 }
2234
2235 bge_handle_events(sc);
2236
2237 /* Re-enable interrupts. */
2238 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2239
2240 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
2241 bge_start(ifp);
2242
2243 return;
2244}
2245
2246static void
2247bge_tick(xsc)
2248 void *xsc;
2249{
2250 struct bge_softc *sc;
2251 struct mii_data *mii = NULL;
2252 struct ifmedia *ifm = NULL;
2253 struct ifnet *ifp;
2254 int s;
2255
2256 sc = xsc;
2257 ifp = &sc->arpcom.ac_if;
2258
2259 s = splimp();
2260
2261 bge_stats_update(sc);
2262 sc->bge_stat_ch = timeout(bge_tick, sc, hz);
2263 if (sc->bge_link) {
2264 splx(s);
2265 return;
2266 }
2267
2268 if (sc->bge_tbi) {
2269 ifm = &sc->bge_ifmedia;
2270 if (CSR_READ_4(sc, BGE_MAC_STS) &
2271 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2272 sc->bge_link++;
2273 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2274 printf("bge%d: gigabit link up\n", sc->bge_unit);
2275 if (ifp->if_snd.ifq_head != NULL)
2276 bge_start(ifp);
2277 }
2278 splx(s);
2279 return;
2280 }
2281
2282 mii = device_get_softc(sc->bge_miibus);
2283 mii_tick(mii);
2284
2285 if (!sc->bge_link) {
2286 mii_pollstat(mii);
2287 if (mii->mii_media_status & IFM_ACTIVE &&
2288 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2289 sc->bge_link++;
2290 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX ||
2291 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2292 printf("bge%d: gigabit link up\n",
2293 sc->bge_unit);
2294 if (ifp->if_snd.ifq_head != NULL)
2295 bge_start(ifp);
2296 }
2297 }
2298
2299 splx(s);
2300
2301 return;
2302}
2303
2304static void
2305bge_stats_update(sc)
2306 struct bge_softc *sc;
2307{
2308 struct ifnet *ifp;
2309 struct bge_stats *stats;
2310
2311 ifp = &sc->arpcom.ac_if;
2312
2313 stats = (struct bge_stats *)(sc->bge_vhandle +
2314 BGE_MEMWIN_START + BGE_STATS_BLOCK);
2315
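	/*
	 * The chip's collision counters are cumulative, so adding the
	 * difference between the chip total and if_collisions keeps
	 * the ifnet counter in step without clearing the hardware
	 * statistics block.
	 */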
2316 ifp->if_collisions +=
2317 (stats->dot3StatsSingleCollisionFrames.bge_addr_lo +
2318 stats->dot3StatsMultipleCollisionFrames.bge_addr_lo +
2319 stats->dot3StatsExcessiveCollisions.bge_addr_lo +
2320 stats->dot3StatsLateCollisions.bge_addr_lo) -
2321 ifp->if_collisions;
2322
2323#ifdef notdef
2324 ifp->if_collisions +=
2325 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2326 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2327 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2328 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2329 ifp->if_collisions;
2330#endif
2331
2332 return;
2333}
2334
2335/*
2336 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2337 * pointers to descriptors.
2338 */
2339static int
2340bge_encap(sc, m_head, txidx)
2341 struct bge_softc *sc;
2342 struct mbuf *m_head;
2343 u_int32_t *txidx;
2344{
2345 struct bge_tx_bd *f = NULL;
2346 struct mbuf *m;
2347 u_int32_t frag, cur, cnt = 0;
2348 u_int16_t csum_flags = 0;
2349 struct ifvlan *ifv = NULL;
2350
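	/*
	 * The vlan(4) code in this tree marks outbound frames with
	 * M_PROTO1 and points m_pkthdr.rcvif at the vlan interface;
	 * that is how we recover the softc (and thus the tag) for
	 * hardware VLAN tag insertion below.
	 */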
2351 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2352 m_head->m_pkthdr.rcvif != NULL &&
2353 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2354 ifv = m_head->m_pkthdr.rcvif->if_softc;
2355
2356 m = m_head;
2357 cur = frag = *txidx;
2358
2359 if (m_head->m_pkthdr.csum_flags) {
2360 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2361 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2362 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2363 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2364 if (m_head->m_flags & M_LASTFRAG)
2365 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2366 else if (m_head->m_flags & M_FRAG)
2367 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2368 }
2369 /*
2370 * Start packing the mbufs in this chain into
2371 * the fragment pointers. Stop when we run out
2372 * of fragments or hit the end of the mbuf chain.
2373 */
2374 for (m = m_head; m != NULL; m = m->m_next) {
2375 if (m->m_len != 0) {
2376 f = &sc->bge_rdata->bge_tx_ring[frag];
2377 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2378 break;
2379 BGE_HOSTADDR(f->bge_addr) =
2380 vtophys(mtod(m, vm_offset_t));
2381 f->bge_len = m->m_len;
2382 f->bge_flags = csum_flags;
2383 if (ifv != NULL) {
2384 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2385 f->bge_vlan_tag = ifv->ifv_tag;
2386 } else {
2387 f->bge_vlan_tag = 0;
2388 }
2389 /*
2390 * Sanity check: avoid coming within 16 descriptors
2391 * of the end of the ring.
2392 */
2393 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2394 return(ENOBUFS);
2395 cur = frag;
2396 BGE_INC(frag, BGE_TX_RING_CNT);
2397 cnt++;
2398 }
2399 }
2400
2401 if (m != NULL)
2402 return(ENOBUFS);
2403
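	/*
	 * If the producer index has caught up with the consumer
	 * index, the ring is full.
	 */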
2404 if (frag == sc->bge_tx_saved_considx)
2405 return(ENOBUFS);
2406
2407 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2408 sc->bge_cdata.bge_tx_chain[cur] = m_head;
2409 sc->bge_txcnt += cnt;
2410
2411 *txidx = frag;
2412
2413 return(0);
2414}
2415
2416/*
2417 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2418 * to the mbuf data regions directly in the transmit descriptors.
2419 */
2420static void
2421bge_start(ifp)
2422 struct ifnet *ifp;
2423{
2424 struct bge_softc *sc;
2425 struct mbuf *m_head = NULL;
2426 u_int32_t prodidx = 0;
2427
2428 sc = ifp->if_softc;
2429
2430 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
2431 return;
2432
2433 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
2434
2435 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2436 IF_DEQUEUE(&ifp->if_snd, m_head);
2437 if (m_head == NULL)
2438 break;
2439
2440 /*
2441 * XXX
2442 * safety overkill. If this is a fragmented packet chain
2443 * with delayed TCP/UDP checksums, then only encapsulate
2444 * it if we have enough descriptors to handle the entire
2445 * chain at once.
2446 * (paranoia -- may not actually be needed)
2447 */
2448 if (m_head->m_flags & M_FIRSTFRAG &&
2449 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2450 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2451 m_head->m_pkthdr.csum_data + 16) {
2452 IF_PREPEND(&ifp->if_snd, m_head);
2453 ifp->if_flags |= IFF_OACTIVE;
2454 break;
2455 }
2456 }
2457
2458 /*
2459 * Pack the data into the transmit ring. If we
2460 * don't have room, set the OACTIVE flag and wait
2461 * for the NIC to drain the ring.
2462 */
2463 if (bge_encap(sc, m_head, &prodidx)) {
2464 IF_PREPEND(&ifp->if_snd, m_head);
2465 ifp->if_flags |= IFF_OACTIVE;
2466 break;
2467 }
2468
2469 /*
2470 * If there's a BPF listener, bounce a copy of this frame
2471 * to him.
2472 */
2473 if (ifp->if_bpf)
2474 bpf_mtap(ifp, m_head);
2475 }
2476
2477 /* Transmit */
2478 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2479 /* 5700 b2 errata */
2480 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2481 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
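	/*
	 * The second write above is the workaround: on 5700 B-step
	 * parts a single producer-index mailbox update can reportedly
	 * be missed by the chip, so it is issued twice.
	 */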
2482
2483 /*
2484 * Set a timeout in case the chip goes out to lunch.
2485 */
2486 ifp->if_timer = 5;
2487
2488 return;
2489}
2490
2491static void
2492bge_init(xsc)
2493 void *xsc;
2494{
2495 struct bge_softc *sc = xsc;
2496 struct ifnet *ifp;
2497 u_int16_t *m;
2498 int s;
2499
2500 s = splimp();
2501
2502 ifp = &sc->arpcom.ac_if;
2503
2504 if (ifp->if_flags & IFF_RUNNING) {
2505 splx(s);
2506 return;
2507 }
2508
2509 /* Cancel pending I/O and flush buffers. */
2510 bge_stop(sc);
2511 bge_reset(sc);
2512 bge_chipinit(sc);
2513
2514 /*
2515 * Init the various state machines, ring
2516 * control blocks and firmware.
2517 */
2518 if (bge_blockinit(sc)) {
2519 printf("bge%d: initialization failure\n", sc->bge_unit);
2520 splx(s);
2521 return;
2522 }
2523
2526 /* Specify MTU. */
2527 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2528 ETHER_HDR_LEN + ETHER_CRC_LEN);
2529
2530 /* Load our MAC address. */
2531 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
2532 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2533 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
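	/*
	 * The station address is programmed as three network-order
	 * 16-bit words: bytes 0-1 via BGE_MAC_ADDR1_LO, bytes 2-5
	 * packed into BGE_MAC_ADDR1_HI.
	 */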
2534
2535 /* Enable or disable promiscuous mode as needed. */
2536 if (ifp->if_flags & IFF_PROMISC) {
2537 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2538 } else {
2539 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2540 }
2541
2542 /* Program multicast filter. */
2543 bge_setmulti(sc);
2544
2545 /* Init RX ring. */
2546 bge_init_rx_ring_std(sc);
2547
2548 /* Init jumbo RX ring. */
2549 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2550 bge_init_rx_ring_jumbo(sc);
2551
2552 /* Init our RX return ring index */
2553 sc->bge_rx_saved_considx = 0;
2554
2555 /* Init TX ring. */
2556 bge_init_tx_ring(sc);
2557
2558 /* Turn on transmitter */
2559 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2560
2561 /* Turn on receiver */
2562 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2563
2564 /* Tell firmware we're alive. */
2565 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2566
2567 /* Enable host interrupts. */
2568 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2569 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2570 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2571
2572 bge_ifmedia_upd(ifp);
2573
2574 ifp->if_flags |= IFF_RUNNING;
2575 ifp->if_flags &= ~IFF_OACTIVE;
2576
2577 splx(s);
2578
2579 sc->bge_stat_ch = timeout(bge_tick, sc, hz);
2580
2581 return;
2582}
2583
2584/*
2585 * Set media options.
2586 */
2587static int
2588bge_ifmedia_upd(ifp)
2589 struct ifnet *ifp;
2590{
2591 struct bge_softc *sc;
2592 struct mii_data *mii;
2593 struct ifmedia *ifm;
2594
2595 sc = ifp->if_softc;
2596 ifm = &sc->bge_ifmedia;
2597
2598 /* If this is a 1000baseX NIC, enable the TBI port. */
2599 if (sc->bge_tbi) {
2600 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2601 return(EINVAL);
2602 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2603 case IFM_AUTO:
2604 break;
2605 case IFM_1000_SX:
2606 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2607 BGE_CLRBIT(sc, BGE_MAC_MODE,
2608 BGE_MACMODE_HALF_DUPLEX);
2609 } else {
2610 BGE_SETBIT(sc, BGE_MAC_MODE,
2611 BGE_MACMODE_HALF_DUPLEX);
2612 }
2613 break;
2614 default:
2615 return(EINVAL);
2616 }
2617 return(0);
2618 }
2619
2620 mii = device_get_softc(sc->bge_miibus);
2621 sc->bge_link = 0;
2622 if (mii->mii_instance) {
2623 struct mii_softc *miisc;
2624 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2625 miisc = LIST_NEXT(miisc, mii_list))
2626 mii_phy_reset(miisc);
2627 }
2628 mii_mediachg(mii);
2629
2630 return(0);
2631}
2632
2633/*
2634 * Report current media status.
2635 */
2636static void
2637bge_ifmedia_sts(ifp, ifmr)
2638 struct ifnet *ifp;
2639 struct ifmediareq *ifmr;
2640{
2641 struct bge_softc *sc;
2642 struct mii_data *mii;
2643
2644 sc = ifp->if_softc;
2645
2646 if (sc->bge_tbi) {
2647 ifmr->ifm_status = IFM_AVALID;
2648 ifmr->ifm_active = IFM_ETHER;
2649 if (CSR_READ_4(sc, BGE_MAC_STS) &
2650 BGE_MACSTAT_TBI_PCS_SYNCHED)
2651 ifmr->ifm_status |= IFM_ACTIVE;
2652 ifmr->ifm_active |= IFM_1000_SX;
2653 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2654 ifmr->ifm_active |= IFM_HDX;
2655 else
2656 ifmr->ifm_active |= IFM_FDX;
2657 return;
2658 }
2659
2660 mii = device_get_softc(sc->bge_miibus);
2661 mii_pollstat(mii);
2662 ifmr->ifm_active = mii->mii_media_active;
2663 ifmr->ifm_status = mii->mii_media_status;
2664
2665 return;
2666}
2667
2668static int
2669bge_ioctl(ifp, command, data)
2670 struct ifnet *ifp;
2671 u_long command;
2672 caddr_t data;
2673{
2674 struct bge_softc *sc = ifp->if_softc;
2675 struct ifreq *ifr = (struct ifreq *) data;
2676 int s, mask, error = 0;
2677 struct mii_data *mii;
2678
2679 s = splimp();
2680
2681 switch(command) {
2682 case SIOCSIFADDR:
2683 case SIOCGIFADDR:
2684 error = ether_ioctl(ifp, command, data);
2685 break;
2686 case SIOCSIFMTU:
2687 if (ifr->ifr_mtu > BGE_JUMBO_MTU)
2688 error = EINVAL;
2689 else {
2690 ifp->if_mtu = ifr->ifr_mtu;
2691 ifp->if_flags &= ~IFF_RUNNING;
2692 bge_init(sc);
2693 }
2694 break;
2695 case SIOCSIFFLAGS:
2696 if (ifp->if_flags & IFF_UP) {
2697 /*
2698 * If only the state of the PROMISC flag changed,
2699 * then just use the 'set promisc mode' command
2700 * instead of reinitializing the entire NIC. Doing
2701 * a full re-init means reloading the firmware and
2702 * waiting for it to start up, which may take a
2703 * second or two.
2704 */
2705 if (ifp->if_flags & IFF_RUNNING &&
2706 ifp->if_flags & IFF_PROMISC &&
2707 !(sc->bge_if_flags & IFF_PROMISC)) {
2708 BGE_SETBIT(sc, BGE_RX_MODE,
2709 BGE_RXMODE_RX_PROMISC);
2710 } else if (ifp->if_flags & IFF_RUNNING &&
2711 !(ifp->if_flags & IFF_PROMISC) &&
2712 sc->bge_if_flags & IFF_PROMISC) {
2713 BGE_CLRBIT(sc, BGE_RX_MODE,
2714 BGE_RXMODE_RX_PROMISC);
2715 } else
2716 bge_init(sc);
2717 } else {
2718 if (ifp->if_flags & IFF_RUNNING) {
2719 bge_stop(sc);
2720 }
2721 }
2722 sc->bge_if_flags = ifp->if_flags;
2723 error = 0;
2724 break;
2725 case SIOCADDMULTI:
2726 case SIOCDELMULTI:
2727 if (ifp->if_flags & IFF_RUNNING) {
2728 bge_setmulti(sc);
2729 error = 0;
2730 }
2731 break;
2732 case SIOCSIFMEDIA:
2733 case SIOCGIFMEDIA:
2734 if (sc->bge_tbi) {
2735 error = ifmedia_ioctl(ifp, ifr,
2736 &sc->bge_ifmedia, command);
2737 } else {
2738 mii = device_get_softc(sc->bge_miibus);
2739 error = ifmedia_ioctl(ifp, ifr,
2740 &mii->mii_media, command);
2741 }
2742 break;
2743 case SIOCSIFCAP:
2744 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2745 if (mask & IFCAP_HWCSUM) {
2746 if (IFCAP_HWCSUM & ifp->if_capenable)
2747 ifp->if_capenable &= ~IFCAP_HWCSUM;
2748 else
2749 ifp->if_capenable |= IFCAP_HWCSUM;
2750 }
2751 error = 0;
2752 break;
2753 default:
2754 error = EINVAL;
2755 break;
2756 }
2757
2758 (void)splx(s);
2759
2760 return(error);
2761}
2762
2763static void
2764bge_watchdog(ifp)
2765 struct ifnet *ifp;
2766{
2767 struct bge_softc *sc;
2768
2769 sc = ifp->if_softc;
2770
2771 printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
2772
2773 ifp->if_flags &= ~IFF_RUNNING;
2774 bge_init(sc);
2775
2776 ifp->if_oerrors++;
2777
2778 return;
2779}
2780
2781/*
2782 * Stop the adapter and free any mbufs allocated to the
2783 * RX and TX lists.
2784 */
2785static void
2786bge_stop(sc)
2787 struct bge_softc *sc;
2788{
2789 struct ifnet *ifp;
2790 struct ifmedia_entry *ifm;
2791 struct mii_data *mii = NULL;
2792 int mtmp, itmp;
2793
2794 ifp = &sc->arpcom.ac_if;
2795
2796 if (!sc->bge_tbi)
2797 mii = device_get_softc(sc->bge_miibus);
2798
2799 untimeout(bge_tick, sc, sc->bge_stat_ch);
2800
2801 /*
2802 * Disable all of the receiver blocks
2803 */
2804 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2805 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2806 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2807 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2808 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2809 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2810 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2811
2812 /*
2813 * Disable all of the transmit blocks
2814 */
2815 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2816 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2817 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2818 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2819 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2820 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2821 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2822
2823 /*
2824 * Shut down all of the memory managers and related
2825 * state machines.
2826 */
2827 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2828 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2829 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2830 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2831 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2832 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2833 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2834
2835 /* Disable host interrupts. */
2836 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2837 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2838
2839 /*
2840 * Tell firmware we're shutting down.
2841 */
2842 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2843
2844 /* Free the RX lists. */
2845 bge_free_rx_ring_std(sc);
2846
2847 /* Free jumbo RX list. */
2848 bge_free_rx_ring_jumbo(sc);
2849
2850 /* Free TX buffers. */
2851 bge_free_tx_ring(sc);
2852
2853 /*
2854 * Isolate/power down the PHY, but leave the media selection
2855 * unchanged so that things will be put back to normal when
2856 * we bring the interface back up.
2857 */
2858 if (!sc->bge_tbi) {
2859 itmp = ifp->if_flags;
2860 ifp->if_flags |= IFF_UP;
2861 ifm = mii->mii_media.ifm_cur;
2862 mtmp = ifm->ifm_media;
2863 ifm->ifm_media = IFM_ETHER|IFM_NONE;
2864 mii_mediachg(mii);
2865 ifm->ifm_media = mtmp;
2866 ifp->if_flags = itmp;
2867 }
2868
2869 sc->bge_link = 0;
2870
2871 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2872
2873 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2874
2875 return;
2876}
2877
2878/*
2879 * Stop all chip I/O so that the kernel's probe routines don't
2880 * get confused by errant DMAs when rebooting.
2881 */
2882static void
2883bge_shutdown(dev)
2884 device_t dev;
2885{
2886 struct bge_softc *sc;
2887
2888 sc = device_get_softc(dev);
2889
2890 bge_stop(sc);
2891 bge_reset(sc);
2892
2893 return;
2894}