Initial import from FreeBSD RELENG_4
[dragonfly.git] / sys / dev / netif / bge / if_bge.c
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.22 2003/05/11 18:00:55 ps Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <dev/bge/if_bgereg.h>

#define BGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/* "controller miibus0" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

#if !defined(lint)
static const char rcsid[] =
	"$FreeBSD: src/sys/dev/bge/if_bge.c,v 1.3.2.22 2003/05/11 18:00:55 ps Exp $";
#endif

/*
 * Various supported device vendors/types and their names. Note: the
 * spec seems to indicate that the hardware still has Alteon's vendor
 * ID burned into it, though it will always be overridden by the vendor
 * ID in the EEPROM. Just to be safe, we cover all possibilities.
 */
#define BGE_DEVDESC_MAX		64	/* Maximum device description length */

static struct bge_type bge_devs[] = {
	{ ALT_VENDORID, ALT_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ ALT_VENDORID, ALT_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5700,
		"Broadcom BCM5700 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5701,
		"Broadcom BCM5701 Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5702X,
		"Broadcom BCM5702X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5703X,
		"Broadcom BCM5703X Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704C,
		"Broadcom BCM5704C Dual Gigabit Ethernet" },
	{ BCOM_VENDORID, BCOM_DEVICEID_BCM5704S,
		"Broadcom BCM5704S Dual Gigabit Ethernet" },
	{ SK_VENDORID, SK_DEVICEID_ALTIMA,
		"SysKonnect Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000,
		"Altima AC1000 Gigabit Ethernet" },
	{ ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100,
		"Altima AC9100 Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int bge_probe		__P((device_t));
static int bge_attach		__P((device_t));
static int bge_detach		__P((device_t));
static void bge_release_resources
				__P((struct bge_softc *));
static void bge_txeof		__P((struct bge_softc *));
static void bge_rxeof		__P((struct bge_softc *));

static void bge_tick		__P((void *));
static void bge_stats_update	__P((struct bge_softc *));
static int bge_encap		__P((struct bge_softc *, struct mbuf *,
					u_int32_t *));

static void bge_intr		__P((void *));
static void bge_start		__P((struct ifnet *));
static int bge_ioctl		__P((struct ifnet *, u_long, caddr_t));
static void bge_init		__P((void *));
static void bge_stop		__P((struct bge_softc *));
static void bge_watchdog	__P((struct ifnet *));
static void bge_shutdown	__P((device_t));
static int bge_ifmedia_upd	__P((struct ifnet *));
static void bge_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

static u_int8_t bge_eeprom_getbyte	__P((struct bge_softc *,
						int, u_int8_t *));
static int bge_read_eeprom	__P((struct bge_softc *, caddr_t, int, int));

static u_int32_t bge_crc	__P((caddr_t));
static void bge_setmulti	__P((struct bge_softc *));

static void bge_handle_events	__P((struct bge_softc *));
static int bge_alloc_jumbo_mem	__P((struct bge_softc *));
static void bge_free_jumbo_mem	__P((struct bge_softc *));
static void *bge_jalloc	__P((struct bge_softc *));
static void bge_jfree		__P((caddr_t, u_int));
static void bge_jref		__P((caddr_t, u_int));
static int bge_newbuf_std	__P((struct bge_softc *, int, struct mbuf *));
static int bge_newbuf_jumbo	__P((struct bge_softc *, int, struct mbuf *));
static int bge_init_rx_ring_std	__P((struct bge_softc *));
static void bge_free_rx_ring_std	__P((struct bge_softc *));
static int bge_init_rx_ring_jumbo	__P((struct bge_softc *));
static void bge_free_rx_ring_jumbo	__P((struct bge_softc *));
static void bge_free_tx_ring	__P((struct bge_softc *));
static int bge_init_tx_ring	__P((struct bge_softc *));

static int bge_chipinit		__P((struct bge_softc *));
static int bge_blockinit	__P((struct bge_softc *));

#ifdef notdef
static u_int8_t bge_vpd_readbyte	__P((struct bge_softc *, int));
static void bge_vpd_read_res	__P((struct bge_softc *,
					struct vpd_res *, int));
static void bge_vpd_read	__P((struct bge_softc *));
#endif

static u_int32_t bge_readmem_ind
				__P((struct bge_softc *, int));
static void bge_writemem_ind	__P((struct bge_softc *, int, int));
#ifdef notdef
static u_int32_t bge_readreg_ind
				__P((struct bge_softc *, int));
#endif
static void bge_writereg_ind	__P((struct bge_softc *, int, int));

static int bge_miibus_readreg	__P((device_t, int, int));
static int bge_miibus_writereg	__P((device_t, int, int, int));
static void bge_miibus_statchg	__P((device_t));

static void bge_reset		__P((struct bge_softc *));

static device_method_t bge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bge_probe),
	DEVMETHOD(device_attach,	bge_attach),
	DEVMETHOD(device_detach,	bge_detach),
	DEVMETHOD(device_shutdown,	bge_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bge_miibus_statchg),

	{ 0, 0 }
};

static driver_t bge_driver = {
	"bge",
	bge_methods,
	sizeof(struct bge_softc)
};

static devclass_t bge_devclass;

DRIVER_MODULE(if_bge, pci, bge_driver, bge_devclass, 0, 0);
DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0);

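/*
 * Indirect access to NIC-internal memory: the chip exposes a sliding
 * window into its internal RAM through PCI configuration space. A
 * write to the memory window base address config register selects the
 * region of NIC memory that is visible, and the companion data config
 * register then reads or writes one 32-bit word within it. This works
 * even before (or without) BAR0 being mapped.
 */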
static u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4));
}

static void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4);

	return;
}

#ifdef notdef
static u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	return(pci_read_config(dev, BGE_PCI_REG_DATA, 4));
}
#endif

static void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	device_t dev;

	dev = sc->bge_dev;

	pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4);
	pci_write_config(dev, BGE_PCI_REG_DATA, val, 4);

	return;
}

#ifdef notdef
static u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	device_t dev;
	u_int32_t val;

	dev = sc->bge_dev;
	pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: VPD read timed out\n", sc->bge_unit);
		return(0);
	}

	val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);

	return;
}

static void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
		    sc->bge_unit, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("bge%d: bad VPD resource id: expected %x got %x\n",
		    sc->bge_unit, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);

	return;
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("bge%d: eeprom read timed out\n", sc->bge_unit);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

static int
bge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	u_int32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

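	/*
	 * The integrated PHY on these chips lives at MII address 1.
	 * On the listed chip revisions, polling the other addresses can
	 * apparently return phantom results, so reads for any other PHY
	 * address are simply ignored there.
	 */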
	if (phy != 1)
		switch(sc->bge_chipid) {
		case BGE_CHIPID_BCM5701_B5:
		case BGE_CHIPID_BCM5703_A2:
		case BGE_CHIPID_BCM5704_A0:
			return(0);
		}

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY read timed out\n", sc->bge_unit);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static int
bge_miibus_writereg(dev, phy, reg, val)
	device_t dev;
	int phy, reg, val;
{
	struct bge_softc *sc;
	u_int32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	/* Writing with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: PHY write timed out\n", sc->bge_unit);
		return(0);
	}

	return(0);
}

static void
bge_miibus_statchg(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

static int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr;
	register int i;
	struct bge_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	sc->bge_cdata.bge_jumbo_buf = contigmalloc(BGE_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_cdata.bge_jumbo_buf == NULL) {
		printf("bge%d: no memory for jumbo buffers!\n", sc->bge_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array. Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface. This is because
	 * bge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
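	/*
	 * Each BGE_JLEN-sized slot therefore ends up laid out as:
	 *
	 *	+-------------------+---------------------------------+
	 *	| softc pointer     | jumbo buffer                    |
	 *	| (u_int64_t slot)  | (BGE_JLEN - sizeof(u_int64_t))  |
	 *	+-------------------+---------------------------------+
	 *
	 * bge_jref() and bge_jfree() recover the softc by backing up
	 * sizeof(u_int64_t) bytes from the buffer address they are
	 * handed.
	 */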
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		u_int64_t **aptr;
		aptr = (u_int64_t **)ptr;
		aptr[0] = (u_int64_t *)sc;
		ptr += sizeof(u_int64_t);
		sc->bge_cdata.bge_jslots[i].bge_buf = ptr;
		sc->bge_cdata.bge_jslots[i].bge_inuse = 0;
		ptr += (BGE_JLEN - sizeof(u_int64_t));
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			contigfree(sc->bge_cdata.bge_jumbo_buf,
			    BGE_JMEM, M_DEVBUF);
			sc->bge_cdata.bge_jumbo_buf = NULL;
			printf("bge%d: no memory for jumbo "
			    "buffer queue!\n", sc->bge_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

static void
bge_free_jumbo_mem(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_jpool_entry *entry;

	for (i = 0; i < BGE_JSLOTS; i++) {
		entry = SLIST_FIRST(&sc->bge_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	contigfree(sc->bge_cdata.bge_jumbo_buf, BGE_JMEM, M_DEVBUF);

	return;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(sc)
	struct bge_softc *sc;
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("bge%d: no free jumbo buffers\n", sc->bge_unit);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	sc->bge_cdata.bge_jslots[entry->slot].bge_inuse = 1;
	return(sc->bge_cdata.bge_jslots[entry->slot].bge_buf);
}

/*
 * Adjust usage count on a jumbo buffer.
 */
static void
bge_jref(buf, size)
	caddr_t buf;
	u_int size;
{
	struct bge_softc *sc;
	u_int64_t **aptr;
	register int i;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc = (struct bge_softc *)(aptr[0]);

	if (sc == NULL)
		panic("bge_jref: can't find softc pointer!");

	if (size != BGE_JUMBO_FRAMELEN)
		panic("bge_jref: adjusting refcount of buf of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr
	    - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jref: asked to reference buffer "
		    "that we don't manage!");
	else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
		panic("bge_jref: buffer already free!");
	else
		sc->bge_cdata.bge_jslots[i].bge_inuse++;

	return;
}

/*
 * Release a jumbo buffer.
 */
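/*
 * Note that a slot's use count may be greater than one if bge_jref()
 * has been called on it through the mbuf ext_ref hook (e.g. when an
 * mbuf referencing the buffer has been duplicated), so the slot only
 * goes back on the free list once the count drops to zero.
 */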
static void
bge_jfree(buf, size)
	caddr_t buf;
	u_int size;
{
	struct bge_softc *sc;
	u_int64_t **aptr;
	int i;
	struct bge_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
	sc = (struct bge_softc *)(aptr[0]);

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	if (size != BGE_JUMBO_FRAMELEN)
		panic("bge_jfree: freeing buffer of wrong size!");

	/* calculate the slot this buffer belongs to */

	i = ((vm_offset_t)aptr
	    - (vm_offset_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");
	else if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0)
		panic("bge_jfree: buffer already free!");
	else {
		sc->bge_cdata.bge_jslots[i].bge_inuse--;
		if (sc->bge_cdata.bge_jslots[i].bge_inuse == 0) {
			entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
			if (entry == NULL)
				panic("bge_jfree: buffer not in use!");
			entry->slot = i;
			SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead,
			    jpool_entries);
			SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
			    entry, jpool_entries);
		}
	}

	return;
}

/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

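	/*
	 * Offset the buffer by ETHER_ALIGN (2 bytes) so that the IP
	 * header lands 32-bit aligned after the 14-byte Ethernet
	 * header. This is skipped on chips with the RX alignment bug,
	 * which need the DMA address itself aligned; those payloads
	 * are realigned later by copying (see the bge_attach() notes).
	 */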
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("bge%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_unit);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
		m_new->m_flags |= M_EXT;
		m_new->m_len = m_new->m_pkthdr.len =
		    m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
		m_new->m_ext.ext_free = bge_jfree;
		m_new->m_ext.ext_ref = bge_jref;
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	BGE_HOSTADDR(r->bge_addr) = vtophys(mtod(m_new, caddr_t));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	return(0);
}

static void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_std_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_rcb *rcb;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

static void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i],
		    sizeof(struct bge_rx_bd));
	}

	return;
}

static void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_rdata->bge_tx_ring == NULL)
		return;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->bge_rdata->bge_tx_ring[i],
		    sizeof(struct bge_tx_bd));
	}

	return;
}

static int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	/* 5700 b2 errata */
	if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	return(0);
}

#define BGE_POLY	0xEDB88320

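/*
 * Compute the hash of a multicast group address. This is the standard
 * Ethernet CRC-32, computed bit-serially with the reflected polynomial
 * 0xEDB88320 over the six address bytes. The low 7 bits of the result
 * select one of 128 hash-filter bits, spread across the four 32-bit
 * BGE_MAR registers programmed in bge_setmulti() below.
 */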
static u_int32_t
bge_crc(addr)
	caddr_t addr;
{
	u_int32_t idx, bit, data, crc;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
	}

	return(crc & 0x7F);
}

static void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	int h, i;

	ifp = &sc->arpcom.ac_if;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	for (ifma = ifp->if_multiaddrs.lh_first;
	    ifma != NULL; ifma = ifma->ifma_link.le_next) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = bge_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);

	return;
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	int i;
	u_int32_t dma_rw_ctl;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_BIGENDIAN_INIT, 4);
#else
	pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL,
	    BGE_LITTLEENDIAN_INIT, 4);
#endif

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("bge%d: RX CPU self-diagnostics failed!\n",
		    sc->bge_unit);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(sc, i, 0);

	/* Set up the PCI DMA control register. */
	if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
	} else {
		/* PCI-X bus */
		/*
		 * The 5704 uses a different encoding of read/write
		 * watermarks.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5704)
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
		else
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
			    (0x0F);

		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs.
		 */
		if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5704) {
			u_int32_t tmp;

			tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f;
			if (tmp == 0x6 || tmp == 0x7)
				dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	if (sc->bge_asicrev == BGE_ASICREV_BCM5703 ||
	    sc->bge_asicrev == BGE_ASICREV_BCM5704)
		dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA;
	pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM|
	    BGE_MODECTL_RX_NO_PHDR_CSUM);

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_BNDRY_1024BYTES, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

static int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	volatile struct bge_rcb *vrcb;
	int i;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */
	CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if (sc->bge_extram) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	}

	/* Configure DMA resource pool */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);

	/* Configure mbuf pool watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: buffer manager failed to start\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: flow-through queue init failed\n",
		    sc->bge_unit);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_std_ring);
	rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	BGE_HOSTADDR(rcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_jumbo_ring);
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED);
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
	    rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
	    rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

	/* Set up dummy disabled mini ring RCB */
	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
	rcb->bge_maxlen_flags =
	    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		vrcb++;
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_SEND_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_tx_ring);
	vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT);
	vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0);

	/* Disable all unused RX return rings */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		vrcb->bge_hostaddr.bge_addr_hi = 0;
		vrcb->bge_hostaddr.bge_addr_lo = 0;
		vrcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT,
		    BGE_RCB_FLAG_RING_DISABLED);
		vrcb->bge_nicaddr = 0;
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		vrcb++;
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START +
	    BGE_RX_RETURN_RING_RCB);
	vrcb->bge_hostaddr.bge_addr_hi = 0;
	BGE_HOSTADDR(vrcb->bge_hostaddr) =
	    vtophys(&sc->bge_rdata->bge_rx_return_ring);
	vrcb->bge_nicaddr = 0x00000000;
	vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_RETURN_RING_CNT, 0);

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    sc->arpcom.ac_enaddr[0] + sc->arpcom.ac_enaddr[1] +
	    sc->arpcom.ac_enaddr[2] + sc->arpcom.ac_enaddr[3] +
	    sc->arpcom.ac_enaddr[4] + sc->arpcom.ac_enaddr[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: host coalescing engine failed to idle\n",
		    sc->bge_unit);
		return(ENXIO);
	}

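	/*
	 * The host coalescing engine batches status block updates and
	 * interrupts: for each direction, an interrupt is generated
	 * once either the coalescing tick threshold or the max buffer
	 * descriptor threshold is reached, whichever happens first.
	 * The values used here are driver tunables set in bge_attach().
	 */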
	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);

	/* Set up address of statistics block */
	CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_info.bge_stats));

	/* Set up address of status block */
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO,
	    vtophys(&sc->bge_rdata->bge_status_block));
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5700)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
static int
bge_probe(dev)
	device_t dev;
{
	struct bge_type *t;
	struct bge_softc *sc;
	char *descbuf;

	t = bge_devs;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(struct bge_softc));
	sc->bge_unit = device_get_unit(dev);
	sc->bge_dev = dev;

	while (t->bge_name != NULL) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
#ifdef notdef
			bge_vpd_read(sc);
			device_set_desc(dev, sc->bge_vpd_prodname);
#endif
			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
			if (descbuf == NULL)
				return(ENOMEM);
			snprintf(descbuf, BGE_DEVDESC_MAX,
			    "%s, ASIC rev. %#04x", t->bge_name,
			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
			device_set_desc_copy(dev, descbuf);
			free(descbuf, M_TEMP);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

static int
bge_attach(dev)
	device_t dev;
{
	int s;
	u_int32_t command;
	struct ifnet *ifp;
	struct bge_softc *sc;
	u_int32_t hwcfg = 0;
	u_int32_t mac_addr = 0;
	int unit, error = 0, rid;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->bge_dev = dev;
	sc->bge_unit = unit;

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(command & PCIM_CMD_MEMEN)) {
		printf("bge%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}

	rid = BGE_PCI_BAR0;
	sc->bge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->bge_res == NULL) {
		printf("bge%d: couldn't map memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->bge_btag = rman_get_bustag(sc->bge_res);
	sc->bge_bhandle = rman_get_bushandle(sc->bge_res);
	sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res);

	/*
	 * XXX FIXME: rman_get_virtual() on the alpha is currently
	 * broken and returns a physical address instead of a kernel
	 * virtual address. Consequently, we need to do a little
	 * extra mangling of the vhandle on the alpha. This should
	 * eventually be fixed! The whole idea here is to get rid
	 * of platform dependencies.
	 */
#ifdef __alpha__
	if (pci_cvt_to_bwx(sc->bge_vhandle))
		sc->bge_vhandle = pci_cvt_to_bwx(sc->bge_vhandle);
	else
		sc->bge_vhandle = pci_cvt_to_dense(sc->bge_vhandle);
	sc->bge_vhandle = ALPHA_PHYS_TO_K0SEG(sc->bge_vhandle);
#endif

	/* Allocate interrupt */
	rid = 0;

	sc->bge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->bge_irq == NULL) {
		printf("bge%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET,
	    bge_intr, sc, &sc->bge_intrhand);

	if (error) {
		bge_release_resources(sc);
		printf("bge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	sc->bge_unit = unit;

	/* Try to reset the chip. */
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		printf("bge%d: chip initialization failed\n", sc->bge_unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	mac_addr = bge_readmem_ind(sc, 0x0c14);
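	/*
	 * NIC memory at 0x0c14 holds the station address preceded by
	 * the 16-bit signature 0x484b (ASCII "HK"), presumably left
	 * there by the bootcode. If the signature checks out we trust
	 * the address; otherwise we fall back to the EEPROM below.
	 */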
	if ((mac_addr >> 16) == 0x484b) {
		sc->arpcom.ac_enaddr[0] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[1] = (u_char)mac_addr;
		mac_addr = bge_readmem_ind(sc, 0x0c18);
		sc->arpcom.ac_enaddr[2] = (u_char)(mac_addr >> 24);
		sc->arpcom.ac_enaddr[3] = (u_char)(mac_addr >> 16);
		sc->arpcom.ac_enaddr[4] = (u_char)(mac_addr >> 8);
		sc->arpcom.ac_enaddr[5] = (u_char)mac_addr;
	} else if (bge_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("bge%d: failed to read station address\n", unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/*
	 * A Broadcom chip was detected. Inform the world.
	 */
	printf("bge%d: Ethernet address: %6D\n", unit,
	    sc->arpcom.ac_enaddr, ":");

	/* Allocate the general information block and ring buffers. */
	sc->bge_rdata = contigmalloc(sizeof(struct bge_ring_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->bge_rdata == NULL) {
		bge_release_resources(sc);
		error = ENXIO;
		printf("bge%d: no memory for list buffers!\n", sc->bge_unit);
		goto fail;
	}

	bzero(sc->bge_rdata, sizeof(struct bge_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (bge_alloc_jumbo_mem(sc)) {
		printf("bge%d: jumbo buffer allocation "
		    "failed\n", sc->bge_unit);
		bge_release_resources(sc);
		error = ENXIO;
		goto fail;
	}

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* Set up ifnet structure */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = sc->bge_unit;
	ifp->if_name = "bge";
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = bge_start;
	ifp->if_watchdog = bge_watchdog;
	ifp->if_init = bge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = BGE_TX_RING_CNT - 1;
	ifp->if_hwassist = BGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Save ASIC rev. */

	sc->bge_chipid =
	    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) &
	    BGE_PCIMISCCTL_ASICREV;
	sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid);
	sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid);

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the first 32k of NIC internal memory,
	 * or fall back to examining the EEPROM if necessary.
	 * Note: on some BCM5700 cards, this value appears to be unset.
	 * If that's the case, we have to rely on identifying the NIC
	 * by its PCI subsystem ID, as we do below for the SysKonnect
	 * SK-9D41.
	 */
	if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)
		hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
	else {
		bge_read_eeprom(sc, (caddr_t)&hwcfg,
		    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
		hwcfg = ntohl(hwcfg);
	}

	if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK,
		    bge_ifmedia_upd, bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
	} else {
		/*
		 * Do transceiver setup.
		 */
		if (mii_phy_probe(dev, &sc->bge_miibus,
		    bge_ifmedia_upd, bge_ifmedia_sts)) {
			printf("bge%d: MII without any PHY!\n", sc->bge_unit);
			bge_release_resources(sc);
			bge_free_jumbo_mem(sc);
			error = ENXIO;
			goto fail;
		}
	}

	/*
	 * When using the BCM5701 in PCI-X mode, data corruption has
	 * been observed in the first few bytes of some received packets.
	 * Aligning the packet buffer in memory eliminates the corruption.
	 * Unfortunately, this misaligns the packet payloads. On platforms
	 * which do not support unaligned accesses, we will realign the
	 * payloads by copying the received packets.
	 */
	switch (sc->bge_chipid) {
	case BGE_CHIPID_BCM5701_A0:
	case BGE_CHIPID_BCM5701_B0:
	case BGE_CHIPID_BCM5701_B2:
	case BGE_CHIPID_BCM5701_B5:
		/* If in PCI-X mode, work around the alignment bug. */
		if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) &
		    (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
		    BGE_PCISTATE_PCI_BUSSPEED)
			sc->bge_rx_alignment_bug = 1;
		break;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	callout_handle_init(&sc->bge_stat_ch);

fail:
	splx(s);

	return(error);
}

static int
bge_detach(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct ifnet *ifp;
	int s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	bge_stop(sc);
	bge_reset(sc);

	if (sc->bge_tbi) {
		ifmedia_removeall(&sc->bge_ifmedia);
	} else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}

	bge_release_resources(sc);
	bge_free_jumbo_mem(sc);

	splx(s);

	return(0);
}

static void
bge_release_resources(sc)
	struct bge_softc *sc;
{
	device_t dev;

	dev = sc->bge_dev;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);

	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	if (sc->bge_rdata != NULL)
		contigfree(sc->bge_rdata,
		    sizeof(struct bge_ring_data), M_DEVBUF);

	return;
}

1902static void
1903bge_reset(sc)
1904 struct bge_softc *sc;
1905{
1906 device_t dev;
1907 u_int32_t cachesize, command, pcistate;
1908 int i, val = 0;
1909
1910 dev = sc->bge_dev;
1911
1912 /* Save some important PCI state. */
1913 cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
1914 command = pci_read_config(dev, BGE_PCI_CMD, 4);
1915 pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);
1916
1917 pci_write_config(dev, BGE_PCI_MISC_CTL,
1918 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1919 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1920
1921 /* Issue global reset */
1922 bge_writereg_ind(sc, BGE_MISC_CFG,
1923 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
1924
1925 DELAY(1000);
1926
1927 /* Reset some of the PCI state that got zapped by reset */
1928 pci_write_config(dev, BGE_PCI_MISC_CTL,
1929 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
1930 BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
1931 pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
1932 pci_write_config(dev, BGE_PCI_CMD, command, 4);
1933 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
1934
1935 /*
1936 * Prevent PXE restart: write a magic number to the
1937 * general communications memory at 0xB50.
1938 */
1939 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
1940 /*
1941 * Poll the value location we just wrote until
1942 * we see the 1's complement of the magic number.
1943 * This indicates that the firmware initialization
1944 * is complete.
1945 */
1946 for (i = 0; i < BGE_TIMEOUT; i++) {
1947 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
1948 if (val == ~BGE_MAGIC_NUMBER)
1949 break;
1950 DELAY(10);
1951 }
1952
1953 if (i == BGE_TIMEOUT) {
1954 printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
1955 return;
1956 }
1957
1958 /*
1959 * XXX Wait for the value of the PCISTATE register to
1960 * return to its original pre-reset state. This is a
1961 * fairly good indicator of reset completion. If we don't
1962 * wait for the reset to fully complete, trying to read
1963 * from the device's non-PCI registers may yield garbage
1964 * results.
1965 */
1966 for (i = 0; i < BGE_TIMEOUT; i++) {
1967 if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
1968 break;
1969 DELAY(10);
1970 }
1971
1972 /* Enable memory arbiter. */
1973 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
1974
1975 /* Fix up byte swapping */
1976 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
1977 BGE_MODECTL_BYTESWAP_DATA);
1978
1979 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1980
1981 DELAY(10000);
1982
1983 return;
1984}
1985
1986/*
1987 * Frame reception handling. This is called if there's a frame
1988 * on the receive return list.
1989 *
1990 * Note: we have to be able to handle two possibilities here:
1991 * 1) the frame is from the jumbo recieve ring
1992 * 2) the frame is from the standard receive ring
1993 */
1994
1995static void
1996bge_rxeof(sc)
1997 struct bge_softc *sc;
1998{
1999 struct ifnet *ifp;
2000 int stdcnt = 0, jumbocnt = 0;
2001
2002 ifp = &sc->arpcom.ac_if;
2003
2004 while(sc->bge_rx_saved_considx !=
2005 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2006 struct bge_rx_bd *cur_rx;
2007 u_int32_t rxidx;
2008 struct ether_header *eh;
2009 struct mbuf *m = NULL;
2010 u_int16_t vlan_tag = 0;
2011 int have_tag = 0;
2012
2013 cur_rx =
2014 &sc->bge_rdata->bge_rx_return_ring[sc->bge_rx_saved_considx];
2015
2016 rxidx = cur_rx->bge_idx;
2017 BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);
2018
2019 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2020 have_tag = 1;
2021 vlan_tag = cur_rx->bge_vlan_tag;
2022 }
2023
2024 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2025 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2026 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2027 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2028 jumbocnt++;
2029 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2030 ifp->if_ierrors++;
2031 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2032 continue;
2033 }
2034 if (bge_newbuf_jumbo(sc,
2035 sc->bge_jumbo, NULL) == ENOBUFS) {
2036 ifp->if_ierrors++;
2037 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2038 continue;
2039 }
2040 } else {
2041 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2042 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2043 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2044 stdcnt++;
2045 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2046 ifp->if_ierrors++;
2047 bge_newbuf_std(sc, sc->bge_std, m);
2048 continue;
2049 }
2050 if (bge_newbuf_std(sc, sc->bge_std,
2051 NULL) == ENOBUFS) {
2052 ifp->if_ierrors++;
2053 bge_newbuf_std(sc, sc->bge_std, m);
2054 continue;
2055 }
2056 }
2057
2058 ifp->if_ipackets++;
2059#ifndef __i386__
2060 /*
2061 * The i386 allows unaligned accesses, but for other
2062 * platforms we must make sure the payload is aligned.
2063 */
2064 if (sc->bge_rx_alignment_bug) {
2065 bcopy(m->m_data, m->m_data + ETHER_ALIGN,
2066 cur_rx->bge_len);
2067 m->m_data += ETHER_ALIGN;
2068 }
2069#endif
2070 eh = mtod(m, struct ether_header *);
2071 m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
2072 m->m_pkthdr.rcvif = ifp;
2073
2074 /* Remove header from mbuf and pass it on. */
2075 m_adj(m, sizeof(struct ether_header));
2076
2077#if 0 /* currently broken for some packets, possibly related to TCP options */
2078 if (ifp->if_hwassist) {
2079 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2080 if ((cur_rx->bge_ip_csum ^ 0xffff) == 0)
2081 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2082 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2083 m->m_pkthdr.csum_data =
2084 cur_rx->bge_tcp_udp_csum;
2085 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
2086 }
2087 }
2088#endif
2089
2090 /*
2091 * If we received a packet with a vlan tag, pass it
2092 * to vlan_input() instead of ether_input().
2093 */
2094 if (have_tag) {
2095 VLAN_INPUT_TAG(eh, m, vlan_tag);
2096 have_tag = vlan_tag = 0;
2097 continue;
2098 }
2099
2100 ether_input(ifp, eh, m);
2101 }
2102
2103 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2104 if (stdcnt)
2105 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2106 if (jumbocnt)
2107 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2108
2109 return;
2110}
2111
2112static void
2113bge_txeof(sc)
2114 struct bge_softc *sc;
2115{
2116 struct bge_tx_bd *cur_tx = NULL;
2117 struct ifnet *ifp;
2118
2119 ifp = &sc->arpcom.ac_if;
2120
2121 /*
2122 * Go through our tx ring and free mbufs for those
2123 * frames that have been sent.
2124 */
2125 while (sc->bge_tx_saved_considx !=
2126 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2127 u_int32_t idx = 0;
2128
2129 idx = sc->bge_tx_saved_considx;
2130 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2131 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2132 ifp->if_opackets++;
2133 if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
2134 m_freem(sc->bge_cdata.bge_tx_chain[idx]);
2135 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2136 }
2137 sc->bge_txcnt--;
2138 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2139 ifp->if_timer = 0;
2140 }
2141
2142 if (cur_tx != NULL)
2143 ifp->if_flags &= ~IFF_OACTIVE;
2144
2145 return;
2146}
2147
2148static void
2149bge_intr(xsc)
2150 void *xsc;
2151{
2152 struct bge_softc *sc;
2153 struct ifnet *ifp;
2154
2155 sc = xsc;
2156 ifp = &sc->arpcom.ac_if;
2157
2158#ifdef notdef
2159 /* Avoid this for now -- checking this register is expensive. */
2160 /* Make sure this is really our interrupt. */
2161 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2162 return;
2163#endif
2164 /* Ack interrupt and stop others from occuring. */
2165 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2166
2167 /*
2168 * Process link state changes.
2169 * Grrr. The link status word in the status block does
2170 * not work correctly on the BCM5700 rev AX and BX chips,
2171 * according to all avaibable information. Hence, we have
2172 * to enable MII interrupts in order to properly obtain
2173 * async link changes. Unfortunately, this also means that
2174 * we have to read the MAC status register to detect link
2175 * changes, thereby adding an additional register access to
2176 * the interrupt handler.
2177 */
2178
2179 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
2180 u_int32_t status;
2181
2182 status = CSR_READ_4(sc, BGE_MAC_STS);
2183 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2184 sc->bge_link = 0;
2185 untimeout(bge_tick, sc, sc->bge_stat_ch);
2186 bge_tick(sc);
2187 /* Clear the interrupt */
2188 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2189 BGE_EVTENB_MI_INTERRUPT);
2190 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
2191 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
2192 BRGPHY_INTRS);
2193 }
2194 } else {
2195 if ((sc->bge_rdata->bge_status_block.bge_status &
2196 BGE_STATFLAG_UPDATED) &&
2197 (sc->bge_rdata->bge_status_block.bge_status &
2198 BGE_STATFLAG_LINKSTATE_CHANGED)) {
2199 sc->bge_rdata->bge_status_block.bge_status &= ~(BGE_STATFLAG_UPDATED|BGE_STATFLAG_LINKSTATE_CHANGED);
2200 sc->bge_link = 0;
2201 untimeout(bge_tick, sc, sc->bge_stat_ch);
2202 bge_tick(sc);
2203 /* Clear the interrupt */
2204 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2205 BGE_MACSTAT_CFG_CHANGED);
2206
2207 /* Force flush the status block cached by PCI bridge */
2208 CSR_READ_4(sc, BGE_MBX_IRQ0_LO);
2209 }
2210 }
2211
2212 if (ifp->if_flags & IFF_RUNNING) {
2213 /* Check RX return ring producer/consumer */
2214 bge_rxeof(sc);
2215
2216 /* Check TX ring producer/consumer */
2217 bge_txeof(sc);
2218 }
2219
2220 bge_handle_events(sc);
2221
2222 /* Re-enable interrupts. */
2223 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2224
2225 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
2226 bge_start(ifp);
2227
2228 return;
2229}
2230
2231static void
2232bge_tick(xsc)
2233 void *xsc;
2234{
2235 struct bge_softc *sc;
2236 struct mii_data *mii = NULL;
2237 struct ifmedia *ifm = NULL;
2238 struct ifnet *ifp;
2239 int s;
2240
2241 sc = xsc;
2242 ifp = &sc->arpcom.ac_if;
2243
2244 s = splimp();
2245
2246 bge_stats_update(sc);
2247 sc->bge_stat_ch = timeout(bge_tick, sc, hz);
2248 if (sc->bge_link) {
2249 splx(s);
2250 return;
2251 }
2252
2253 if (sc->bge_tbi) {
2254 ifm = &sc->bge_ifmedia;
2255 if (CSR_READ_4(sc, BGE_MAC_STS) &
2256 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2257 sc->bge_link++;
2258 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2259 printf("bge%d: gigabit link up\n", sc->bge_unit);
2260 if (ifp->if_snd.ifq_head != NULL)
2261 bge_start(ifp);
2262 }
2263 splx(s);
2264 return;
2265 }
2266
2267 mii = device_get_softc(sc->bge_miibus);
2268 mii_tick(mii);
2269
2270 if (!sc->bge_link) {
2271 mii_pollstat(mii);
2272 if (mii->mii_media_status & IFM_ACTIVE &&
2273 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2274 sc->bge_link++;
2275 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX ||
2276 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2277 printf("bge%d: gigabit link up\n",
2278 sc->bge_unit);
2279 if (ifp->if_snd.ifq_head != NULL)
2280 bge_start(ifp);
2281 }
2282 }
2283
2284 splx(s);
2285
2286 return;
2287}
2288
2289static void
2290bge_stats_update(sc)
2291 struct bge_softc *sc;
2292{
2293 struct ifnet *ifp;
2294 struct bge_stats *stats;
2295
2296 ifp = &sc->arpcom.ac_if;
2297
2298 stats = (struct bge_stats *)(sc->bge_vhandle +
2299 BGE_MEMWIN_START + BGE_STATS_BLOCK);
2300
2301 ifp->if_collisions +=
2302 (stats->dot3StatsSingleCollisionFrames.bge_addr_lo +
2303 stats->dot3StatsMultipleCollisionFrames.bge_addr_lo +
2304 stats->dot3StatsExcessiveCollisions.bge_addr_lo +
2305 stats->dot3StatsLateCollisions.bge_addr_lo) -
2306 ifp->if_collisions;
2307
2308#ifdef notdef
2309 ifp->if_collisions +=
2310 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2311 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2312 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2313 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2314 ifp->if_collisions;
2315#endif
2316
2317 return;
2318}
2319
2320/*
2321 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2322 * pointers to descriptors.
2323 */
2324static int
2325bge_encap(sc, m_head, txidx)
2326 struct bge_softc *sc;
2327 struct mbuf *m_head;
2328 u_int32_t *txidx;
2329{
2330 struct bge_tx_bd *f = NULL;
2331 struct mbuf *m;
2332 u_int32_t frag, cur, cnt = 0;
2333 u_int16_t csum_flags = 0;
2334 struct ifvlan *ifv = NULL;
2335
2336 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
2337 m_head->m_pkthdr.rcvif != NULL &&
2338 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
2339 ifv = m_head->m_pkthdr.rcvif->if_softc;
2340
2341 m = m_head;
2342 cur = frag = *txidx;
2343
2344 if (m_head->m_pkthdr.csum_flags) {
2345 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
2346 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2347 if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
2348 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2349 if (m_head->m_flags & M_LASTFRAG)
2350 csum_flags |= BGE_TXBDFLAG_IP_FRAG_END;
2351 else if (m_head->m_flags & M_FRAG)
2352 csum_flags |= BGE_TXBDFLAG_IP_FRAG;
2353 }
2354 /*
2355 * Start packing the mbufs in this chain into
2356 * the fragment pointers. Stop when we run out
2357 * of fragments or hit the end of the mbuf chain.
2358 */
2359 for (m = m_head; m != NULL; m = m->m_next) {
2360 if (m->m_len != 0) {
2361 f = &sc->bge_rdata->bge_tx_ring[frag];
2362 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2363 break;
2364 BGE_HOSTADDR(f->bge_addr) =
2365 vtophys(mtod(m, vm_offset_t));
2366 f->bge_len = m->m_len;
2367 f->bge_flags = csum_flags;
2368 if (ifv != NULL) {
2369 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2370 f->bge_vlan_tag = ifv->ifv_tag;
2371 } else {
2372 f->bge_vlan_tag = 0;
2373 }
2374 /*
2375 * Sanity check: avoid coming within 16 descriptors
2376 * of the end of the ring.
2377 */
2378 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
2379 return(ENOBUFS);
2380 cur = frag;
2381 BGE_INC(frag, BGE_TX_RING_CNT);
2382 cnt++;
2383 }
2384 }
2385
2386 if (m != NULL)
2387 return(ENOBUFS);
2388
2389 if (frag == sc->bge_tx_saved_considx)
2390 return(ENOBUFS);
2391
2392 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2393 sc->bge_cdata.bge_tx_chain[cur] = m_head;
2394 sc->bge_txcnt += cnt;
2395
2396 *txidx = frag;
2397
2398 return(0);
2399}
2400
2401/*
2402 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2403 * to the mbuf data regions directly in the transmit descriptors.
2404 */
2405static void
2406bge_start(ifp)
2407 struct ifnet *ifp;
2408{
2409 struct bge_softc *sc;
2410 struct mbuf *m_head = NULL;
2411 u_int32_t prodidx = 0;
2412
2413 sc = ifp->if_softc;
2414
2415 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
2416 return;
2417
2418 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
2419
2420 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2421 IF_DEQUEUE(&ifp->if_snd, m_head);
2422 if (m_head == NULL)
2423 break;
2424
2425 /*
2426 * XXX
2427 * safety overkill. If this is a fragmented packet chain
2428 * with delayed TCP/UDP checksums, then only encapsulate
2429 * it if we have enough descriptors to handle the entire
2430 * chain at once.
2431 * (paranoia -- may not actually be needed)
2432 */
2433 if (m_head->m_flags & M_FIRSTFRAG &&
2434 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2435 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2436 m_head->m_pkthdr.csum_data + 16) {
2437 IF_PREPEND(&ifp->if_snd, m_head);
2438 ifp->if_flags |= IFF_OACTIVE;
2439 break;
2440 }
2441 }
2442
2443 /*
2444 * Pack the data into the transmit ring. If we
2445 * don't have room, set the OACTIVE flag and wait
2446 * for the NIC to drain the ring.
2447 */
2448 if (bge_encap(sc, m_head, &prodidx)) {
2449 IF_PREPEND(&ifp->if_snd, m_head);
2450 ifp->if_flags |= IFF_OACTIVE;
2451 break;
2452 }
2453
2454 /*
2455 * If there's a BPF listener, bounce a copy of this frame
2456 * to him.
2457 */
2458 if (ifp->if_bpf)
2459 bpf_mtap(ifp, m_head);
2460 }
2461
2462 /* Transmit */
2463 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2464 /* 5700 b2 errata */
2465 if (sc->bge_chiprev == BGE_CHIPREV_5700_BX)
2466 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2467
2468 /*
2469 * Set a timeout in case the chip goes out to lunch.
2470 */
2471 ifp->if_timer = 5;
2472
2473 return;
2474}
2475
2476static void
2477bge_init(xsc)
2478 void *xsc;
2479{
2480 struct bge_softc *sc = xsc;
2481 struct ifnet *ifp;
2482 u_int16_t *m;
2483 int s;
2484
2485 s = splimp();
2486
2487 ifp = &sc->arpcom.ac_if;
2488
2489 if (ifp->if_flags & IFF_RUNNING) {
2490 splx(s);
2491 return;
2492 }
2493
2494 /* Cancel pending I/O and flush buffers. */
2495 bge_stop(sc);
2496 bge_reset(sc);
2497 bge_chipinit(sc);
2498
2499 /*
2500 * Init the various state machines, ring
2501 * control blocks and firmware.
2502 */
2503 if (bge_blockinit(sc)) {
2504 printf("bge%d: initialization failure\n", sc->bge_unit);
2505 splx(s);
2506 return;
2507 }
2508
2509 ifp = &sc->arpcom.ac_if;
2510
2511 /* Specify MTU. */
2512 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2513 ETHER_HDR_LEN + ETHER_CRC_LEN);
2514
2515 /* Load our MAC address. */
2516 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
2517 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2518 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2519
2520 /* Enable or disable promiscuous mode as needed. */
2521 if (ifp->if_flags & IFF_PROMISC) {
2522 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2523 } else {
2524 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2525 }
2526
2527 /* Program multicast filter. */
2528 bge_setmulti(sc);
2529
2530 /* Init RX ring. */
2531 bge_init_rx_ring_std(sc);
2532
2533 /* Init jumbo RX ring. */
2534 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2535 bge_init_rx_ring_jumbo(sc);
2536
2537 /* Init our RX return ring index */
2538 sc->bge_rx_saved_considx = 0;
2539
2540 /* Init TX ring. */
2541 bge_init_tx_ring(sc);
2542
2543 /* Turn on transmitter */
2544 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2545
2546 /* Turn on receiver */
2547 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2548
2549 /* Tell firmware we're alive. */
2550 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2551
2552 /* Enable host interrupts. */
2553 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2554 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2555 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2556
2557 bge_ifmedia_upd(ifp);
2558
2559 ifp->if_flags |= IFF_RUNNING;
2560 ifp->if_flags &= ~IFF_OACTIVE;
2561
2562 splx(s);
2563
2564 sc->bge_stat_ch = timeout(bge_tick, sc, hz);
2565
2566 return;
2567}
2568
2569/*
2570 * Set media options.
2571 */
2572static int
2573bge_ifmedia_upd(ifp)
2574 struct ifnet *ifp;
2575{
2576 struct bge_softc *sc;
2577 struct mii_data *mii;
2578 struct ifmedia *ifm;
2579
2580 sc = ifp->if_softc;
2581 ifm = &sc->bge_ifmedia;
2582
2583 /* If this is a 1000baseX NIC, enable the TBI port. */
2584 if (sc->bge_tbi) {
2585 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2586 return(EINVAL);
2587 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2588 case IFM_AUTO:
2589 break;
2590 case IFM_1000_SX:
2591 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2592 BGE_CLRBIT(sc, BGE_MAC_MODE,
2593 BGE_MACMODE_HALF_DUPLEX);
2594 } else {
2595 BGE_SETBIT(sc, BGE_MAC_MODE,
2596 BGE_MACMODE_HALF_DUPLEX);
2597 }
2598 break;
2599 default:
2600 return(EINVAL);
2601 }
2602 return(0);
2603 }
2604
2605 mii = device_get_softc(sc->bge_miibus);
2606 sc->bge_link = 0;
2607 if (mii->mii_instance) {
2608 struct mii_softc *miisc;
2609 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2610 miisc = LIST_NEXT(miisc, mii_list))
2611 mii_phy_reset(miisc);
2612 }
2613 mii_mediachg(mii);
2614
2615 return(0);
2616}
2617
2618/*
2619 * Report current media status.
2620 */
2621static void
2622bge_ifmedia_sts(ifp, ifmr)
2623 struct ifnet *ifp;
2624 struct ifmediareq *ifmr;
2625{
2626 struct bge_softc *sc;
2627 struct mii_data *mii;
2628
2629 sc = ifp->if_softc;
2630
2631 if (sc->bge_tbi) {
2632 ifmr->ifm_status = IFM_AVALID;
2633 ifmr->ifm_active = IFM_ETHER;
2634 if (CSR_READ_4(sc, BGE_MAC_STS) &
2635 BGE_MACSTAT_TBI_PCS_SYNCHED)
2636 ifmr->ifm_status |= IFM_ACTIVE;
2637 ifmr->ifm_active |= IFM_1000_SX;
2638 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2639 ifmr->ifm_active |= IFM_HDX;
2640 else
2641 ifmr->ifm_active |= IFM_FDX;
2642 return;
2643 }
2644
2645 mii = device_get_softc(sc->bge_miibus);
2646 mii_pollstat(mii);
2647 ifmr->ifm_active = mii->mii_media_active;
2648 ifmr->ifm_status = mii->mii_media_status;
2649
2650 return;
2651}
2652
2653static int
2654bge_ioctl(ifp, command, data)
2655 struct ifnet *ifp;
2656 u_long command;
2657 caddr_t data;
2658{
2659 struct bge_softc *sc = ifp->if_softc;
2660 struct ifreq *ifr = (struct ifreq *) data;
2661 int s, mask, error = 0;
2662 struct mii_data *mii;
2663
2664 s = splimp();
2665
2666 switch(command) {
2667 case SIOCSIFADDR:
2668 case SIOCGIFADDR:
2669 error = ether_ioctl(ifp, command, data);
2670 break;
2671 case SIOCSIFMTU:
2672 if (ifr->ifr_mtu > BGE_JUMBO_MTU)
2673 error = EINVAL;
2674 else {
2675 ifp->if_mtu = ifr->ifr_mtu;
2676 ifp->if_flags &= ~IFF_RUNNING;
2677 bge_init(sc);
2678 }
2679 break;
2680 case SIOCSIFFLAGS:
2681 if (ifp->if_flags & IFF_UP) {
2682 /*
2683 * If only the state of the PROMISC flag changed,
2684 * then just use the 'set promisc mode' command
2685 * instead of reinitializing the entire NIC. Doing
2686 * a full re-init means reloading the firmware and
2687 * waiting for it to start up, which may take a
2688 * second or two.
2689 */
2690 if (ifp->if_flags & IFF_RUNNING &&
2691 ifp->if_flags & IFF_PROMISC &&
2692 !(sc->bge_if_flags & IFF_PROMISC)) {
2693 BGE_SETBIT(sc, BGE_RX_MODE,
2694 BGE_RXMODE_RX_PROMISC);
2695 } else if (ifp->if_flags & IFF_RUNNING &&
2696 !(ifp->if_flags & IFF_PROMISC) &&
2697 sc->bge_if_flags & IFF_PROMISC) {
2698 BGE_CLRBIT(sc, BGE_RX_MODE,
2699 BGE_RXMODE_RX_PROMISC);
2700 } else
2701 bge_init(sc);
2702 } else {
2703 if (ifp->if_flags & IFF_RUNNING) {
2704 bge_stop(sc);
2705 }
2706 }
2707 sc->bge_if_flags = ifp->if_flags;
2708 error = 0;
2709 break;
2710 case SIOCADDMULTI:
2711 case SIOCDELMULTI:
2712 if (ifp->if_flags & IFF_RUNNING) {
2713 bge_setmulti(sc);
2714 error = 0;
2715 }
2716 break;
2717 case SIOCSIFMEDIA:
2718 case SIOCGIFMEDIA:
2719 if (sc->bge_tbi) {
2720 error = ifmedia_ioctl(ifp, ifr,
2721 &sc->bge_ifmedia, command);
2722 } else {
2723 mii = device_get_softc(sc->bge_miibus);
2724 error = ifmedia_ioctl(ifp, ifr,
2725 &mii->mii_media, command);
2726 }
2727 break;
2728 case SIOCSIFCAP:
2729 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2730 if (mask & IFCAP_HWCSUM) {
2731 if (IFCAP_HWCSUM & ifp->if_capenable)
2732 ifp->if_capenable &= ~IFCAP_HWCSUM;
2733 else
2734 ifp->if_capenable |= IFCAP_HWCSUM;
2735 }
2736 error = 0;
2737 break;
2738 default:
2739 error = EINVAL;
2740 break;
2741 }
2742
2743 (void)splx(s);
2744
2745 return(error);
2746}
2747
2748static void
2749bge_watchdog(ifp)
2750 struct ifnet *ifp;
2751{
2752 struct bge_softc *sc;
2753
2754 sc = ifp->if_softc;
2755
2756 printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit);
2757
2758 ifp->if_flags &= ~IFF_RUNNING;
2759 bge_init(sc);
2760
2761 ifp->if_oerrors++;
2762
2763 return;
2764}
2765
2766/*
2767 * Stop the adapter and free any mbufs allocated to the
2768 * RX and TX lists.
2769 */
2770static void
2771bge_stop(sc)
2772 struct bge_softc *sc;
2773{
2774 struct ifnet *ifp;
2775 struct ifmedia_entry *ifm;
2776 struct mii_data *mii = NULL;
2777 int mtmp, itmp;
2778
2779 ifp = &sc->arpcom.ac_if;
2780
2781 if (!sc->bge_tbi)
2782 mii = device_get_softc(sc->bge_miibus);
2783
2784 untimeout(bge_tick, sc, sc->bge_stat_ch);
2785
2786 /*
2787 * Disable all of the receiver blocks
2788 */
2789 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2790 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2791 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2792 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2793 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2794 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2795 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2796
2797 /*
2798 * Disable all of the transmit blocks
2799 */
2800 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2801 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2802 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2803 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2804 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2805 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2806 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2807
2808 /*
2809 * Shut down all of the memory managers and related
2810 * state machines.
2811 */
2812 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2813 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2814 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2815 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2816 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2817 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2818 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2819
2820 /* Disable host interrupts. */
2821 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2822 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2823
2824 /*
2825 * Tell firmware we're shutting down.
2826 */
2827 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2828
2829 /* Free the RX lists. */
2830 bge_free_rx_ring_std(sc);
2831
2832 /* Free jumbo RX list. */
2833 bge_free_rx_ring_jumbo(sc);
2834
2835 /* Free TX buffers. */
2836 bge_free_tx_ring(sc);
2837
2838 /*
2839 * Isolate/power down the PHY, but leave the media selection
2840 * unchanged so that things will be put back to normal when
2841 * we bring the interface back up.
2842 */
2843 if (!sc->bge_tbi) {
2844 itmp = ifp->if_flags;
2845 ifp->if_flags |= IFF_UP;
2846 ifm = mii->mii_media.ifm_cur;
2847 mtmp = ifm->ifm_media;
2848 ifm->ifm_media = IFM_ETHER|IFM_NONE;
2849 mii_mediachg(mii);
2850 ifm->ifm_media = mtmp;
2851 ifp->if_flags = itmp;
2852 }
2853
2854 sc->bge_link = 0;
2855
2856 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2857
2858 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2859
2860 return;
2861}
2862
2863/*
2864 * Stop all chip I/O so that the kernel's probe routines don't
2865 * get confused by errant DMAs when rebooting.
2866 */
2867static void
2868bge_shutdown(dev)
2869 device_t dev;
2870{
2871 struct bge_softc *sc;
2872
2873 sc = device_get_softc(dev);
2874
2875 bge_stop(sc);
2876 bge_reset(sc);
2877
2878 return;
2879}