Merge from vendor branch CVS:
[dragonfly.git] / sys / dev / netif / gx / if_gx.c
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/dev/gx/if_gx.c,v 1.2.2.3 2001/12/14 19:51:39 jlemon Exp $
30  * $DragonFly: src/sys/dev/netif/gx/Attic/if_gx.c,v 1.22 2005/11/28 17:13:42 dillon Exp $
31  */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/queue.h>
41 #include <sys/serialize.h>
42
43 #include <sys/thread2.h>
44
45 #include <net/if.h>
46 #include <net/ifq_var.h>
47 #include <net/if_arp.h>
48 #include <net/ethernet.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51
52 #include <net/bpf.h>
53 #include <net/if_types.h>
54 #include <net/vlan/if_vlan_var.h>
55
56 #include <netinet/in_systm.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61
62 #include <vm/vm.h>              /* for vtophys */
63 #include <vm/pmap.h>            /* for vtophys */
64 #include <machine/clock.h>      /* for DELAY */
65 #include <machine/bus_memio.h>
66 #include <machine/bus.h>
67 #include <machine/resource.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70
71 #include <bus/pci/pcireg.h>
72 #include <bus/pci/pcivar.h>
73
74 #include "../mii_layer/mii.h"
75 #include "../mii_layer/miivar.h"
76
77 #include "if_gxreg.h"
78 #include "if_gxvar.h"
79
80 #include "miibus_if.h"
81
82 #define TUNABLE_TX_INTR_DELAY   100
83 #define TUNABLE_RX_INTR_DELAY   100
84
85 #define GX_CSUM_FEATURES        (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
86
87 /*
88  * Various supported device vendors/types and their names.
89  */
90 struct gx_device {
91         u_int16_t       vendor;
92         u_int16_t       device;
93         int             version_flags;
94         u_int32_t       version_ipg;
95         char            *name;
96 };
97
/*
 * Table of supported adapters, scanned by gx_match().  Terminated by a
 * sentinel entry whose name is NULL.  The three shifted fields in each
 * version_ipg value pack the transmit and two receive IPG timings.
 */
static struct gx_device gx_devs[] = {
        { INTEL_VENDORID, DEVICEID_WISEMAN,
            GXF_FORCE_TBI | GXF_OLD_REGS,
            10 | 2 << 10 | 10 << 20,
            "Intel Gigabit Ethernet (82542)" },
        { INTEL_VENDORID, DEVICEID_LIVINGOOD_FIBER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            6 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82543GC-F)" },
        { INTEL_VENDORID, DEVICEID_LIVINGOOD_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82543GC-T)" },
#if 0
/* notyet.. */
        { INTEL_VENDORID, DEVICEID_CORDOVA_FIBER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            6 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544EI-F)" },
        { INTEL_VENDORID, DEVICEID_CORDOVA_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544EI-T)" },
        { INTEL_VENDORID, DEVICEID_CORDOVA2_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544GC-T)" },
#endif
        { 0, 0, 0, NULL }       /* sentinel */
};
128
/*
 * Per-revision register offset maps for the RX and TX rings and their
 * interrupt-delay/DMA-control registers.  Chips flagged GXF_OLD_REGS
 * (the 82542) use the "old" offsets; gx_attach() copies the matching
 * map into the softc so the rest of the driver is layout-agnostic.
 */
static struct gx_regs new_regs = {
        GX_RX_RING_BASE, GX_RX_RING_LEN,
        GX_RX_RING_HEAD, GX_RX_RING_TAIL,
        GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,

        GX_TX_RING_BASE, GX_TX_RING_LEN,
        GX_TX_RING_HEAD, GX_TX_RING_TAIL,
        GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
};
static struct gx_regs old_regs = {
        GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
        GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
        GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,

        GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
        GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
        GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
};
147
148 static int      gx_probe(device_t dev);
149 static int      gx_attach(device_t dev);
150 static int      gx_detach(device_t dev);
151 static void     gx_shutdown(device_t dev);
152
153 static void     gx_intr(void *xsc);
154 static void     gx_init(void *xsc);
155
156 static struct   gx_device *gx_match(device_t dev);
157 static void     gx_eeprom_getword(struct gx_softc *gx, int addr,
158                     u_int16_t *dest);
159 static int      gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
160                     int cnt);
161 static int      gx_ifmedia_upd(struct ifnet *ifp);
162 static void     gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
163 static int      gx_miibus_readreg(device_t dev, int phy, int reg);
164 static void     gx_miibus_writereg(device_t dev, int phy, int reg, int value);
165 static void     gx_miibus_statchg(device_t dev);
166 static int      gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data,
167                     struct ucred *);
168 static void     gx_setmulti(struct gx_softc *gx);
169 static void     gx_reset(struct gx_softc *gx);
170 static void     gx_phy_reset(struct gx_softc *gx);
171 static void     gx_stop(struct gx_softc *gx);
172 static void     gx_watchdog(struct ifnet *ifp);
173 static void     gx_start(struct ifnet *ifp);
174
175 static int      gx_init_rx_ring(struct gx_softc *gx);
176 static void     gx_free_rx_ring(struct gx_softc *gx);
177 static int      gx_init_tx_ring(struct gx_softc *gx);
178 static void     gx_free_tx_ring(struct gx_softc *gx);
179
/*
 * Newbus glue: device methods, driver descriptor, and module
 * registration.  The driver attaches under pci and also acts as a
 * miibus parent for copper PHYs.
 */
static device_method_t gx_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         gx_probe),
        DEVMETHOD(device_attach,        gx_attach),
        DEVMETHOD(device_detach,        gx_detach),
        DEVMETHOD(device_shutdown,      gx_shutdown),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       gx_miibus_readreg),
        DEVMETHOD(miibus_writereg,      gx_miibus_writereg),
        DEVMETHOD(miibus_statchg,       gx_miibus_statchg),

        { 0, 0 }
};

static driver_t gx_driver = {
        "gx",
        gx_methods,
        sizeof(struct gx_softc)
};

static devclass_t gx_devclass;

DECLARE_DUMMY_MODULE(if_gx);
MODULE_DEPEND(if_gx, miibus, 1, 1, 1);
DRIVER_MODULE(if_gx, pci, gx_driver, gx_devclass, 0, 0);
DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0);
207
208 static struct gx_device *
209 gx_match(device_t dev)
210 {
211         int i;
212
213         for (i = 0; gx_devs[i].name != NULL; i++) {
214                 if ((pci_get_vendor(dev) == gx_devs[i].vendor) &&
215                     (pci_get_device(dev) == gx_devs[i].device))
216                         return (&gx_devs[i]);
217         }
218         return (NULL);
219 }
220
221 static int
222 gx_probe(device_t dev)
223 {
224         struct gx_device *gx_dev;
225
226         gx_dev = gx_match(dev);
227         if (gx_dev == NULL)
228                 return (ENXIO);
229
230         device_set_desc(dev, gx_dev->name);
231         return (0);
232 }
233
/*
 * Newbus attach entry point.  Enables PCI memory/bus-master access,
 * maps the register BAR, allocates the IRQ and the ring-descriptor
 * memory, reads the station address from the EEPROM, sets up the
 * ifnet, probes media (SERDES or MII PHY), and hooks the interrupt.
 * On any failure, jumps to "fail" which reuses gx_detach() to release
 * whatever was acquired so far.
 */
static int
gx_attach(device_t dev)
{
        struct gx_softc *gx;
        struct gx_device *gx_dev;
        struct ifnet *ifp;
        u_int32_t command;
        int rid;
        int error = 0;

        gx = device_get_softc(dev);
        gx->gx_dev = dev;

        /*
         * gx_match() is assumed to succeed here because gx_probe()
         * already matched this device -- NOTE(review): not re-checked.
         */
        gx_dev = gx_match(dev);
        gx->gx_vflags = gx_dev->version_flags;
        gx->gx_ipg = gx_dev->version_ipg;

        /*
         * Map control/status registers.
         */
        command = pci_read_config(dev, PCIR_COMMAND, 4);
        command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
        if (gx->gx_vflags & GXF_ENABLE_MWI)
                command |= PCIM_CMD_MWIEN;
        pci_write_config(dev, PCIR_COMMAND, command, 4);
        /* re-read to verify the bits actually stuck */
        command = pci_read_config(dev, PCIR_COMMAND, 4);

/* XXX check cache line size? */

        if ((command & PCIM_CMD_MEMEN) == 0) {
                device_printf(dev, "failed to enable memory mapping!\n");
                error = ENXIO;
                goto fail;
        }

        rid = GX_PCI_LOMEM;
        gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
#if 0
/* support PIO mode */
        rid = PCI_LOIO;
        gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
            RF_ACTIVE);
#endif

        if (gx->gx_res == NULL) {
                device_printf(dev, "couldn't map memory\n");
                error = ENXIO;
                goto fail;
        }

        gx->gx_btag = rman_get_bustag(gx->gx_res);
        gx->gx_bhandle = rman_get_bushandle(gx->gx_res);

        /* Allocate interrupt */
        rid = 0;
        gx->gx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);

        if (gx->gx_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /* compensate for different register mappings */
        if (gx->gx_vflags & GXF_OLD_REGS)
                gx->gx_reg = old_regs;
        else
                gx->gx_reg = new_regs;

        /* 3 words = 6 bytes of Ethernet address */
        if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
            GX_EEMAP_MAC, 3)) {
                device_printf(dev, "failed to read station address\n");
                error = ENXIO;
                goto fail;
        }

        /*
         * Allocate the ring buffers.  Physically contiguous,
         * page-aligned, below 4GB, since the chip DMAs into them.
         */
        gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF,
            M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

        if (gx->gx_rdata == NULL) {
                device_printf(dev, "no memory for list buffers!\n");
                error = ENXIO;
                goto fail;
        }
        bzero(gx->gx_rdata, sizeof(struct gx_ring_data));

        /* Set default tuneable values. */
        gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
        gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;

        /* Set up ifnet structure */
        ifp = &gx->arpcom.ac_if;
        ifp->if_softc = gx;
        if_initname(ifp, "gx", device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = gx_ioctl;
        ifp->if_start = gx_start;
        ifp->if_watchdog = gx_watchdog;
        ifp->if_init = gx_init;
        ifp->if_mtu = ETHERMTU;
        ifq_set_maxlen(&ifp->if_snd, GX_TX_RING_CNT - 1);
        ifq_set_ready(&ifp->if_snd);

        /* see if we can enable hardware checksumming */
        if (gx->gx_vflags & GXF_CSUM) {
                ifp->if_capabilities = IFCAP_HWCSUM;
                ifp->if_capenable = ifp->if_capabilities;
        }

        /*
         * Figure out transceiver type: forced TBI (82542), or as
         * reported by the chip's status register.
         */
        if (gx->gx_vflags & GXF_FORCE_TBI ||
            CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
                gx->gx_tbimode = 1;

        if (gx->gx_tbimode) {
                /* SERDES transceiver */
                ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
                    gx_ifmedia_sts);
                ifmedia_add(&gx->gx_media,
                    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
                ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
                ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
        } else {
                /* GMII/MII transceiver */
                gx_phy_reset(gx);
                if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd,
                    gx_ifmedia_sts)) {
                        device_printf(dev, "GMII/MII, PHY not detected\n");
                        error = ENXIO;
                        goto fail;
                }
        }

        /*
         * Call MI attach routines.
         */
        ether_ifattach(ifp, gx->arpcom.ac_enaddr, NULL);

        /* Hook the interrupt last, after the interface is fully set up. */
        error = bus_setup_intr(dev, gx->gx_irq, INTR_NETSAFE,
                               gx_intr, gx, &gx->gx_intrhand,
                               ifp->if_serializer);
        if (error) {
                ether_ifdetach(ifp);
                device_printf(dev, "couldn't setup irq\n");
                goto fail;
        }

        return (0);

fail:
        /* gx_detach() only releases resources that were acquired */
        gx_detach(dev);
        return (error);
}
390
/*
 * Initialize (or re-initialize) the hardware and bring the interface
 * up: reset and halt the chip, program the station and multicast
 * filters, set up the RX/TX rings, configure receiver/transmitter and
 * checksum offload, then release the chip from reset and enable
 * interrupts.  Called from attach paths and from gx_ioctl().
 */
static void
gx_init(void *xsc)
{
        struct gx_softc *gx = (struct gx_softc *)xsc;
        struct ifmedia *ifm;
        struct ifnet *ifp = &gx->arpcom.ac_if;
        u_int16_t *m;
        u_int32_t ctrl;
        int i, tmp;

        /* Disable host interrupts, halt chip. */
        gx_reset(gx);

        /* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
        gx_stop(gx);

        /* Load our MAC address, invalidate other 15 RX addresses. */
        m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
        CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
        CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
        for (i = 1; i < 16; i++)
                CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);

        /* Program multicast filter. */
        gx_setmulti(gx);

        /* Init RX ring. */
        gx_init_rx_ring(gx);

        /* Init TX ring. */
        gx_init_tx_ring(gx);

        if (gx->gx_vflags & GXF_DMA) {
                /* set up DMA control */
                CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
                CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
        }

        /* enable receiver */
        ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K;
        ctrl |= GX_RXC_BCAST_ACCEPT;

        /* Enable or disable promiscuous mode as needed. */
        if (ifp->if_flags & IFF_PROMISC)
                ctrl |= GX_RXC_UNI_PROMISC;

        /* This is required if we want to accept jumbo frames */
        if (ifp->if_mtu > ETHERMTU)
                ctrl |= GX_RXC_LONG_PKT_ENABLE;

        /* setup receive checksum control */
        if (ifp->if_capenable & IFCAP_RXCSUM)
                CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
                    GX_CSUM_TCP/* | GX_CSUM_IP*/);

        /* setup transmit checksum control */
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist = GX_CSUM_FEATURES;

        ctrl |= GX_RXC_STRIP_ETHERCRC;          /* not on 82542? */
        CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);

        /* enable transmitter */
        ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;

        /* XXX we should support half-duplex here too... */
        ctrl |= GX_TXC_COLL_TIME_FDX;

        CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);

        /*
         * set up recommended IPG times, which vary depending on chip type:
         *      IPG transmit time:  80ns
         *      IPG receive time 1: 20ns
         *      IPG receive time 2: 80ns
         */
        CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);

        /* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
        CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001);
        CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100);

        /* set up 802.3x MAC flow control type -- 88:08 */
        CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808);

        /* Set up tuneables */
        CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);

        /*
         * Configure chip for correct operation.
         */
        ctrl = GX_CTRL_DUPLEX;
#if BYTE_ORDER == BIG_ENDIAN
        ctrl |= GX_CTRL_BIGENDIAN;
#endif
        ctrl |= GX_CTRL_VLAN_ENABLE;

        if (gx->gx_tbimode) {
                /*
                 * It seems that TXCW must be initialized from the EEPROM
                 * manually.
                 *
                 * XXX
                 * should probably read the eeprom and re-insert the
                 * values here.
                 */
#define TXCONFIG_WORD   0x000001A0
                CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);

                /* turn on hardware autonegotiate */
                GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
        } else {
                /*
                 * Auto-detect speed from PHY, instead of using direct
                 * indication.  The SLU bit doesn't force the link, but
                 * must be present for ASDE to work.
                 */
                gx_phy_reset(gx);
                ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
        }

        /*
         * Take chip out of reset and start it running.
         */
        CSR_WRITE_4(gx, GX_CTRL, ctrl);

        /* Turn interrupts on. */
        CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        /*
         * Set the current media.
         */
        if (gx->gx_miibus != NULL) {
                mii_mediachg(device_get_softc(gx->gx_miibus));
        } else {
                /*
                 * Temporarily force the configured media through
                 * gx_ifmedia_upd(), then restore the user's setting.
                 */
                ifm = &gx->gx_media;
                tmp = ifm->ifm_media;
                ifm->ifm_media = ifm->ifm_cur->ifm_media;
                gx_ifmedia_upd(ifp);
                ifm->ifm_media = tmp;
        }

        /*
         * XXX
         * Have the LINK0 flag force the link in TBI mode.
         */
        if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
                GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
        }

#if 0
printf("66mhz: %s  64bit: %s\n",
        CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
        CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
#endif
}
552
553 /*
554  * Stop all chip I/O so that the kernel's probe routines don't
555  * get confused by errant DMAs when rebooting.
556  */
557 static void
558 gx_shutdown(device_t dev)
559 {
560         struct gx_softc *gx;
561
562         gx = device_get_softc(dev);
563         gx_reset(gx);
564         gx_stop(gx);
565 }
566
/*
 * Detach the driver and release all resources.  Also used as the
 * cleanup path for a failed gx_attach(): every release below is
 * guarded so that only resources actually acquired are freed.
 * Runs under the interface serializer.
 */
static int
gx_detach(device_t dev)
{
        struct gx_softc *gx = device_get_softc(dev);
        struct ifnet *ifp = &gx->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        if (device_is_attached(dev)) {
                /* stop the chip before tearing down the interface */
                ether_ifdetach(ifp);
                gx_reset(gx);
                gx_stop(gx);
        }

        if (gx->gx_miibus)
                device_delete_child(gx->gx_dev, gx->gx_miibus);
        bus_generic_detach(gx->gx_dev);

        if (gx->gx_intrhand)
                bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);

        if (gx->gx_irq)
                bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
        if (gx->gx_res)
                bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
                    GX_PCI_LOMEM, gx->gx_res);

        if (gx->gx_rdata)
                contigfree(gx->gx_rdata, sizeof(struct gx_ring_data),
                           M_DEVBUF);

        if (gx->gx_tbimode)
                ifmedia_removeall(&gx->gx_media);

        lwkt_serialize_exit(ifp->if_serializer);
        return (0);
}
603
/*
 * Read one 16-bit word from the serial EEPROM into *dest by
 * bit-banging the EEPROM control register: select the part, clock out
 * the READ opcode plus address MSB-first, clock in 16 data bits
 * MSB-first, then deselect.  Each signal transition is followed by a
 * 10us settle delay.
 */
static void
gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
{
        u_int16_t word = 0;
        u_int32_t base, reg;
        int x;

        /* build the command: READ opcode above the masked address bits */
        addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
            (addr & ((1 << GX_EE_ADDR_SIZE) - 1));

        base = CSR_READ_4(gx, GX_EEPROM_CTRL);
        base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
        base |= GX_EE_SELECT;

        CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);

        /* shift the opcode+address out, MSB first, one clock per bit */
        for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
                reg = base | (addr & x ? GX_EE_DATA_IN : 0);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
                DELAY(10);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
                DELAY(10);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
                DELAY(10);
        }

        /* clock in 16 data bits, MSB first */
        for (x = 1 << 15; x; x >>= 1) {
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
                DELAY(10);
                reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
                if (reg & GX_EE_DATA_OUT)
                        word |= x;
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
                DELAY(10);
        }

        /* deselect the EEPROM */
        CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
        DELAY(10);

        *dest = word;
}
645         
646 static int
647 gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
648 {
649         u_int16_t *word;
650         int i;
651
652         word = (u_int16_t *)dest;
653         for (i = 0; i < cnt; i ++) {
654                 gx_eeprom_getword(gx, off + i, word);
655                 word++;
656         }
657         return (0);
658 }
659
/*
 * Set media options (ifmedia change callback).
 *
 * In TBI/SERDES mode, handle AUTO by resetting the link with hardware
 * autonegotiation enabled; manual 1000baseSX configuration is not
 * implemented.  In copper mode, reject 1000baseT half-duplex (known
 * not to work) and hand the request to the MII layer.
 */
static int
gx_ifmedia_upd(struct ifnet *ifp)
{
        struct gx_softc *gx;
        struct ifmedia *ifm;
        struct mii_data *mii;

        gx = ifp->if_softc;

        if (gx->gx_tbimode) {
                ifm = &gx->gx_media;
                if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                        return (EINVAL);
                switch (IFM_SUBTYPE(ifm->ifm_media)) {
                case IFM_AUTO:
                        /* pulse link reset around enabling autoneg */
                        GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
                        GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                        GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
                        break;
                case IFM_1000_SX:
                        device_printf(gx->gx_dev,
                            "manual config not supported yet.\n");
#if 0
                        GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                        config = /* bit symbols for 802.3z */0;
                        ctrl |= GX_CTRL_SET_LINK_UP;
                        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                                ctrl |= GX_CTRL_DUPLEX;
#endif
                        break;
                default:
                        return (EINVAL);
                }
        } else {
                ifm = &gx->gx_media;

                /*
                 * 1000TX half duplex does not work.
                 */
                if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T &&
                    (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
                        return (EINVAL);
                mii = device_get_softc(gx->gx_miibus);
                mii_mediachg(mii);
        }
        return (0);
}
711
/*
 * Report current media status (ifmedia status callback).
 *
 * TBI/SERDES mode reads link state straight from the status register
 * and always reports 1000baseSX full-duplex when up.  Copper mode
 * polls the MII layer, masking out the unsupported 1000baseT
 * half-duplex combination.
 */
static void
gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct gx_softc *gx;
        struct mii_data *mii;
        u_int32_t status;

        gx = ifp->if_softc;

        if (gx->gx_tbimode) {
                ifmr->ifm_status = IFM_AVALID;
                ifmr->ifm_active = IFM_ETHER;

                status = CSR_READ_4(gx, GX_STATUS);
                if ((status & GX_STAT_LINKUP) == 0)
                        return;

                ifmr->ifm_status |= IFM_ACTIVE;
                ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
        } else {
                mii = device_get_softc(gx->gx_miibus);
                mii_pollstat(mii);
                /* 1000baseT half-duplex doesn't work; report no media */
                if ((mii->mii_media_active & (IFM_1000_T | IFM_HDX)) ==
                    (IFM_1000_T | IFM_HDX))
                        mii->mii_media_active = IFM_ETHER | IFM_NONE;
                ifmr->ifm_active = mii->mii_media_active;
                ifmr->ifm_status = mii->mii_media_status;
        }
}
744
/*
 * Clock the low 'length' bits of 'data' out to the PHY over the
 * bit-banged MII management interface, MSB first.  Each bit is placed
 * on the data line and then the clock line is pulsed, with 10us
 * settle delays around every transition.
 */
static void
gx_mii_shiftin(struct gx_softc *gx, int data, int length)
{
        u_int32_t reg, x;

        /*
         * Set up default GPIO direction + PHY data out.
         */
        reg = CSR_READ_4(gx, GX_CTRL);
        reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
        reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;

        /*
         * Shift in data to PHY.
         */
        for (x = 1 << (length - 1); x; x >>= 1) {
                if (data & x)
                        reg |= GX_CTRL_PHY_IO;
                else
                        reg &= ~GX_CTRL_PHY_IO;
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
                CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
                DELAY(10);
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
        }
}
773
/*
 * Clock a 16-bit value in from the PHY over the bit-banged MII
 * management interface, MSB first.  The data line is switched to
 * input, a turnaround clock cycle is issued, then each data bit is
 * sampled on the rising clock edge; a final clock cycle completes
 * the frame.  Returns the word read.
 */
static u_int16_t
gx_mii_shiftout(struct gx_softc *gx)
{
        u_int32_t reg;
        u_int16_t data;
        int x;

        /*
         * Set up default GPIO direction + PHY data in.
         */
        reg = CSR_READ_4(gx, GX_CTRL);
        reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
        reg |= GX_CTRL_GPIO_DIR;

        /* turnaround cycle before the PHY starts driving data */
        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);
        /*
         * Shift out data from PHY.
         */
        data = 0;
        for (x = 1 << 15; x; x >>= 1) {
                CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
                DELAY(10);
                if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
                        data |= x;
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
        }
        /* one more clock cycle to finish the frame */
        CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);

        return (data);
}
813
/*
 * miibus read-register method: read PHY register 'reg' on PHY 'phy'
 * by shifting an MII read frame out and the 16-bit result back in.
 * Returns 0 in TBI mode, where there is no PHY.
 */
static int
gx_miibus_readreg(device_t dev, int phy, int reg)
{
        struct gx_softc *gx;

        gx = device_get_softc(dev);
        if (gx->gx_tbimode)
                return (0);

        /*
         * XXX
         * Note: Cordova has a MDIC register. livingood and < have mii bits
         */

        gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
        /* frame: start-of-frame, READ opcode, PHY address, register */
        gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
            (phy << 5) | reg, GX_PHY_READ_LEN);
        return (gx_mii_shiftout(gx));
}
833
/*
 * miibus write-register method: write 'value' to PHY register 'reg'
 * on PHY 'phy' by shifting a complete MII write frame out.  No-op in
 * TBI mode, where there is no PHY.
 */
static void
gx_miibus_writereg(device_t dev, int phy, int reg, int value)
{
        struct gx_softc *gx;

        gx = device_get_softc(dev);
        if (gx->gx_tbimode)
                return;

        gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
        /* frame: SOF, WRITE opcode, PHY addr, register, turnaround, data */
        gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
            (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
            (value & 0xffff), GX_PHY_WRITE_LEN);
}
848
849 static void
850 gx_miibus_statchg(device_t dev)
851 {
852         struct gx_softc *gx = device_get_softc(dev);
853         struct mii_data *mii;
854         int reg;
855
856         if (gx->gx_tbimode)
857                 return;
858
859         /*
860          * Set flow control behavior to mirror what PHY negotiated.
861          */
862         mii = device_get_softc(gx->gx_miibus);
863
864         reg = CSR_READ_4(gx, GX_CTRL);
865         if (mii->mii_media_active & IFM_FLAG0)
866                 reg |= GX_CTRL_RX_FLOWCTRL;
867         else
868                 reg &= ~GX_CTRL_RX_FLOWCTRL;
869         if (mii->mii_media_active & IFM_FLAG1)
870                 reg |= GX_CTRL_TX_FLOWCTRL;
871         else
872                 reg &= ~GX_CTRL_TX_FLOWCTRL;
873         CSR_WRITE_4(gx, GX_CTRL, reg);
874 }
875
/*
 * Interface ioctl handler.  Runs under the interface serializer
 * (DragonFly dispatches ifnet ioctls serialized).  Handles MTU,
 * flag changes (with a fast path for toggling promiscuous mode),
 * multicast list updates, media, and checksum-offload capability;
 * everything else goes to ether_ioctl().
 */
static int
gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
        struct gx_softc *gx = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct mii_data *mii;
        int mask, error = 0;

        switch (command) {
        case SIOCSIFMTU:
                if (ifr->ifr_mtu > GX_MAX_MTU) {
                        error = EINVAL;
                } else {
                        ifp->if_mtu = ifr->ifr_mtu;
                        /* re-init to reprogram long-packet acceptance */
                        gx_init(gx);
                }
                break;
        case SIOCSIFFLAGS:
                if ((ifp->if_flags & IFF_UP) == 0) {
                        gx_stop(gx);
                } else if (ifp->if_flags & IFF_RUNNING &&
                    ((ifp->if_flags & IFF_PROMISC) !=
                    (gx->gx_if_flags & IFF_PROMISC))) {
                        /* only promisc changed: flip the bit, skip re-init */
                        if (ifp->if_flags & IFF_PROMISC)
                                GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
                        else
                                GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
                } else {
                        gx_init(gx);
                }
                /* remember flags to detect the next promisc toggle */
                gx->gx_if_flags = ifp->if_flags;
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING)
                        gx_setmulti(gx);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                /* copper goes through the MII layer, TBI uses our ifmedia */
                if (gx->gx_miibus != NULL) {
                        mii = device_get_softc(gx->gx_miibus);
                        error = ifmedia_ioctl(ifp, ifr,
                            &mii->mii_media, command);
                } else {
                        error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command);
                }
                break;
        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_HWCSUM) {
                        if (IFCAP_HWCSUM & ifp->if_capenable)
                                ifp->if_capenable &= ~IFCAP_HWCSUM;
                        else
                                ifp->if_capenable |= IFCAP_HWCSUM;
                        /* re-init to apply the new offload setting */
                        if (ifp->if_flags & IFF_RUNNING)
                                gx_init(gx);
                }
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }
        return (error);
}
940
/*
 * Reset the external PHY by toggling its reset line, which is wired
 * to one of the MAC's general-purpose I/O pins (GX_CTRLX_PHY_RESET).
 */
static void
gx_phy_reset(struct gx_softc *gx)
{
	int reg;

	GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);

	/*
	 * PHY reset is active low.
	 */
	reg = CSR_READ_4(gx, GX_CTRL_EXT);
	reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
	reg |= GX_CTRLX_GPIO_DIR;

	/* Deassert, assert, then deassert reset, pausing 10us each step. */
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
	DELAY(10);

#if 0
	/* post-livingood (cordova) only */
		GX_SETBIT(gx, GX_CTRL, 0x80000000);
		DELAY(1000);
		GX_CLRBIT(gx, GX_CTRL, 0x80000000);
#endif
}
969
970 static void
971 gx_reset(struct gx_softc *gx)
972 {
973
974         /* Disable host interrupts. */
975         CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);
976
977         /* reset chip (THWAP!) */
978         GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
979         DELAY(10);
980 }
981
982 static void
983 gx_stop(struct gx_softc *gx)
984 {
985         struct ifnet *ifp;
986
987         ifp = &gx->arpcom.ac_if;
988
989         /* reset and flush transmitter */
990         CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);
991
992         /* reset and flush receiver */
993         CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);
994
995         /* reset link */
996         if (gx->gx_tbimode)
997                 GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
998
999         /* Free the RX lists. */
1000         gx_free_rx_ring(gx);
1001
1002         /* Free TX buffers. */
1003         gx_free_tx_ring(gx);
1004
1005         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1006 }
1007
1008 static void
1009 gx_watchdog(struct ifnet *ifp)
1010 {
1011         struct gx_softc *gx;
1012
1013         gx = ifp->if_softc;
1014
1015         device_printf(gx->gx_dev, "watchdog timeout -- resetting\n");
1016         gx_reset(gx);
1017         gx_init(gx);
1018
1019         ifp->if_oerrors++;
1020 }
1021
/*
 * Initialize a receive ring descriptor.
 *
 * If 'm' is NULL, a new mbuf with an attached cluster is allocated;
 * otherwise the caller's mbuf is recycled in place.  Returns 0 on
 * success or ENOBUFS if mbuf/cluster allocation fails (the caller
 * keeps ownership of 'm' in that case, since it is only reached when
 * m == NULL).
 */
static int
gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct gx_rx_desc *r;

	if (m == NULL) {
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			device_printf(gx->gx_dev, 
			    "mbuf allocation failed -- packet dropped\n");
			return (ENOBUFS);
		}
		MCLGET(m_new, MB_DONTWAIT);
		if ((m_new->m_flags & M_EXT) == 0) {
			device_printf(gx->gx_dev, 
			    "cluster allocation failed -- packet dropped\n");
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset length and data pointer. */
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		m->m_data = m->m_ext.ext_buf;
		m->m_next = NULL;
		m_new = m;
	}

	/*
	 * XXX
	 * this will _NOT_ work for large MTU's; it will overwrite
	 * the end of the buffer.  E.g.: take this out for jumbograms,
	 * but then that breaks alignment.
	 */
	if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
		m_adj(m_new, ETHER_ALIGN);

	/* Attach the mbuf to the ring and point the descriptor at it. */
	gx->gx_cdata.gx_rx_chain[idx] = m_new;
	r = &gx->gx_rdata->gx_rx_ring[idx];
	r->rx_addr = vtophys(mtod(m_new, caddr_t));
	r->rx_staterr = 0;

	return (0);
}
1069
1070 /*
1071  * The receive ring can have up to 64K descriptors, which at 2K per mbuf
1072  * cluster, could add up to 128M of memory.  Due to alignment constraints,
1073  * the number of descriptors must be a multiple of 8.  For now, we
1074  * allocate 256 entries and hope that our CPU is fast enough to keep up
1075  * with the NIC.
1076  */
1077 static int
1078 gx_init_rx_ring(struct gx_softc *gx)
1079 {
1080         int i, error;
1081
1082         for (i = 0; i < GX_RX_RING_CNT; i++) {
1083                 error = gx_newbuf(gx, i, NULL);
1084                 if (error)
1085                         return (error);
1086         }
1087
1088         /* bring receiver out of reset state, leave disabled */
1089         CSR_WRITE_4(gx, GX_RX_CONTROL, 0);
1090
1091         /* set up ring registers */
1092         CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
1093             (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring));
1094
1095         CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
1096             GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1097         CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
1098         CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
1099         gx->gx_rx_tail_idx = 0;
1100
1101         return (0);
1102 }
1103
1104 static void
1105 gx_free_rx_ring(struct gx_softc *gx)
1106 {
1107         struct mbuf **mp;
1108         int i;
1109
1110         mp = gx->gx_cdata.gx_rx_chain;
1111         for (i = 0; i < GX_RX_RING_CNT; i++, mp++) {
1112                 if (*mp != NULL) {
1113                         m_freem(*mp);
1114                         *mp = NULL;
1115                 }
1116         }
1117         bzero((void *)gx->gx_rdata->gx_rx_ring,
1118             GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1119
1120         /* release any partially-received packet chain */
1121         if (gx->gx_pkthdr != NULL) {
1122                 m_freem(gx->gx_pkthdr);
1123                 gx->gx_pkthdr = NULL;
1124         }
1125 }
1126
1127 static int
1128 gx_init_tx_ring(struct gx_softc *gx)
1129 {
1130
1131         /* bring transmitter out of reset state, leave disabled */
1132         CSR_WRITE_4(gx, GX_TX_CONTROL, 0);
1133
1134         /* set up ring registers */
1135         CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
1136             (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring));
1137         CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
1138             GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
1139         CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
1140         CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
1141         gx->gx_tx_head_idx = 0;
1142         gx->gx_tx_tail_idx = 0;
1143         gx->gx_txcnt = 0;
1144
1145         /* set up initial TX context */
1146         gx->gx_txcontext = GX_TXCONTEXT_NONE;
1147
1148         return (0);
1149 }
1150
1151 static void
1152 gx_free_tx_ring(struct gx_softc *gx)
1153 {
1154         struct mbuf **mp;
1155         int i;
1156
1157         mp = gx->gx_cdata.gx_tx_chain;
1158         for (i = 0; i < GX_TX_RING_CNT; i++, mp++) {
1159                 if (*mp != NULL) {
1160                         m_freem(*mp);
1161                         *mp = NULL;
1162                 }
1163         }
1164         bzero((void *)&gx->gx_rdata->gx_tx_ring,
1165             GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
1166 }
1167
1168 static void
1169 gx_setmulti(struct gx_softc *gx)
1170 {
1171         int i;
1172
1173         /* wipe out the multicast table */
1174         for (i = 1; i < 128; i++)
1175                 CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
1176 }
1177
/*
 * Drain completed descriptors from the receive ring, reassembling
 * packets that span multiple descriptors and handing finished
 * packets up the stack.  Finally advances the hardware tail pointer
 * to one slot behind the software index.
 */
static void
gx_rxeof(struct gx_softc *gx)
{
	struct gx_rx_desc *rx;
	struct ifnet *ifp;
	int idx, staterr, len;
	struct mbuf *m;

	gx->gx_rx_interrupts++;

	ifp = &gx->arpcom.ac_if;
	idx = gx->gx_rx_tail_idx;

	while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {

		rx = &gx->gx_rdata->gx_rx_ring[idx];
		m = gx->gx_cdata.gx_rx_chain[idx];
		/*
		 * gx_newbuf overwrites status and length bits, so we 
		 * make a copy of them here.
		 */
		len = rx->rx_len;
		staterr = rx->rx_staterr;

		if (staterr & GX_INPUT_ERROR)
			goto ierror;

		/* Replenish the slot; on failure, recycle the current mbuf. */
		if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
			goto ierror;

		GX_INC(idx, GX_RX_RING_CNT);

		if (staterr & GX_RXSTAT_INEXACT_MATCH) {
			/*
			 * multicast packet, must verify against
			 * multicast address.
			 */
		}

		/*
		 * Not the last fragment: chain it onto the pending
		 * packet and keep collecting descriptors.
		 */
		if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
			if (gx->gx_pkthdr == NULL) {
				m->m_len = len;
				m->m_pkthdr.len = len;
				gx->gx_pkthdr = m;
				gx->gx_pktnextp = &m->m_next;
			} else {
				m->m_len = len;
				gx->gx_pkthdr->m_pkthdr.len += len;
				*(gx->gx_pktnextp) = m;
				gx->gx_pktnextp = &m->m_next;
			}
			continue;
		}

		/* Final (or only) fragment: finish off the packet. */
		if (gx->gx_pkthdr == NULL) {
			m->m_len = len;
			m->m_pkthdr.len = len;
		} else {
			m->m_len = len;
			gx->gx_pkthdr->m_pkthdr.len += len;
			*(gx->gx_pktnextp) = m;
			m = gx->gx_pkthdr;
			gx->gx_pkthdr = NULL;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

#define IP_CSMASK       (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
#define TCP_CSMASK \
    (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
		if (ifp->if_capenable & IFCAP_RXCSUM) {
#if 0
			/*
			 * Intel Erratum #23 indicates that the Receive IP
			 * Checksum offload feature has been completely
			 * disabled.
			 *
			 * NOTE(review): this dead code tests IP_CSUM_MASK,
			 * which does not match the IP_CSMASK macro defined
			 * above -- fix the name before re-enabling.
			 */
			if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((staterr & GX_RXERR_IP_CSUM) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
#endif
			if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (staterr & GX_RXSTAT_VLAN_PKT)
			VLAN_INPUT_TAG(m, rx->rx_special);
		else
			ifp->if_input(ifp, m);
		continue;

  ierror:
		ifp->if_ierrors++;
		gx_newbuf(gx, idx, m);

		/* 
		 * XXX
		 * this isn't quite right.  Suppose we have a packet that
		 * spans 5 descriptors (9K split into 2K buffers).  If
		 * the 3rd descriptor sets an error, we need to ignore
		 * the last two.  The way things stand now, the last two
		 * will be accepted as a single packet.
		 *
		 * we don't worry about this -- the chip may not set an
		 * error in this case, and the checksum of the upper layers
		 * will catch the error.
		 */
		if (gx->gx_pkthdr != NULL) {
			m_freem(gx->gx_pkthdr);
			gx->gx_pkthdr = NULL;
		}
		GX_INC(idx, GX_RX_RING_CNT);
	}

	gx->gx_rx_tail_idx = idx;
	/* Hardware tail must lag one slot behind the software index. */
	if (--idx < 0)
		idx = GX_RX_RING_CNT - 1;
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
}
1306
/*
 * Reclaim transmit descriptors for packets the chip has finished
 * sending, freeing the associated mbuf chains, and clear OACTIVE
 * once the ring is empty.
 */
static void
gx_txeof(struct gx_softc *gx)
{
	struct ifnet *ifp;
	int idx, cnt;

	gx->gx_tx_interrupts++;

	ifp = &gx->arpcom.ac_if;
	idx = gx->gx_tx_head_idx;
	cnt = gx->gx_txcnt;

	/*
	 * If the system chipset performs I/O write buffering, it is 
	 * possible for the PIO read of the head descriptor to bypass the
	 * memory write of the descriptor, resulting in reading a descriptor
	 * which has not been updated yet.
	 */
	while (cnt) {
		struct gx_tx_desc_old *tx;

		tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
		cnt--;

		/* Skip descriptors in the middle of a packet. */
		if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
			GX_INC(idx, GX_TX_RING_CNT);
			continue;
		}

		/* Stop at the first packet the chip has not completed. */
		if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
			break;

		ifp->if_opackets++;

		/* The mbuf chain is stored on the packet's last descriptor. */
		m_freem(gx->gx_cdata.gx_tx_chain[idx]);
		gx->gx_cdata.gx_tx_chain[idx] = NULL;
		gx->gx_txcnt = cnt;
		ifp->if_timer = 0;

		GX_INC(idx, GX_TX_RING_CNT);
		gx->gx_tx_head_idx = idx;
	}

	if (gx->gx_txcnt == 0)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1353
/*
 * Interrupt handler: read (and thereby acknowledge) all pending
 * interrupt causes, dispatch RX/TX/link processing, then restart
 * transmission if packets are queued.
 */
static void
gx_intr(void *xsc)
{
	struct gx_softc *gx = xsc;
	struct ifnet *ifp = &gx->arpcom.ac_if;
	u_int32_t intr;

	gx->gx_interrupts++;

	/* Disable host interrupts. */
	CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

	/*
	 * find out why we're being bothered.
	 * reading this register automatically clears all bits.
	 */
	intr = CSR_READ_4(gx, GX_INT_READ);

	/* Check RX return ring producer/consumer */
	if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
		gx_rxeof(gx);

	/* Check TX ring producer/consumer */
	if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
		gx_txeof(gx);

	/*
	 * handle other interrupts here.
	 */

	/*
	 * Link change interrupts are not reliable; the interrupt may
	 * not be generated if the link is lost.  However, the register
	 * read is reliable, so check that.  Use SEQ errors to possibly
	 * indicate that the link has changed.
	 */
	if (intr & GX_INT_LINK_CHANGE) {
		if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
			device_printf(gx->gx_dev, "link down\n");
		} else {
			device_printf(gx->gx_dev, "link up\n");
		}
	}

	/* Turn interrupts on. */
	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

	/* Kick the transmitter if anything is waiting to go out. */
	if (ifp->if_flags & IFF_RUNNING && !ifq_is_empty(&ifp->if_snd))
		gx_start(ifp);
}
1404
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 *
 * Returns 0 on success or ENOBUFS when the ring lacks room; in the
 * early ENOBUFS case the softc ring state is left untouched so the
 * caller may retry the same mbuf later.
 */
static int
gx_encap(struct gx_softc *gx, struct mbuf *m_head)
{
	struct gx_tx_desc_data *tx = NULL;
	struct gx_tx_desc_ctx *tctx;
	struct mbuf *m;
	int idx, cnt, csumopts, txcontext;
	struct ifvlan *ifv = NULL;

	/* Detect an outgoing packet tagged by the vlan pseudo-interface. */
	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;

	/* Work on local copies; commit to the softc only on success. */
	cnt = gx->gx_txcnt;
	idx = gx->gx_tx_tail_idx;
	txcontext = gx->gx_txcontext;

	/*
	 * Insure we have at least 4 descriptors pre-allocated.
	 */
	if (cnt >= GX_TX_RING_CNT - 4)
		return (ENOBUFS);

	/*
	 * Set up the appropriate offload context if necessary.
	 */
	csumopts = 0;
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csumopts |= GX_TXTCP_OPT_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
			csumopts |= GX_TXTCP_OPT_TCP_CSUM;
			txcontext = GX_TXCONTEXT_TCPIP;
		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
			csumopts |= GX_TXTCP_OPT_TCP_CSUM;
			txcontext = GX_TXCONTEXT_UDPIP;
		} else if (txcontext == GX_TXCONTEXT_NONE)
			txcontext = GX_TXCONTEXT_TCPIP;
		/* Skip the context descriptor if the right one is loaded. */
		if (txcontext == gx->gx_txcontext)
			goto context_done;

		/* Emit a context descriptor describing the csum offsets. */
		tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
		tctx->tx_ip_csum_start = ETHER_HDR_LEN;
		tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
		tctx->tx_ip_csum_offset = 
		    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
		tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
		tctx->tx_tcp_csum_end = 0;
		if (txcontext == GX_TXCONTEXT_TCPIP)
			tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
			    sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
		else
			tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
			    sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
		tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
		tctx->tx_type = 0;
		tctx->tx_status = 0;
		GX_INC(idx, GX_TX_RING_CNT);
		cnt++;
	}
context_done:

	/*
	 * Start packing the mbufs in this chain into the transmit
	 * descriptors.  Stop when we run out of descriptors or hit
	 * the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		if (cnt == GX_TX_RING_CNT) {
printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
			return (ENOBUFS);
}

		tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
		tx->tx_addr = vtophys(mtod(m, vm_offset_t));
		tx->tx_status = 0;
		tx->tx_len = m->m_len;
		if (gx->arpcom.ac_if.if_hwassist) {
			tx->tx_type = 1;
			tx->tx_command = GX_TXTCP_EXTENSION;
			tx->tx_options = csumopts;
		} else {
			/*
			 * This is really a struct gx_tx_desc_old.
			 */
			tx->tx_command = 0;
		}
		GX_INC(idx, GX_TX_RING_CNT);
		cnt++;
	}

	/* Close out the packet on its final descriptor and commit state. */
	if (tx != NULL) {
		tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
		    GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
		if (ifv != NULL) {
			tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
			tx->tx_vlan = ifv->ifv_tag;
		}
		gx->gx_txcnt = cnt;
		gx->gx_tx_tail_idx = idx;
		gx->gx_txcontext = txcontext;
		/* Save the mbuf chain on the packet's last descriptor slot. */
		idx = GX_PREV(idx, GX_TX_RING_CNT);
		gx->gx_cdata.gx_tx_chain[idx] = m_head;

		CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
	}
	
	return (0);
}
1522  
1523 /*
1524  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1525  * to the mbuf data regions directly in the transmit descriptors.
1526  */
1527 static void
1528 gx_start(struct ifnet *ifp)
1529 {
1530         struct gx_softc *gx = ifp->if_softc;
1531         struct mbuf *m_head;
1532
1533         for (;;) {
1534                 m_head = ifq_poll(&ifp->if_snd);
1535                 if (m_head == NULL)
1536                         break;
1537
1538                 /*
1539                  * Pack the data into the transmit ring. If we
1540                  * don't have room, set the OACTIVE flag and wait
1541                  * for the NIC to drain the ring.
1542                  */
1543                 if (gx_encap(gx, m_head) != 0) {
1544                         ifp->if_flags |= IFF_OACTIVE;
1545                         break;
1546                 }
1547                 ifq_dequeue(&ifp->if_snd, m_head);
1548
1549                 BPF_MTAP(ifp, m_head);
1550
1551                 /*
1552                  * Set a timeout in case the chip goes out to lunch.
1553                  */
1554                 ifp->if_timer = 5;
1555         }
1556 }