/*
 * (Imported history note: "Fix the design of ifq_dequeue/altq_dequeue by
 * adding an mbuf pointer" — dragonfly.git, sys/dev/netif/gx/if_gx.c)
 */
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/dev/gx/if_gx.c,v 1.2.2.3 2001/12/14 19:51:39 jlemon Exp $
30  * $DragonFly: src/sys/dev/netif/gx/Attic/if_gx.c,v 1.21 2005/11/22 00:24:32 dillon Exp $
31  */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/thread2.h>
41 #include <sys/queue.h>
42
43 #include <net/if.h>
44 #include <net/ifq_var.h>
45 #include <net/if_arp.h>
46 #include <net/ethernet.h>
47 #include <net/if_dl.h>
48 #include <net/if_media.h>
49
50 #include <net/bpf.h>
51 #include <net/if_types.h>
52 #include <net/vlan/if_vlan_var.h>
53
54 #include <netinet/in_systm.h>
55 #include <netinet/in.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <netinet/udp.h>
59
60 #include <vm/vm.h>              /* for vtophys */
61 #include <vm/pmap.h>            /* for vtophys */
62 #include <machine/clock.h>      /* for DELAY */
63 #include <machine/bus_memio.h>
64 #include <machine/bus.h>
65 #include <machine/resource.h>
66 #include <sys/bus.h>
67 #include <sys/rman.h>
68
69 #include <bus/pci/pcireg.h>
70 #include <bus/pci/pcivar.h>
71
72 #include "../mii_layer/mii.h"
73 #include "../mii_layer/miivar.h"
74
75 #include "if_gxreg.h"
76 #include "if_gxvar.h"
77
78 #include "miibus_if.h"
79
80 #define TUNABLE_TX_INTR_DELAY   100
81 #define TUNABLE_RX_INTR_DELAY   100
82
83 #define GX_CSUM_FEATURES        (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
84
85 /*
86  * Various supported device vendors/types and their names.
87  */
/*
 * Per-model description of a supported controller: PCI identification,
 * feature/workaround flags (GXF_*) and the recommended inter-packet-gap
 * register value for that silicon revision.
 */
struct gx_device {
        u_int16_t       vendor;         /* PCI vendor ID */
        u_int16_t       device;         /* PCI device ID */
        int             version_flags;  /* GXF_* capability/workaround flags */
        u_int32_t       version_ipg;    /* packed IPG timings (tx | rx1 | rx2) */
        char            *name;          /* human-readable device description */
};
95
/*
 * Table of supported chips, terminated by a NULL name.  The IPG fields
 * pack three values: transmit IPG | receive IPG 1 << 10 | receive IPG 2
 * << 20, per-chip as recommended by Intel.
 */
static struct gx_device gx_devs[] = {
        { INTEL_VENDORID, DEVICEID_WISEMAN,
            GXF_FORCE_TBI | GXF_OLD_REGS,
            10 | 2 << 10 | 10 << 20,
            "Intel Gigabit Ethernet (82542)" },
        { INTEL_VENDORID, DEVICEID_LIVINGOOD_FIBER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            6 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82543GC-F)" },
        { INTEL_VENDORID, DEVICEID_LIVINGOOD_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82543GC-T)" },
#if 0
/* notyet.. */
        { INTEL_VENDORID, DEVICEID_CORDOVA_FIBER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            6 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544EI-F)" },
        { INTEL_VENDORID, DEVICEID_CORDOVA_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544EI-T)" },
        { INTEL_VENDORID, DEVICEID_CORDOVA2_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544GC-T)" },
#endif
        { 0, 0, 0, NULL }       /* table terminator */
};
126
/*
 * RX/TX register offset maps.  The 82542 ("Wiseman", GXF_OLD_REGS) uses
 * a different register layout from later chips; gx_attach() copies the
 * appropriate table into the softc so the rest of the driver can be
 * layout-agnostic.
 */
static struct gx_regs new_regs = {
        GX_RX_RING_BASE, GX_RX_RING_LEN,
        GX_RX_RING_HEAD, GX_RX_RING_TAIL,
        GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,

        GX_TX_RING_BASE, GX_TX_RING_LEN,
        GX_TX_RING_HEAD, GX_TX_RING_TAIL,
        GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
};
static struct gx_regs old_regs = {
        GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
        GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
        GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,

        GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
        GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
        GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
};
145
146 static int      gx_probe(device_t dev);
147 static int      gx_attach(device_t dev);
148 static int      gx_detach(device_t dev);
149 static void     gx_shutdown(device_t dev);
150
151 static void     gx_intr(void *xsc);
152 static void     gx_init(void *xsc);
153
154 static struct   gx_device *gx_match(device_t dev);
155 static void     gx_eeprom_getword(struct gx_softc *gx, int addr,
156                     u_int16_t *dest);
157 static int      gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
158                     int cnt);
159 static int      gx_ifmedia_upd(struct ifnet *ifp);
160 static void     gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
161 static int      gx_miibus_readreg(device_t dev, int phy, int reg);
162 static void     gx_miibus_writereg(device_t dev, int phy, int reg, int value);
163 static void     gx_miibus_statchg(device_t dev);
164 static int      gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data,
165                     struct ucred *);
166 static void     gx_setmulti(struct gx_softc *gx);
167 static void     gx_reset(struct gx_softc *gx);
168 static void     gx_phy_reset(struct gx_softc *gx);
169 static void     gx_stop(struct gx_softc *gx);
170 static void     gx_watchdog(struct ifnet *ifp);
171 static void     gx_start(struct ifnet *ifp);
172
173 static int      gx_init_rx_ring(struct gx_softc *gx);
174 static void     gx_free_rx_ring(struct gx_softc *gx);
175 static int      gx_init_tx_ring(struct gx_softc *gx);
176 static void     gx_free_tx_ring(struct gx_softc *gx);
177
/* Newbus method table: device interface plus the MII bus accessors. */
static device_method_t gx_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         gx_probe),
        DEVMETHOD(device_attach,        gx_attach),
        DEVMETHOD(device_detach,        gx_detach),
        DEVMETHOD(device_shutdown,      gx_shutdown),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       gx_miibus_readreg),
        DEVMETHOD(miibus_writereg,      gx_miibus_writereg),
        DEVMETHOD(miibus_statchg,       gx_miibus_statchg),

        { 0, 0 }        /* table terminator */
};

static driver_t gx_driver = {
        "gx",
        gx_methods,
        sizeof(struct gx_softc)
};

static devclass_t gx_devclass;

/* Attach to the PCI bus; depend on and host the miibus PHY layer. */
DECLARE_DUMMY_MODULE(if_gx);
MODULE_DEPEND(if_gx, miibus, 1, 1, 1);
DRIVER_MODULE(if_gx, pci, gx_driver, gx_devclass, 0, 0);
DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0);
205
206 static struct gx_device *
207 gx_match(device_t dev)
208 {
209         int i;
210
211         for (i = 0; gx_devs[i].name != NULL; i++) {
212                 if ((pci_get_vendor(dev) == gx_devs[i].vendor) &&
213                     (pci_get_device(dev) == gx_devs[i].device))
214                         return (&gx_devs[i]);
215         }
216         return (NULL);
217 }
218
219 static int
220 gx_probe(device_t dev)
221 {
222         struct gx_device *gx_dev;
223
224         gx_dev = gx_match(dev);
225         if (gx_dev == NULL)
226                 return (ENXIO);
227
228         device_set_desc(dev, gx_dev->name);
229         return (0);
230 }
231
/*
 * Attach method: enable PCI memory/busmaster access, map the register
 * BAR, allocate the IRQ, read the station address from the EEPROM,
 * allocate the (physically contiguous) descriptor ring memory, set up
 * media (TBI/SERDES vs. GMII/MII PHY) and register the ifnet.
 *
 * On any failure this falls through to gx_detach(), which tolerates a
 * partially initialized softc.
 */
static int
gx_attach(device_t dev)
{
        struct gx_softc *gx;
        struct gx_device *gx_dev;
        struct ifnet *ifp;
        u_int32_t command;
        int rid;
        int error = 0;

        gx = device_get_softc(dev);
        gx->gx_dev = dev;

        /* probe already matched; re-match to pick up per-chip parameters */
        gx_dev = gx_match(dev);
        gx->gx_vflags = gx_dev->version_flags;
        gx->gx_ipg = gx_dev->version_ipg;

        /*
         * Map control/status registers.
         */
        command = pci_read_config(dev, PCIR_COMMAND, 4);
        command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
        if (gx->gx_vflags & GXF_ENABLE_MWI)
                command |= PCIM_CMD_MWIEN;
        pci_write_config(dev, PCIR_COMMAND, command, 4);
        /* read back to verify the bridge actually latched the enables */
        command = pci_read_config(dev, PCIR_COMMAND, 4);

/* XXX check cache line size? */

        if ((command & PCIM_CMD_MEMEN) == 0) {
                device_printf(dev, "failed to enable memory mapping!\n");
                error = ENXIO;
                goto fail;
        }

        rid = GX_PCI_LOMEM;
        gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
#if 0
/* support PIO mode */
        rid = PCI_LOIO;
        gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
            RF_ACTIVE);
#endif

        if (gx->gx_res == NULL) {
                device_printf(dev, "couldn't map memory\n");
                error = ENXIO;
                goto fail;
        }

        gx->gx_btag = rman_get_bustag(gx->gx_res);
        gx->gx_bhandle = rman_get_bushandle(gx->gx_res);

        /* Allocate interrupt */
        rid = 0;
        gx->gx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);

        if (gx->gx_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /* compensate for different register mappings (82542 vs. later) */
        if (gx->gx_vflags & GXF_OLD_REGS)
                gx->gx_reg = old_regs;
        else
                gx->gx_reg = new_regs;

        /* station address: 3 x 16-bit EEPROM words */
        if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
            GX_EEMAP_MAC, 3)) {
                device_printf(dev, "failed to read station address\n");
                error = ENXIO;
                goto fail;
        }

        /*
         * Allocate the ring buffers.  Must be physically contiguous and
         * below 4GB since the chip DMAs via 32-bit physical addresses
         * (vtophys is used when loading the rings).
         */
        gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF,
            M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

        if (gx->gx_rdata == NULL) {
                device_printf(dev, "no memory for list buffers!\n");
                error = ENXIO;
                goto fail;
        }
        bzero(gx->gx_rdata, sizeof(struct gx_ring_data));

        /* Set default tuneable values. */
        gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
        gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;

        /* Set up ifnet structure */
        ifp = &gx->arpcom.ac_if;
        ifp->if_softc = gx;
        if_initname(ifp, "gx", device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = gx_ioctl;
        ifp->if_start = gx_start;
        ifp->if_watchdog = gx_watchdog;
        ifp->if_init = gx_init;
        ifp->if_mtu = ETHERMTU;
        ifq_set_maxlen(&ifp->if_snd, GX_TX_RING_CNT - 1);
        ifq_set_ready(&ifp->if_snd);

        /* see if we can enable hardware checksumming */
        if (gx->gx_vflags & GXF_CSUM) {
                ifp->if_capabilities = IFCAP_HWCSUM;
                ifp->if_capenable = ifp->if_capabilities;
        }

        /* figure out transciever type: forced TBI, or chip reports it */
        if (gx->gx_vflags & GXF_FORCE_TBI ||
            CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
                gx->gx_tbimode = 1;

        if (gx->gx_tbimode) {
                /* SERDES transceiver: fixed media set, no PHY to probe */
                ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
                    gx_ifmedia_sts);
                ifmedia_add(&gx->gx_media,
                    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
                ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
                ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
        } else {
                /* GMII/MII transceiver */
                gx_phy_reset(gx);
                if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd,
                    gx_ifmedia_sts)) {
                        device_printf(dev, "GMII/MII, PHY not detected\n");
                        error = ENXIO;
                        goto fail;
                }
        }

        /*
         * Call MI attach routines.
         */
        ether_ifattach(ifp, gx->arpcom.ac_enaddr);

        /* hook the interrupt last, after the interface is fully set up */
        error = bus_setup_intr(dev, gx->gx_irq, 0,
                               gx_intr, gx, &gx->gx_intrhand, NULL);
        if (error) {
                ether_ifdetach(ifp);
                device_printf(dev, "couldn't setup irq\n");
                goto fail;
        }

        return (0);

fail:
        gx_detach(dev);
        return (error);
}
387
/*
 * (Re)initialize the chip: reset and stop it, program the station and
 * multicast addresses, rebuild the RX/TX rings, configure the receiver,
 * transmitter, flow control and interrupt moderation, then bring the
 * link up according to the current media selection.
 *
 * Called with the softc as the argument (ifnet if_init convention);
 * runs inside a critical section to keep the interrupt handler out
 * while the chip is in a half-configured state.
 */
static void
gx_init(void *xsc)
{
        struct gx_softc *gx = (struct gx_softc *)xsc;
        struct ifmedia *ifm;
        struct ifnet *ifp = &gx->arpcom.ac_if;
        u_int16_t *m;
        u_int32_t ctrl;
        int i, tmp;

        crit_enter();

        /* Disable host interrupts, halt chip. */
        gx_reset(gx);

        /* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
        gx_stop(gx);

        /* Load our MAC address, invalidate other 15 RX addresses. */
        m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
        CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
        CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
        for (i = 1; i < 16; i++)
                CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);

        /* Program multicast filter. */
        gx_setmulti(gx);

        /* Init RX ring. */
        gx_init_rx_ring(gx);

        /* Init TX ring. */
        gx_init_tx_ring(gx);

        if (gx->gx_vflags & GXF_DMA) {
                /* set up DMA control */
                CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
                CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
        }

        /* enable receiver */
        ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K;
        ctrl |= GX_RXC_BCAST_ACCEPT;

        /* Enable or disable promiscuous mode as needed. */
        if (ifp->if_flags & IFF_PROMISC)
                ctrl |= GX_RXC_UNI_PROMISC;

        /* This is required if we want to accept jumbo frames */
        if (ifp->if_mtu > ETHERMTU)
                ctrl |= GX_RXC_LONG_PKT_ENABLE;

        /* setup receive checksum control */
        if (ifp->if_capenable & IFCAP_RXCSUM)
                CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
                    GX_CSUM_TCP/* | GX_CSUM_IP*/);

        /* setup transmit checksum control */
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist = GX_CSUM_FEATURES;

        ctrl |= GX_RXC_STRIP_ETHERCRC;          /* not on 82542? */
        CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);

        /* enable transmitter */
        ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;

        /* XXX we should support half-duplex here too... */
        ctrl |= GX_TXC_COLL_TIME_FDX;

        CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);

        /*
         * set up recommended IPG times, which vary depending on chip type:
         *      IPG transmit time:  80ns
         *      IPG receive time 1: 20ns
         *      IPG receive time 2: 80ns
         */
        CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);

        /* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
        CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001);
        CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100);

        /* set up 802.3x MAC flow control type -- 88:08 */
        CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808);

        /* Set up tuneables (interrupt moderation delays) */
        CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);

        /*
         * Configure chip for correct operation.
         */
        ctrl = GX_CTRL_DUPLEX;
#if BYTE_ORDER == BIG_ENDIAN
        ctrl |= GX_CTRL_BIGENDIAN;
#endif
        ctrl |= GX_CTRL_VLAN_ENABLE;

        if (gx->gx_tbimode) {
                /*
                 * It seems that TXCW must be initialized from the EEPROM
                 * manually.
                 *
                 * XXX
                 * should probably read the eeprom and re-insert the
                 * values here.
                 */
#define TXCONFIG_WORD   0x000001A0
                CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);

                /* turn on hardware autonegotiate */
                GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
        } else {
                /*
                 * Auto-detect speed from PHY, instead of using direct
                 * indication.  The SLU bit doesn't force the link, but
                 * must be present for ASDE to work.
                 */
                gx_phy_reset(gx);
                ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
        }

        /*
         * Take chip out of reset and start it running.
         */
        CSR_WRITE_4(gx, GX_CTRL, ctrl);

        /* Turn interrupts on. */
        CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        /*
         * Set the current media.
         */
        if (gx->gx_miibus != NULL) {
                mii_mediachg(device_get_softc(gx->gx_miibus));
        } else {
                /* force a media update by making the cached value differ */
                ifm = &gx->gx_media;
                tmp = ifm->ifm_media;
                ifm->ifm_media = ifm->ifm_cur->ifm_media;
                gx_ifmedia_upd(ifp);
                ifm->ifm_media = tmp;
        }

        /*
         * XXX
         * Have the LINK0 flag force the link in TBI mode.
         */
        if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
                GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
        }

#if 0
printf("66mhz: %s  64bit: %s\n",
        CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
        CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
#endif

        crit_exit();
}
553
554 /*
555  * Stop all chip I/O so that the kernel's probe routines don't
556  * get confused by errant DMAs when rebooting.
557  */
558 static void
559 gx_shutdown(device_t dev)
560 {
561         struct gx_softc *gx;
562
563         gx = device_get_softc(dev);
564         gx_reset(gx);
565         gx_stop(gx);
566 }
567
568 static int
569 gx_detach(device_t dev)
570 {
571         struct gx_softc *gx = device_get_softc(dev);
572         struct ifnet *ifp = &gx->arpcom.ac_if;
573
574         if (device_is_attached(dev)) {
575                 ether_ifdetach(ifp);
576                 gx_reset(gx);
577                 gx_stop(gx);
578         }
579
580         if (gx->gx_miibus)
581                 device_delete_child(gx->gx_dev, gx->gx_miibus);
582         bus_generic_detach(gx->gx_dev);
583
584         if (gx->gx_intrhand)
585                 bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);
586
587         crit_exit();
588
589         if (gx->gx_irq)
590                 bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
591         if (gx->gx_res)
592                 bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
593                     GX_PCI_LOMEM, gx->gx_res);
594
595         if (gx->gx_rdata)
596                 contigfree(gx->gx_rdata, sizeof(struct gx_ring_data),
597                            M_DEVBUF);
598
599         if (gx->gx_tbimode)
600                 ifmedia_removeall(&gx->gx_media);
601
602         return (0);
603 }
604
/*
 * Bit-bang one 16-bit word out of the serial EEPROM.
 *
 * The opcode+address is clocked in MSB-first through the EEPROM control
 * register, then 16 data bits are clocked back out.  The 10us delays
 * between clock edges satisfy the EEPROM's timing requirements — do not
 * reorder or remove them.
 */
static void
gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
{
        u_int16_t word = 0;
        u_int32_t base, reg;
        int x;

        /* prepend the READ opcode to the (masked) word address */
        addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
            (addr & ((1 << GX_EE_ADDR_SIZE) - 1));

        /* idle clock/data lines, assert chip select */
        base = CSR_READ_4(gx, GX_EEPROM_CTRL);
        base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
        base |= GX_EE_SELECT;

        CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);

        /* shift opcode + address in, MSB first, one bit per clock pulse */
        for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
                reg = base | (addr & x ? GX_EE_DATA_IN : 0);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
                DELAY(10);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
                DELAY(10);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
                DELAY(10);
        }

        /* clock the 16 data bits back out, MSB first */
        for (x = 1 << 15; x; x >>= 1) {
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
                DELAY(10);
                reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
                if (reg & GX_EE_DATA_OUT)
                        word |= x;
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
                DELAY(10);
        }

        /* deassert chip select */
        CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
        DELAY(10);

        *dest = word;
}
646         
647 static int
648 gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
649 {
650         u_int16_t *word;
651         int i;
652
653         word = (u_int16_t *)dest;
654         for (i = 0; i < cnt; i ++) {
655                 gx_eeprom_getword(gx, off + i, word);
656                 word++;
657         }
658         return (0);
659 }
660
661 /*
662  * Set media options.
663  */
/*
 * Set media options (ifmedia "change" callback).
 *
 * In TBI/SERDES mode the link is managed directly via chip registers;
 * otherwise the request is delegated to the MII PHY layer.
 */
static int
gx_ifmedia_upd(struct ifnet *ifp)
{
        struct gx_softc *gx;
        struct ifmedia *ifm;
        struct mii_data *mii;

        gx = ifp->if_softc;

        if (gx->gx_tbimode) {
                ifm = &gx->gx_media;
                if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                        return (EINVAL);
                switch (IFM_SUBTYPE(ifm->ifm_media)) {
                case IFM_AUTO:
                        /* pulse LINK_RESET to restart hardware autoneg */
                        GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
                        GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                        GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
                        break;
                case IFM_1000_SX:
                        device_printf(gx->gx_dev,
                            "manual config not supported yet.\n");
#if 0
                        GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                        config = /* bit symbols for 802.3z */0;
                        ctrl |= GX_CTRL_SET_LINK_UP;
                        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                                ctrl |= GX_CTRL_DUPLEX;
#endif
                        break;
                default:
                        return (EINVAL);
                }
        } else {
                /*
                 * NOTE(review): gx_media is only ifmedia_init()'ed in
                 * TBI mode (see gx_attach); reading it here in the MII
                 * path looks suspect — possibly this was meant to be
                 * mii->mii_media.  Confirm before changing.
                 */
                ifm = &gx->gx_media;

                /*
                 * 1000TX half duplex does not work.
                 */
                if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T &&
                    (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
                        return (EINVAL);
                mii = device_get_softc(gx->gx_miibus);
                mii_mediachg(mii);
        }
        return (0);
}
712
713 /*
714  * Report current media status.
715  */
/*
 * Report current media status (ifmedia "status" callback).
 *
 * TBI mode reports 1000baseSX full-duplex directly from the chip's
 * link-up status; MII mode polls the PHY.  1000baseT half-duplex is
 * reported as "none" because this driver does not support it (see
 * gx_ifmedia_upd).
 */
static void
gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct gx_softc *gx;
        struct mii_data *mii;
        u_int32_t status;

        gx = ifp->if_softc;

        if (gx->gx_tbimode) {
                ifmr->ifm_status = IFM_AVALID;
                ifmr->ifm_active = IFM_ETHER;

                status = CSR_READ_4(gx, GX_STATUS);
                if ((status & GX_STAT_LINKUP) == 0)
                        return;

                ifmr->ifm_status |= IFM_ACTIVE;
                ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
        } else {
                mii = device_get_softc(gx->gx_miibus);
                mii_pollstat(mii);
                /* mask unsupported 1000T half-duplex as "no media" */
                if ((mii->mii_media_active & (IFM_1000_T | IFM_HDX)) ==
                    (IFM_1000_T | IFM_HDX))
                        mii->mii_media_active = IFM_ETHER | IFM_NONE;
                ifmr->ifm_active = mii->mii_media_active;
                ifmr->ifm_status = mii->mii_media_status;
        }
}
745
/*
 * Bit-bang "length" bits of "data" (MSB first) into the PHY management
 * interface via the chip's GPIO lines.  The 10us delays between clock
 * edges are part of the MDIO timing — do not remove.
 */
static void
gx_mii_shiftin(struct gx_softc *gx, int data, int length)
{
        u_int32_t reg, x;

        /*
         * Set up default GPIO direction + PHY data out.
         */
        reg = CSR_READ_4(gx, GX_CTRL);
        reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
        reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;

        /*
         * Shift in data to PHY, one bit per clock pulse.
         */
        for (x = 1 << (length - 1); x; x >>= 1) {
                if (data & x)
                        reg |= GX_CTRL_PHY_IO;
                else
                        reg &= ~GX_CTRL_PHY_IO;
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
                CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
                DELAY(10);
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
        }
}
774
/*
 * Bit-bang 16 bits out of the PHY management interface (MSB first) and
 * return them.  The leading clock pulse covers the turnaround cycle
 * before the PHY starts driving data; the trailing pulse releases the
 * bus.  Timing delays are load-bearing — do not remove.
 */
static u_int16_t
gx_mii_shiftout(struct gx_softc *gx)
{
        u_int32_t reg;
        u_int16_t data;
        int x;

        /*
         * Set up default GPIO direction + PHY data in.
         */
        reg = CSR_READ_4(gx, GX_CTRL);
        reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
        reg |= GX_CTRL_GPIO_DIR;

        /* idle/turnaround clock cycle before sampling */
        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);
        /*
         * Shift out data from PHY, one bit per clock pulse.
         */
        data = 0;
        for (x = 1 << 15; x; x >>= 1) {
                CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
                DELAY(10);
                if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
                        data |= x;
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
        }
        /* trailing clock cycle to release the bus */
        CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);

        return (data);
}
814
/*
 * miibus read-register method: clock a standard MDIO read frame
 * (preamble, SOF, READ opcode, PHY address, register address) into the
 * PHY and return the 16 data bits shifted back out.  No-op (returns 0)
 * in TBI mode, which has no PHY.
 */
static int
gx_miibus_readreg(device_t dev, int phy, int reg)
{
        struct gx_softc *gx;

        gx = device_get_softc(dev);
        if (gx->gx_tbimode)
                return (0);

        /*
         * XXX
         * Note: Cordova has a MDIC register. livingood and < have mii bits
         */

        gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
        gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
            (phy << 5) | reg, GX_PHY_READ_LEN);
        return (gx_mii_shiftout(gx));
}
834
/*
 * miibus write-register method: clock a standard MDIO write frame
 * (preamble, SOF, WRITE opcode, PHY address, register address,
 * turnaround, 16 data bits) into the PHY.  No-op in TBI mode.
 */
static void
gx_miibus_writereg(device_t dev, int phy, int reg, int value)
{
        struct gx_softc *gx;

        gx = device_get_softc(dev);
        if (gx->gx_tbimode)
                return;

        gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
        gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
            (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
            (value & 0xffff), GX_PHY_WRITE_LEN);
}
849
/*
 * miibus status-change method: mirror the flow-control settings the PHY
 * negotiated (reported via the IFM_FLAG0/IFM_FLAG1 media flags) into
 * the MAC's RX/TX flow-control enables.  No-op in TBI mode.
 */
static void
gx_miibus_statchg(device_t dev)
{
        struct gx_softc *gx = device_get_softc(dev);
        struct mii_data *mii;
        int reg;

        if (gx->gx_tbimode)
                return;

        /*
         * Set flow control behavior to mirror what PHY negotiated.
         */
        mii = device_get_softc(gx->gx_miibus);

        crit_enter();

        reg = CSR_READ_4(gx, GX_CTRL);
        if (mii->mii_media_active & IFM_FLAG0)
                reg |= GX_CTRL_RX_FLOWCTRL;
        else
                reg &= ~GX_CTRL_RX_FLOWCTRL;
        if (mii->mii_media_active & IFM_FLAG1)
                reg |= GX_CTRL_TX_FLOWCTRL;
        else
                reg &= ~GX_CTRL_TX_FLOWCTRL;
        CSR_WRITE_4(gx, GX_CTRL, reg);

        crit_exit();
}
880
/*
 * Interface ioctl handler.  Runs in a critical section to serialize
 * against the interrupt handler.  Handles MTU changes, flag changes
 * (with a fast path for toggling promiscuous mode without a full
 * reinit), multicast list updates, media selection and checksum
 * capability changes; everything else falls through to ether_ioctl().
 */
static int
gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
        struct gx_softc *gx = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct mii_data *mii;
        int mask, error = 0;

        crit_enter();

        switch (command) {
        case SIOCSIFMTU:
                if (ifr->ifr_mtu > GX_MAX_MTU) {
                        error = EINVAL;
                } else {
                        ifp->if_mtu = ifr->ifr_mtu;
                        /* reinit so the long-packet enable tracks the MTU */
                        gx_init(gx);
                }
                break;
        case SIOCSIFFLAGS:
                if ((ifp->if_flags & IFF_UP) == 0) {
                        gx_stop(gx);
                } else if (ifp->if_flags & IFF_RUNNING &&
                    ((ifp->if_flags & IFF_PROMISC) !=
                    (gx->gx_if_flags & IFF_PROMISC))) {
                        /* only promisc changed: flip the RX bit in place */
                        if (ifp->if_flags & IFF_PROMISC)
                                GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
                        else
                                GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
                } else {
                        gx_init(gx);
                }
                /* remember flags so the next call can detect deltas */
                gx->gx_if_flags = ifp->if_flags;
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING)
                        gx_setmulti(gx);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                if (gx->gx_miibus != NULL) {
                        mii = device_get_softc(gx->gx_miibus);
                        error = ifmedia_ioctl(ifp, ifr,
                            &mii->mii_media, command);
                } else {
                        error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command);
                }
                break;
        case SIOCSIFCAP:
                /* toggle HWCSUM as a unit; reinit to reprogram the chip */
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_HWCSUM) {
                        if (IFCAP_HWCSUM & ifp->if_capenable)
                                ifp->if_capenable &= ~IFCAP_HWCSUM;
                        else
                                ifp->if_capenable |= IFCAP_HWCSUM;
                        if (ifp->if_flags & IFF_RUNNING)
                                gx_init(gx);
                }
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        crit_exit();

        return (error);
}
950
/*
 * Hardware-reset the external PHY via the GPIO pins in CTRL_EXT.
 * The write/delay sequence below is order and timing sensitive.
 */
static void
gx_phy_reset(struct gx_softc *gx)
{
        int reg;

        GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);

        /*
         * PHY reset is active low.
         */
        reg = CSR_READ_4(gx, GX_CTRL_EXT);
        reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
        reg |= GX_CTRLX_GPIO_DIR;

        /* Pulse the reset line with 10us settle time between writes. */
        CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
        DELAY(10);

#if 0
        /* post-livingood (cordova) only */
                GX_SETBIT(gx, GX_CTRL, 0x80000000);
                DELAY(1000);
                GX_CLRBIT(gx, GX_CTRL, 0x80000000);
#endif
}
979
980 static void
981 gx_reset(struct gx_softc *gx)
982 {
983
984         /* Disable host interrupts. */
985         CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);
986
987         /* reset chip (THWAP!) */
988         GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
989         DELAY(10);
990 }
991
992 static void
993 gx_stop(struct gx_softc *gx)
994 {
995         struct ifnet *ifp;
996
997         ifp = &gx->arpcom.ac_if;
998
999         /* reset and flush transmitter */
1000         CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);
1001
1002         /* reset and flush receiver */
1003         CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);
1004
1005         /* reset link */
1006         if (gx->gx_tbimode)
1007                 GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
1008
1009         /* Free the RX lists. */
1010         gx_free_rx_ring(gx);
1011
1012         /* Free TX buffers. */
1013         gx_free_tx_ring(gx);
1014
1015         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1016 }
1017
1018 static void
1019 gx_watchdog(struct ifnet *ifp)
1020 {
1021         struct gx_softc *gx;
1022
1023         gx = ifp->if_softc;
1024
1025         device_printf(gx->gx_dev, "watchdog timeout -- resetting\n");
1026         gx_reset(gx);
1027         gx_init(gx);
1028
1029         ifp->if_oerrors++;
1030 }
1031
1032 /*
1033  * Intialize a receive ring descriptor.
1034  */
1035 static int
1036 gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
1037 {
1038         struct mbuf *m_new = NULL;
1039         struct gx_rx_desc *r;
1040
1041         if (m == NULL) {
1042                 MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
1043                 if (m_new == NULL) {
1044                         device_printf(gx->gx_dev, 
1045                             "mbuf allocation failed -- packet dropped\n");
1046                         return (ENOBUFS);
1047                 }
1048                 MCLGET(m_new, MB_DONTWAIT);
1049                 if ((m_new->m_flags & M_EXT) == 0) {
1050                         device_printf(gx->gx_dev, 
1051                             "cluster allocation failed -- packet dropped\n");
1052                         m_freem(m_new);
1053                         return (ENOBUFS);
1054                 }
1055                 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
1056         } else {
1057                 m->m_len = m->m_pkthdr.len = MCLBYTES;
1058                 m->m_data = m->m_ext.ext_buf;
1059                 m->m_next = NULL;
1060                 m_new = m;
1061         }
1062
1063         /*
1064          * XXX
1065          * this will _NOT_ work for large MTU's; it will overwrite
1066          * the end of the buffer.  E.g.: take this out for jumbograms,
1067          * but then that breaks alignment.
1068          */
1069         if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
1070                 m_adj(m_new, ETHER_ALIGN);
1071
1072         gx->gx_cdata.gx_rx_chain[idx] = m_new;
1073         r = &gx->gx_rdata->gx_rx_ring[idx];
1074         r->rx_addr = vtophys(mtod(m_new, caddr_t));
1075         r->rx_staterr = 0;
1076
1077         return (0);
1078 }
1079
1080 /*
1081  * The receive ring can have up to 64K descriptors, which at 2K per mbuf
1082  * cluster, could add up to 128M of memory.  Due to alignment constraints,
1083  * the number of descriptors must be a multiple of 8.  For now, we
1084  * allocate 256 entries and hope that our CPU is fast enough to keep up
1085  * with the NIC.
1086  */
1087 static int
1088 gx_init_rx_ring(struct gx_softc *gx)
1089 {
1090         int i, error;
1091
1092         for (i = 0; i < GX_RX_RING_CNT; i++) {
1093                 error = gx_newbuf(gx, i, NULL);
1094                 if (error)
1095                         return (error);
1096         }
1097
1098         /* bring receiver out of reset state, leave disabled */
1099         CSR_WRITE_4(gx, GX_RX_CONTROL, 0);
1100
1101         /* set up ring registers */
1102         CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
1103             (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring));
1104
1105         CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
1106             GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1107         CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
1108         CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
1109         gx->gx_rx_tail_idx = 0;
1110
1111         return (0);
1112 }
1113
1114 static void
1115 gx_free_rx_ring(struct gx_softc *gx)
1116 {
1117         struct mbuf **mp;
1118         int i;
1119
1120         mp = gx->gx_cdata.gx_rx_chain;
1121         for (i = 0; i < GX_RX_RING_CNT; i++, mp++) {
1122                 if (*mp != NULL) {
1123                         m_freem(*mp);
1124                         *mp = NULL;
1125                 }
1126         }
1127         bzero((void *)gx->gx_rdata->gx_rx_ring,
1128             GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1129
1130         /* release any partially-received packet chain */
1131         if (gx->gx_pkthdr != NULL) {
1132                 m_freem(gx->gx_pkthdr);
1133                 gx->gx_pkthdr = NULL;
1134         }
1135 }
1136
1137 static int
1138 gx_init_tx_ring(struct gx_softc *gx)
1139 {
1140
1141         /* bring transmitter out of reset state, leave disabled */
1142         CSR_WRITE_4(gx, GX_TX_CONTROL, 0);
1143
1144         /* set up ring registers */
1145         CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
1146             (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring));
1147         CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
1148             GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
1149         CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
1150         CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
1151         gx->gx_tx_head_idx = 0;
1152         gx->gx_tx_tail_idx = 0;
1153         gx->gx_txcnt = 0;
1154
1155         /* set up initial TX context */
1156         gx->gx_txcontext = GX_TXCONTEXT_NONE;
1157
1158         return (0);
1159 }
1160
1161 static void
1162 gx_free_tx_ring(struct gx_softc *gx)
1163 {
1164         struct mbuf **mp;
1165         int i;
1166
1167         mp = gx->gx_cdata.gx_tx_chain;
1168         for (i = 0; i < GX_TX_RING_CNT; i++, mp++) {
1169                 if (*mp != NULL) {
1170                         m_freem(*mp);
1171                         *mp = NULL;
1172                 }
1173         }
1174         bzero((void *)&gx->gx_rdata->gx_tx_ring,
1175             GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
1176 }
1177
/*
 * Program the multicast filter.  Currently this only clears the
 * table, i.e. no multicast filtering is actually configured.
 */
static void
gx_setmulti(struct gx_softc *gx)
{
        int i;

        /*
         * wipe out the multicast table
         *
         * NOTE(review): the loop starts at entry 1, so table slot 0 is
         * never written -- confirm whether entry 0 is reserved by the
         * hardware or this is an off-by-one.
         */
        for (i = 1; i < 128; i++)
                CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
}
1187
1188 static void
1189 gx_rxeof(struct gx_softc *gx)
1190 {
1191         struct gx_rx_desc *rx;
1192         struct ifnet *ifp;
1193         int idx, staterr, len;
1194         struct mbuf *m;
1195
1196         gx->gx_rx_interrupts++;
1197
1198         ifp = &gx->arpcom.ac_if;
1199         idx = gx->gx_rx_tail_idx;
1200
1201         while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {
1202
1203                 rx = &gx->gx_rdata->gx_rx_ring[idx];
1204                 m = gx->gx_cdata.gx_rx_chain[idx];
1205                 /*
1206                  * gx_newbuf overwrites status and length bits, so we 
1207                  * make a copy of them here.
1208                  */
1209                 len = rx->rx_len;
1210                 staterr = rx->rx_staterr;
1211
1212                 if (staterr & GX_INPUT_ERROR)
1213                         goto ierror;
1214
1215                 if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
1216                         goto ierror;
1217
1218                 GX_INC(idx, GX_RX_RING_CNT);
1219
1220                 if (staterr & GX_RXSTAT_INEXACT_MATCH) {
1221                         /*
1222                          * multicast packet, must verify against
1223                          * multicast address.
1224                          */
1225                 }
1226
1227                 if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
1228                         if (gx->gx_pkthdr == NULL) {
1229                                 m->m_len = len;
1230                                 m->m_pkthdr.len = len;
1231                                 gx->gx_pkthdr = m;
1232                                 gx->gx_pktnextp = &m->m_next;
1233                         } else {
1234                                 m->m_len = len;
1235                                 gx->gx_pkthdr->m_pkthdr.len += len;
1236                                 *(gx->gx_pktnextp) = m;
1237                                 gx->gx_pktnextp = &m->m_next;
1238                         }
1239                         continue;
1240                 }
1241
1242                 if (gx->gx_pkthdr == NULL) {
1243                         m->m_len = len;
1244                         m->m_pkthdr.len = len;
1245                 } else {
1246                         m->m_len = len;
1247                         gx->gx_pkthdr->m_pkthdr.len += len;
1248                         *(gx->gx_pktnextp) = m;
1249                         m = gx->gx_pkthdr;
1250                         gx->gx_pkthdr = NULL;
1251                 }
1252
1253                 ifp->if_ipackets++;
1254                 m->m_pkthdr.rcvif = ifp;
1255
1256 #define IP_CSMASK       (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
1257 #define TCP_CSMASK \
1258     (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
1259                 if (ifp->if_capenable & IFCAP_RXCSUM) {
1260 #if 0
1261                         /*
1262                          * Intel Erratum #23 indicates that the Receive IP
1263                          * Checksum offload feature has been completely
1264                          * disabled.
1265                          */
1266                         if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) {
1267                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1268                                 if ((staterr & GX_RXERR_IP_CSUM) == 0)
1269                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1270                         }
1271 #endif
1272                         if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
1273                                 m->m_pkthdr.csum_flags |=
1274                                     CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1275                                 m->m_pkthdr.csum_data = 0xffff;
1276                         }
1277                 }
1278                 /*
1279                  * If we received a packet with a vlan tag, pass it
1280                  * to vlan_input() instead of ether_input().
1281                  */
1282                 if (staterr & GX_RXSTAT_VLAN_PKT)
1283                         VLAN_INPUT_TAG(m, rx->rx_special);
1284                 else
1285                         (*ifp->if_input)(ifp, m);
1286                 continue;
1287
1288   ierror:
1289                 ifp->if_ierrors++;
1290                 gx_newbuf(gx, idx, m);
1291
1292                 /* 
1293                  * XXX
1294                  * this isn't quite right.  Suppose we have a packet that
1295                  * spans 5 descriptors (9K split into 2K buffers).  If
1296                  * the 3rd descriptor sets an error, we need to ignore
1297                  * the last two.  The way things stand now, the last two
1298                  * will be accepted as a single packet.
1299                  *
1300                  * we don't worry about this -- the chip may not set an
1301                  * error in this case, and the checksum of the upper layers
1302                  * will catch the error.
1303                  */
1304                 if (gx->gx_pkthdr != NULL) {
1305                         m_freem(gx->gx_pkthdr);
1306                         gx->gx_pkthdr = NULL;
1307                 }
1308                 GX_INC(idx, GX_RX_RING_CNT);
1309         }
1310
1311         gx->gx_rx_tail_idx = idx;
1312         if (--idx < 0)
1313                 idx = GX_RX_RING_CNT - 1;
1314         CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
1315 }
1316
/*
 * Transmit completion handler: reap descriptors the hardware has
 * finished with and free the associated mbuf chains.
 */
static void
gx_txeof(struct gx_softc *gx)
{
        struct ifnet *ifp;
        int idx, cnt;

        gx->gx_tx_interrupts++;

        ifp = &gx->arpcom.ac_if;
        idx = gx->gx_tx_head_idx;
        cnt = gx->gx_txcnt;

        /*
         * If the system chipset performs I/O write buffering, it is 
         * possible for the PIO read of the head descriptor to bypass the
         * memory write of the descriptor, resulting in reading a descriptor
         * which has not been updated yet.
         */
        while (cnt) {
                struct gx_tx_desc_old *tx;

                tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
                cnt--;

                /* Only end-of-packet descriptors carry status. */
                if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
                        GX_INC(idx, GX_TX_RING_CNT);
                        continue;
                }

                /* Stop at the first packet the chip has not completed. */
                if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
                        break;

                ifp->if_opackets++;

                /* The mbuf chain is stored at the end-of-packet slot. */
                m_freem(gx->gx_cdata.gx_tx_chain[idx]);
                gx->gx_cdata.gx_tx_chain[idx] = NULL;
                gx->gx_txcnt = cnt;
                ifp->if_timer = 0;

                GX_INC(idx, GX_TX_RING_CNT);
                gx->gx_tx_head_idx = idx;
        }

        /* Ring drained; allow output to be queued again. */
        if (gx->gx_txcnt == 0)
                ifp->if_flags &= ~IFF_OACTIVE;
}
1363
1364 static void
1365 gx_intr(void *xsc)
1366 {
1367         struct gx_softc *gx = xsc;
1368         struct ifnet *ifp = &gx->arpcom.ac_if;
1369         u_int32_t intr;
1370
1371         crit_enter();
1372
1373         gx->gx_interrupts++;
1374
1375         /* Disable host interrupts. */
1376         CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);
1377
1378         /*
1379          * find out why we're being bothered.
1380          * reading this register automatically clears all bits.
1381          */
1382         intr = CSR_READ_4(gx, GX_INT_READ);
1383
1384         /* Check RX return ring producer/consumer */
1385         if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
1386                 gx_rxeof(gx);
1387
1388         /* Check TX ring producer/consumer */
1389         if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
1390                 gx_txeof(gx);
1391
1392         /*
1393          * handle other interrupts here.
1394          */
1395
1396         /*
1397          * Link change interrupts are not reliable; the interrupt may
1398          * not be generated if the link is lost.  However, the register
1399          * read is reliable, so check that.  Use SEQ errors to possibly
1400          * indicate that the link has changed.
1401          */
1402         if (intr & GX_INT_LINK_CHANGE) {
1403                 if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
1404                         device_printf(gx->gx_dev, "link down\n");
1405                 } else {
1406                         device_printf(gx->gx_dev, "link up\n");
1407                 }
1408         }
1409
1410         /* Turn interrupts on. */
1411         CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);
1412
1413         if (ifp->if_flags & IFF_RUNNING && !ifq_is_empty(&ifp->if_snd))
1414                 gx_start(ifp);
1415
1416         crit_exit();
1417 }
1418
1419 /*
1420  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
1421  * pointers to descriptors.
1422  */
1423 static int
1424 gx_encap(struct gx_softc *gx, struct mbuf *m_head)
1425 {
1426         struct gx_tx_desc_data *tx = NULL;
1427         struct gx_tx_desc_ctx *tctx;
1428         struct mbuf *m;
1429         int idx, cnt, csumopts, txcontext;
1430         struct ifvlan *ifv = NULL;
1431
1432         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1433             m_head->m_pkthdr.rcvif != NULL &&
1434             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1435                 ifv = m_head->m_pkthdr.rcvif->if_softc;
1436
1437         cnt = gx->gx_txcnt;
1438         idx = gx->gx_tx_tail_idx;
1439         txcontext = gx->gx_txcontext;
1440
1441         /*
1442          * Insure we have at least 4 descriptors pre-allocated.
1443          */
1444         if (cnt >= GX_TX_RING_CNT - 4)
1445                 return (ENOBUFS);
1446
1447         /*
1448          * Set up the appropriate offload context if necessary.
1449          */
1450         csumopts = 0;
1451         if (m_head->m_pkthdr.csum_flags) {
1452                 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1453                         csumopts |= GX_TXTCP_OPT_IP_CSUM;
1454                 if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
1455                         csumopts |= GX_TXTCP_OPT_TCP_CSUM;
1456                         txcontext = GX_TXCONTEXT_TCPIP;
1457                 } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
1458                         csumopts |= GX_TXTCP_OPT_TCP_CSUM;
1459                         txcontext = GX_TXCONTEXT_UDPIP;
1460                 } else if (txcontext == GX_TXCONTEXT_NONE)
1461                         txcontext = GX_TXCONTEXT_TCPIP;
1462                 if (txcontext == gx->gx_txcontext)
1463                         goto context_done;
1464
1465                 tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
1466                 tctx->tx_ip_csum_start = ETHER_HDR_LEN;
1467                 tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
1468                 tctx->tx_ip_csum_offset = 
1469                     ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
1470                 tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
1471                 tctx->tx_tcp_csum_end = 0;
1472                 if (txcontext == GX_TXCONTEXT_TCPIP)
1473                         tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
1474                             sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
1475                 else
1476                         tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
1477                             sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
1478                 tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
1479                 tctx->tx_type = 0;
1480                 tctx->tx_status = 0;
1481                 GX_INC(idx, GX_TX_RING_CNT);
1482                 cnt++;
1483         }
1484 context_done:
1485
1486         /*
1487          * Start packing the mbufs in this chain into the transmit
1488          * descriptors.  Stop when we run out of descriptors or hit
1489          * the end of the mbuf chain.
1490          */
1491         for (m = m_head; m != NULL; m = m->m_next) {
1492                 if (m->m_len == 0)
1493                         continue;
1494
1495                 if (cnt == GX_TX_RING_CNT) {
1496 printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
1497                         return (ENOBUFS);
1498 }
1499
1500                 tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
1501                 tx->tx_addr = vtophys(mtod(m, vm_offset_t));
1502                 tx->tx_status = 0;
1503                 tx->tx_len = m->m_len;
1504                 if (gx->arpcom.ac_if.if_hwassist) {
1505                         tx->tx_type = 1;
1506                         tx->tx_command = GX_TXTCP_EXTENSION;
1507                         tx->tx_options = csumopts;
1508                 } else {
1509                         /*
1510                          * This is really a struct gx_tx_desc_old.
1511                          */
1512                         tx->tx_command = 0;
1513                 }
1514                 GX_INC(idx, GX_TX_RING_CNT);
1515                 cnt++;
1516         }
1517
1518         if (tx != NULL) {
1519                 tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
1520                     GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
1521                 if (ifv != NULL) {
1522                         tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
1523                         tx->tx_vlan = ifv->ifv_tag;
1524                 }
1525                 gx->gx_txcnt = cnt;
1526                 gx->gx_tx_tail_idx = idx;
1527                 gx->gx_txcontext = txcontext;
1528                 idx = GX_PREV(idx, GX_TX_RING_CNT);
1529                 gx->gx_cdata.gx_tx_chain[idx] = m_head;
1530
1531                 CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
1532         }
1533         
1534         return (0);
1535 }
1536  
1537 /*
1538  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1539  * to the mbuf data regions directly in the transmit descriptors.
1540  */
1541 static void
1542 gx_start(struct ifnet *ifp)
1543 {
1544         struct gx_softc *gx = ifp->if_softc;
1545         struct mbuf *m_head;
1546
1547         crit_enter();
1548
1549         for (;;) {
1550                 m_head = ifq_poll(&ifp->if_snd);
1551                 if (m_head == NULL)
1552                         break;
1553
1554                 /*
1555                  * Pack the data into the transmit ring. If we
1556                  * don't have room, set the OACTIVE flag and wait
1557                  * for the NIC to drain the ring.
1558                  */
1559                 if (gx_encap(gx, m_head) != 0) {
1560                         ifp->if_flags |= IFF_OACTIVE;
1561                         break;
1562                 }
1563                 ifq_dequeue(&ifp->if_snd, m_head);
1564
1565                 BPF_MTAP(ifp, m_head);
1566
1567                 /*
1568                  * Set a timeout in case the chip goes out to lunch.
1569                  */
1570                 ifp->if_timer = 5;
1571         }
1572
1573         crit_exit();
1574 }