ea3c2dfbb2e2390c8ed2a731833ee8ed202704b0
[dragonfly.git] / sys / dev / netif / gx / if_gx.c
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/dev/gx/if_gx.c,v 1.2.2.3 2001/12/14 19:51:39 jlemon Exp $
30  * $DragonFly: src/sys/dev/netif/gx/Attic/if_gx.c,v 1.24 2006/08/01 18:03:53 swildner Exp $
31  */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/queue.h>
41 #include <sys/serialize.h>
42
43 #include <sys/thread2.h>
44
45 #include <net/if.h>
46 #include <net/ifq_var.h>
47 #include <net/if_arp.h>
48 #include <net/ethernet.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51
52 #include <net/bpf.h>
53 #include <net/if_types.h>
54 #include <net/vlan/if_vlan_var.h>
55
56 #include <netinet/in_systm.h>
57 #include <netinet/in.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
60 #include <netinet/udp.h>
61
62 #include <vm/vm.h>              /* for vtophys */
63 #include <vm/pmap.h>            /* for vtophys */
64 #include <machine/clock.h>      /* for DELAY */
65 #include <machine/bus_memio.h>
66 #include <machine/bus.h>
67 #include <machine/resource.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70
71 #include <bus/pci/pcidevs.h>
72 #include <bus/pci/pcireg.h>
73 #include <bus/pci/pcivar.h>
74
75 #include "../mii_layer/mii.h"
76 #include "../mii_layer/miivar.h"
77
78 #include "if_gxreg.h"
79 #include "if_gxvar.h"
80
81 #include "miibus_if.h"
82
83 #define TUNABLE_TX_INTR_DELAY   100
84 #define TUNABLE_RX_INTR_DELAY   100
85
86 #define GX_CSUM_FEATURES        (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
87
88 /*
89  * Various supported device vendors/types and their names.
90  */
91 struct gx_device {
92         u_int16_t       vendor;
93         u_int16_t       device;
94         int             version_flags;
95         u_int32_t       version_ipg;
96         char            *name;
97 };
98
99 static struct gx_device gx_devs[] = {
100         { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
101             GXF_FORCE_TBI | GXF_OLD_REGS,
102             10 | 2 << 10 | 10 << 20,
103             "Intel Gigabit Ethernet (82542)" },
104         { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
105             GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
106             6 | 8 << 10 | 6 << 20,
107             "Intel Gigabit Ethernet (82543GC-F)" },
108         { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
109             GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
110             8 | 8 << 10 | 6 << 20,
111             "Intel Gigabit Ethernet (82543GC-T)" },
112 #if 0
113 /* notyet.. */
114         { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
115             GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
116             6 | 8 << 10 | 6 << 20,
117             "Intel Gigabit Ethernet (82544EI-F)" },
118         { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
119             GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
120             8 | 8 << 10 | 6 << 20,
121             "Intel Gigabit Ethernet (82544EI-T)" },
122         { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
123             GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
124             8 | 8 << 10 | 6 << 20,
125             "Intel Gigabit Ethernet (82544GC-T)" },
126 #endif
127         { 0, 0, 0, NULL }
128 };
129
130 static struct gx_regs new_regs = {
131         GX_RX_RING_BASE, GX_RX_RING_LEN,
132         GX_RX_RING_HEAD, GX_RX_RING_TAIL,
133         GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,
134
135         GX_TX_RING_BASE, GX_TX_RING_LEN,
136         GX_TX_RING_HEAD, GX_TX_RING_TAIL,
137         GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
138 };
139 static struct gx_regs old_regs = {
140         GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
141         GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
142         GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,
143
144         GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
145         GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
146         GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
147 };
148
149 static int      gx_probe(device_t dev);
150 static int      gx_attach(device_t dev);
151 static int      gx_detach(device_t dev);
152 static void     gx_shutdown(device_t dev);
153
154 static void     gx_intr(void *xsc);
155 static void     gx_init(void *xsc);
156
157 static struct   gx_device *gx_match(device_t dev);
158 static void     gx_eeprom_getword(struct gx_softc *gx, int addr,
159                     u_int16_t *dest);
160 static int      gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
161                     int cnt);
162 static int      gx_ifmedia_upd(struct ifnet *ifp);
163 static void     gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
164 static int      gx_miibus_readreg(device_t dev, int phy, int reg);
165 static void     gx_miibus_writereg(device_t dev, int phy, int reg, int value);
166 static void     gx_miibus_statchg(device_t dev);
167 static int      gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data,
168                     struct ucred *);
169 static void     gx_setmulti(struct gx_softc *gx);
170 static void     gx_reset(struct gx_softc *gx);
171 static void     gx_phy_reset(struct gx_softc *gx);
172 static void     gx_stop(struct gx_softc *gx);
173 static void     gx_watchdog(struct ifnet *ifp);
174 static void     gx_start(struct ifnet *ifp);
175
176 static int      gx_init_rx_ring(struct gx_softc *gx);
177 static void     gx_free_rx_ring(struct gx_softc *gx);
178 static int      gx_init_tx_ring(struct gx_softc *gx);
179 static void     gx_free_tx_ring(struct gx_softc *gx);
180
181 static device_method_t gx_methods[] = {
182         /* Device interface */
183         DEVMETHOD(device_probe,         gx_probe),
184         DEVMETHOD(device_attach,        gx_attach),
185         DEVMETHOD(device_detach,        gx_detach),
186         DEVMETHOD(device_shutdown,      gx_shutdown),
187
188         /* MII interface */
189         DEVMETHOD(miibus_readreg,       gx_miibus_readreg),
190         DEVMETHOD(miibus_writereg,      gx_miibus_writereg),
191         DEVMETHOD(miibus_statchg,       gx_miibus_statchg),
192
193         { 0, 0 }
194 };
195
196 static driver_t gx_driver = {
197         "gx",
198         gx_methods,
199         sizeof(struct gx_softc)
200 };
201
202 static devclass_t gx_devclass;
203
204 DECLARE_DUMMY_MODULE(if_gx);
205 MODULE_DEPEND(if_gx, miibus, 1, 1, 1);
206 DRIVER_MODULE(if_gx, pci, gx_driver, gx_devclass, 0, 0);
207 DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0);
208
209 static struct gx_device *
210 gx_match(device_t dev)
211 {
212         int i;
213
214         for (i = 0; gx_devs[i].name != NULL; i++) {
215                 if ((pci_get_vendor(dev) == gx_devs[i].vendor) &&
216                     (pci_get_device(dev) == gx_devs[i].device))
217                         return (&gx_devs[i]);
218         }
219         return (NULL);
220 }
221
222 static int
223 gx_probe(device_t dev)
224 {
225         struct gx_device *gx_dev;
226
227         gx_dev = gx_match(dev);
228         if (gx_dev == NULL)
229                 return (ENXIO);
230
231         device_set_desc(dev, gx_dev->name);
232         return (0);
233 }
234
/*
 * Attach method: enable PCI memory mapping and bus mastering, map the
 * register window and interrupt, read the station address from the
 * EEPROM, allocate the descriptor ring memory, detect the transceiver
 * type (SERDES/TBI vs. GMII/MII), and register the network interface.
 * On any failure we fall through to gx_detach(), which releases
 * whatever resources were acquired so far.
 */
static int
gx_attach(device_t dev)
{
	struct gx_softc *gx;
	struct gx_device *gx_dev;
	struct ifnet *ifp;
	u_int32_t command;
	int rid;
	int error = 0;

	gx = device_get_softc(dev);
	gx->gx_dev = dev;

	/* Cannot return NULL here: gx_probe() already matched this card. */
	gx_dev = gx_match(dev);
	gx->gx_vflags = gx_dev->version_flags;
	gx->gx_ipg = gx_dev->version_ipg;

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
	if (gx->gx_vflags & GXF_ENABLE_MWI)
		command |= PCIM_CMD_MWIEN;
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	/* Read back to see which enables the device actually accepted. */
	command = pci_read_config(dev, PCIR_COMMAND, 4);

/* XXX check cache line size? */

	if ((command & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "failed to enable memory mapping!\n");
		error = ENXIO;
		goto fail;
	}

	rid = GX_PCI_LOMEM;
	gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
#if 0
/* support PIO mode */
	rid = PCI_LOIO;
	gx->gx_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
	    RF_ACTIVE);
#endif

	if (gx->gx_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	gx->gx_btag = rman_get_bustag(gx->gx_res);
	gx->gx_bhandle = rman_get_bushandle(gx->gx_res);

	/* Allocate interrupt */
	rid = 0;
	gx->gx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (gx->gx_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* compensate for different register mappings */
	if (gx->gx_vflags & GXF_OLD_REGS)
		gx->gx_reg = old_regs;
	else
		gx->gx_reg = new_regs;

	/* Station address is 3 16-bit EEPROM words (6 bytes). */
	if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
	    GX_EEMAP_MAC, 3)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Allocate the ring buffers.  Must be physically contiguous and
	 * below 4GB since the descriptor base is handed to the NIC for DMA.
	 */
	gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF,
	    M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);

	if (gx->gx_rdata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}
	bzero(gx->gx_rdata, sizeof(struct gx_ring_data));

	/* Set default tuneable values. */
	gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
	gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;

	/* Set up ifnet structure */
	ifp = &gx->arpcom.ac_if;
	ifp->if_softc = gx;
	if_initname(ifp, "gx", device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = gx_ioctl;
	ifp->if_start = gx_start;
	ifp->if_watchdog = gx_watchdog;
	ifp->if_init = gx_init;
	ifp->if_mtu = ETHERMTU;
	ifq_set_maxlen(&ifp->if_snd, GX_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/* see if we can enable hardware checksumming */
	if (gx->gx_vflags & GXF_CSUM) {
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_capenable = ifp->if_capabilities;
	}

	/*
	 * Figure out transceiver type: forced TBI (82542) or the
	 * TBI-mode strap read from the status register.
	 */
	if (gx->gx_vflags & GXF_FORCE_TBI ||
	    CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
		gx->gx_tbimode = 1;

	if (gx->gx_tbimode) {
		/* SERDES transceiver */
		ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
		    gx_ifmedia_sts);
		ifmedia_add(&gx->gx_media,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
		ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
	} else {
		/* GMII/MII transceiver */
		gx_phy_reset(gx);
		if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd,
		    gx_ifmedia_sts)) {
			device_printf(dev, "GMII/MII, PHY not detected\n");
			error = ENXIO;
			goto fail;
		}
	}

	/*
	 * Call MI attach routines.
	 */
	ether_ifattach(ifp, gx->arpcom.ac_enaddr, NULL);

	error = bus_setup_intr(dev, gx->gx_irq, INTR_NETSAFE,
			       gx_intr, gx, &gx->gx_intrhand, 
			       ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't setup irq\n");
		goto fail;
	}

	return (0);

fail:
	/* gx_detach() tolerates partially-initialized state. */
	gx_detach(dev);
	return (error);
}
391
/*
 * (Re)initialize the hardware to a running state: program the receive
 * address filters, set up the RX/TX rings, configure the MAC receive
 * and transmit units, flow control, interrupt moderation, and finally
 * bring the link up and unmask interrupts.  Called with the interface
 * serializer held (it is the ifnet if_init hook and is also invoked
 * from gx_ioctl()).
 */
static void
gx_init(void *xsc)
{
	struct gx_softc *gx = (struct gx_softc *)xsc;
	struct ifmedia *ifm;
	struct ifnet *ifp = &gx->arpcom.ac_if;
	u_int16_t *m;
	u_int32_t ctrl;
	int i, tmp;

	/* Disable host interrupts, halt chip. */
	gx_reset(gx);

	/* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
	gx_stop(gx);

	/*
	 * Load our MAC address, invalidate other 15 RX addresses.
	 * The address is written as two 32-bit halves; the second
	 * half carries the address-valid bit.
	 */
	m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
	CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
	CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
	for (i = 1; i < 16; i++)
		CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);

	/* Program multicast filter. */
	gx_setmulti(gx);

	/* Init RX ring. */
	gx_init_rx_ring(gx);

	/* Init TX ring. */
	gx_init_tx_ring(gx);

	if (gx->gx_vflags & GXF_DMA) {
		/* set up DMA control */	
		CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
		CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
	}

	/* enable receiver */
	ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K;
	ctrl |= GX_RXC_BCAST_ACCEPT;

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		ctrl |= GX_RXC_UNI_PROMISC;

	/* This is required if we want to accept jumbo frames */
	if (ifp->if_mtu > ETHERMTU)
		ctrl |= GX_RXC_LONG_PKT_ENABLE;

	/* setup receive checksum control */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
		    GX_CSUM_TCP/* | GX_CSUM_IP*/);

	/* setup transmit checksum control */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = GX_CSUM_FEATURES;

	ctrl |= GX_RXC_STRIP_ETHERCRC;		/* not on 82542? */
	CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);

	/* enable transmitter */
	ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;

	/* XXX we should support half-duplex here too... */
	ctrl |= GX_TXC_COLL_TIME_FDX;

	CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);

	/*
	 * set up recommended IPG times, which vary depending on chip type:
	 *	IPG transmit time:  80ns
	 *	IPG receive time 1: 20ns
	 *	IPG receive time 2: 80ns
	 */
	CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);

	/* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001);
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100);

	/* set up 802.3x MAC flow control type -- 88:08 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808);

	/* Set up tuneables (interrupt moderation delays) */
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);

	/*
	 * Configure chip for correct operation.
	 */
	ctrl = GX_CTRL_DUPLEX;
#if BYTE_ORDER == BIG_ENDIAN
	ctrl |= GX_CTRL_BIGENDIAN;
#endif
	ctrl |= GX_CTRL_VLAN_ENABLE;

	if (gx->gx_tbimode) {
		/*
		 * It seems that TXCW must be initialized from the EEPROM
		 * manually.
		 *
		 * XXX
		 * should probably read the eeprom and re-insert the
		 * values here.
		 */
#define TXCONFIG_WORD	0x000001A0
		CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);

		/* turn on hardware autonegotiate */
		GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
	} else {
		/*
		 * Auto-detect speed from PHY, instead of using direct
		 * indication.  The SLU bit doesn't force the link, but
		 * must be present for ASDE to work.
		 */
		gx_phy_reset(gx);
		ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
	}

	/*
	 * Take chip out of reset and start it running.
	 */
	CSR_WRITE_4(gx, GX_CTRL, ctrl);

	/* Turn interrupts on. */
	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Set the current media.  In non-TBI mode defer to the MII layer;
	 * in TBI mode re-apply the current ifmedia setting (the temporary
	 * swap of ifm_media forces gx_ifmedia_upd() to act on the
	 * currently-selected media word, then restores the user's value).
	 */
	if (gx->gx_miibus != NULL) {
		mii_mediachg(device_get_softc(gx->gx_miibus));
	} else {
		ifm = &gx->gx_media;
		tmp = ifm->ifm_media;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		gx_ifmedia_upd(ifp);
		ifm->ifm_media = tmp;
	}

	/*
	 * XXX
	 * Have the LINK0 flag force the link in TBI mode.
	 */
	if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
		GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
		GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
	}

#if 0
printf("66mhz: %s  64bit: %s\n",
	CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
	CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
#endif
}
553
554 /*
555  * Stop all chip I/O so that the kernel's probe routines don't
556  * get confused by errant DMAs when rebooting.
557  */
558 static void
559 gx_shutdown(device_t dev)
560 {
561         struct gx_softc *gx;
562
563         gx = device_get_softc(dev);
564         gx_reset(gx);
565         gx_stop(gx);
566 }
567
/*
 * Detach method: tear everything down in reverse order of attach.
 * Also used as the error-unwind path of gx_attach(), so every release
 * is guarded and tolerates fields that were never initialized.
 */
static int
gx_detach(device_t dev)
{
	struct gx_softc *gx = device_get_softc(dev);
	struct ifnet *ifp = &gx->arpcom.ac_if;

	if (device_is_attached(dev)) {
		/* Stop the chip and interrupt under the serializer. */
		lwkt_serialize_enter(ifp->if_serializer);
		gx_reset(gx);
		gx_stop(gx);
		bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	if (gx->gx_miibus)
		device_delete_child(gx->gx_dev, gx->gx_miibus);
	bus_generic_detach(gx->gx_dev);

	if (gx->gx_irq)
		bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
	if (gx->gx_res)
		bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
		    GX_PCI_LOMEM, gx->gx_res);

	if (gx->gx_rdata)
		contigfree(gx->gx_rdata, sizeof(struct gx_ring_data),
			   M_DEVBUF);

	/* ifmedia state only exists in TBI (SERDES) mode. */
	if (gx->gx_tbimode)
		ifmedia_removeall(&gx->gx_media);

	return (0);
}
602
/*
 * Bit-bang one 16-bit word out of the serial EEPROM through the
 * GX_EEPROM_CTRL register: select the part, clock out the READ opcode
 * plus word address (MSB first), then clock in 16 data bits.
 */
static void
gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
{
	u_int16_t word = 0;
	u_int32_t base, reg;
	int x;

	/* Prepend the READ opcode to the (masked) word address. */
	addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
	    (addr & ((1 << GX_EE_ADDR_SIZE) - 1));

	/* Idle bus state: chip selected, data and clock lines low. */
	base = CSR_READ_4(gx, GX_EEPROM_CTRL);
	base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
	base |= GX_EE_SELECT;

	CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);

	/* Shift out opcode + address, one clock pulse per bit. */
	for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
		reg = base | (addr & x ? GX_EE_DATA_IN : 0);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
		DELAY(10);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
		DELAY(10);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
		DELAY(10);
	}

	/* Shift in the 16 data bits, MSB first, sampling after each clock. */
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
		DELAY(10);
		reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
		if (reg & GX_EE_DATA_OUT)
			word |= x;
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
		DELAY(10);
	}

	/* Deselect the EEPROM to terminate the transaction. */
	CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
	DELAY(10);

	*dest = word;
}
644         
645 static int
646 gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
647 {
648         u_int16_t *word;
649         int i;
650
651         word = (u_int16_t *)dest;
652         for (i = 0; i < cnt; i ++) {
653                 gx_eeprom_getword(gx, off + i, word);
654                 word++;
655         }
656         return (0);
657 }
658
/*
 * Set media options (ifmedia change callback).
 *
 * In TBI/SERDES mode we handle the request ourselves: AUTO restarts
 * hardware autonegotiation via a link reset; manual 1000baseSX config
 * is not implemented.  In GMII/MII mode the request is delegated to
 * the MII layer, except that 1000baseT half-duplex is rejected
 * outright because it does not work on this hardware.
 */
static int
gx_ifmedia_upd(struct ifnet *ifp)
{
	struct gx_softc *gx;
	struct ifmedia *ifm;
	struct mii_data *mii;

	gx = ifp->if_softc;

	if (gx->gx_tbimode) {
		ifm = &gx->gx_media;
		if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
			return (EINVAL);
		switch (IFM_SUBTYPE(ifm->ifm_media)) {
		case IFM_AUTO:
			/* Pulse LINK_RESET to restart autonegotiation. */
			GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
			GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
			GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
			break;
		case IFM_1000_SX:
			device_printf(gx->gx_dev,
			    "manual config not supported yet.\n");
#if 0
			GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
			config = /* bit symbols for 802.3z */0;
			ctrl |= GX_CTRL_SET_LINK_UP;
			if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
				ctrl |= GX_CTRL_DUPLEX;
#endif
			break;
		default:
			return (EINVAL);
		}
	} else {
		ifm = &gx->gx_media;

		/*
		 * 1000TX half duplex does not work.
		 */
		if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T &&
		    (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
			return (EINVAL);
		mii = device_get_softc(gx->gx_miibus);
		mii_mediachg(mii);
	}
	return (0);
}
710
/*
 * Report current media status (ifmedia status callback).
 *
 * In TBI mode the link state is read straight from the status
 * register; an up link is always reported as 1000baseSX full-duplex.
 * In MII mode the PHY is polled, with 1000baseT half-duplex mapped to
 * IFM_NONE since that mode is unsupported (see gx_ifmedia_upd()).
 */
static void
gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gx_softc *gx;
	struct mii_data *mii;
	u_int32_t status;

	gx = ifp->if_softc;

	if (gx->gx_tbimode) {
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;

		status = CSR_READ_4(gx, GX_STATUS);
		if ((status & GX_STAT_LINKUP) == 0)
			return;

		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		mii = device_get_softc(gx->gx_miibus);
		mii_pollstat(mii);
		/* Mask off the unusable 1000baseT half-duplex mode. */
		if ((mii->mii_media_active & (IFM_1000_T | IFM_HDX)) ==
		    (IFM_1000_T | IFM_HDX))
			mii->mii_media_active = IFM_ETHER | IFM_NONE;
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}
}
743
/*
 * Bit-bang `length' bits of `data' (MSB first) out to the PHY over the
 * MDIO lines in the GX_CTRL register, toggling the MDC clock for each
 * bit.  Used on chips that lack a hardware MDIC register.
 */
static void 
gx_mii_shiftin(struct gx_softc *gx, int data, int length)
{
	u_int32_t reg, x;

	/*
	 * Set up default GPIO direction + PHY data out.
	 */
	reg = CSR_READ_4(gx, GX_CTRL);
	reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
	reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;

	/*
	 * Shift in data to PHY: present each bit on the data line, then
	 * pulse the clock high and low with settle delays.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg |= GX_CTRL_PHY_IO;
		else
			reg &= ~GX_CTRL_PHY_IO;
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
		CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
		DELAY(10);
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
	}
}
772
/*
 * Bit-bang a 16-bit result (MSB first) in from the PHY over the MDIO
 * lines: switch the data line to input, clock a turnaround cycle, then
 * sample one bit per clock pulse.  Returns the assembled word.
 */
static u_int16_t 
gx_mii_shiftout(struct gx_softc *gx)
{
	u_int32_t reg;
	u_int16_t data;
	int x;

	/*
	 * Set up default GPIO direction + PHY data in.
	 */
	reg = CSR_READ_4(gx, GX_CTRL);
	reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
	reg |= GX_CTRL_GPIO_DIR;

	/* Turnaround clock cycle before the PHY starts driving data. */
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);
	/*
	 * Shift out data from PHY, sampling while the clock is high.
	 */
	data = 0;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
		DELAY(10);
		if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
			data |= x;
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
	}
	/* One final idle clock cycle to end the frame. */
	CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);

	return (data);
}
812
/*
 * miibus read-register method: read PHY register `reg' on PHY address
 * `phy' by bit-banging an MII read frame (preamble, then SOF/opcode/
 * phy/reg header) and shifting the 16-bit result back in.  No-op in
 * TBI mode, where there is no MII-attached PHY.
 */
static int
gx_miibus_readreg(device_t dev, int phy, int reg)
{
	struct gx_softc *gx;

	gx = device_get_softc(dev);
	if (gx->gx_tbimode)
		return (0);

	/*
	 * XXX
	 * Note: Cordova has a MDIC register. livingood and < have mii bits
	 */ 

	gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
	gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
	    (phy << 5) | reg, GX_PHY_READ_LEN);
	return (gx_mii_shiftout(gx));
}
832
/*
 * miibus write-register method: write `value' into PHY register `reg'
 * on PHY address `phy' by bit-banging a complete MII write frame
 * (header, turnaround, and 16 data bits in one shift).  No-op in TBI
 * mode.
 */
static void
gx_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct gx_softc *gx;

	gx = device_get_softc(dev);
	if (gx->gx_tbimode)
		return;

	gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
	gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
	    (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
	    (value & 0xffff), GX_PHY_WRITE_LEN);
}
847
848 static void
849 gx_miibus_statchg(device_t dev)
850 {
851         struct gx_softc *gx = device_get_softc(dev);
852         struct mii_data *mii;
853         int reg;
854
855         if (gx->gx_tbimode)
856                 return;
857
858         /*
859          * Set flow control behavior to mirror what PHY negotiated.
860          */
861         mii = device_get_softc(gx->gx_miibus);
862
863         reg = CSR_READ_4(gx, GX_CTRL);
864         if (mii->mii_media_active & IFM_FLAG0)
865                 reg |= GX_CTRL_RX_FLOWCTRL;
866         else
867                 reg &= ~GX_CTRL_RX_FLOWCTRL;
868         if (mii->mii_media_active & IFM_FLAG1)
869                 reg |= GX_CTRL_TX_FLOWCTRL;
870         else
871                 reg &= ~GX_CTRL_TX_FLOWCTRL;
872         CSR_WRITE_4(gx, GX_CTRL, reg);
873 }
874
/*
 * Interface ioctl handler.  Handles MTU changes, up/down and
 * promiscuous-mode flag changes, multicast filter updates, media
 * get/set, and hardware-checksum capability toggling; everything else
 * is passed to ether_ioctl().  Called with the serializer held.
 */
static int
gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct gx_softc *gx = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > GX_MAX_MTU) {
			error = EINVAL;
		} else {
			/* Reinit so jumbo-frame acceptance tracks the MTU. */
			ifp->if_mtu = ifr->ifr_mtu;
			gx_init(gx);
		}
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0) {
			gx_stop(gx);
		} else if (ifp->if_flags & IFF_RUNNING &&
		    ((ifp->if_flags & IFF_PROMISC) != 
		    (gx->gx_if_flags & IFF_PROMISC))) {
			/*
			 * Only the promiscuous bit changed while running:
			 * flip it in the RX control register directly
			 * rather than doing a full reinit.
			 */
			if (ifp->if_flags & IFF_PROMISC)
				GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
			else 
				GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
		} else {
			gx_init(gx);
		}
		/* Remember flags so the next delta can be computed. */
		gx->gx_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			gx_setmulti(gx);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* MII PHY if present, otherwise our own TBI media state. */
		if (gx->gx_miibus != NULL) {
			mii = device_get_softc(gx->gx_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		} else {
			error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command);
		}
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			/* Toggle, then reinit to reprogram csum offload. */
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_flags & IFF_RUNNING)
				gx_init(gx);
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
939
/*
 * Hardware-reset the external PHY by toggling its reset line, which is
 * wired to a GPIO pin in the chip's extended control register.
 */
static void
gx_phy_reset(struct gx_softc *gx)
{
	int reg;

	GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);

	/*
	 * PHY reset is active low.
	 */
	reg = CSR_READ_4(gx, GX_CTRL_EXT);
	reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
	reg |= GX_CTRLX_GPIO_DIR;

	/*
	 * Drive the line high, low, then high again, 10us apart; the
	 * low phase asserts reset since the line is active low.
	 */
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
	DELAY(10);

#if 0
	/* post-livingood (cordova) only */
		GX_SETBIT(gx, GX_CTRL, 0x80000000);
		DELAY(1000);
		GX_CLRBIT(gx, GX_CTRL, 0x80000000);
#endif
}
968
/*
 * Full device reset: mask all interrupts, then hit the master reset bit
 * and give the chip a moment to come back.
 */
static void
gx_reset(struct gx_softc *gx)
{

	/* Disable host interrupts. */
	CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

	/* reset chip (THWAP!) */
	GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
	DELAY(10);
}
980
/*
 * Stop the interface: reset the transmitter and receiver, drop the link
 * (TBI mode only), release all ring buffers, and mark the interface as
 * no longer running.
 */
static void
gx_stop(struct gx_softc *gx)
{
	struct ifnet *ifp;

	ifp = &gx->arpcom.ac_if;

	/* reset and flush transmitter */
	CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);

	/* reset and flush receiver */
	CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);

	/* reset link */
	if (gx->gx_tbimode)
		GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);

	/* Free the RX lists. */
	gx_free_rx_ring(gx);

	/* Free TX buffers. */
	gx_free_tx_ring(gx);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
1006
1007 static void
1008 gx_watchdog(struct ifnet *ifp)
1009 {
1010         struct gx_softc *gx;
1011
1012         gx = ifp->if_softc;
1013
1014         device_printf(gx->gx_dev, "watchdog timeout -- resetting\n");
1015         gx_reset(gx);
1016         gx_init(gx);
1017
1018         ifp->if_oerrors++;
1019 }
1020
/*
 * Initialize a receive ring descriptor.
 */
/*
 * Attach an mbuf cluster to RX descriptor 'idx'.  If 'm' is NULL a fresh
 * header mbuf + cluster is allocated; otherwise 'm' is recycled in place
 * (lengths and data pointer reset to the full cluster).  Returns 0 on
 * success or ENOBUFS on allocation failure.
 */
static int
gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct gx_rx_desc *r;

	if (m == NULL) {
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			device_printf(gx->gx_dev, 
			    "mbuf allocation failed -- packet dropped\n");
			return (ENOBUFS);
		}
		MCLGET(m_new, MB_DONTWAIT);
		if ((m_new->m_flags & M_EXT) == 0) {
			device_printf(gx->gx_dev, 
			    "cluster allocation failed -- packet dropped\n");
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* recycle: restore the mbuf to its full original extent */
		m->m_len = m->m_pkthdr.len = MCLBYTES;
		m->m_data = m->m_ext.ext_buf;
		m->m_next = NULL;
		m_new = m;
	}

	/*
	 * XXX
	 * this will _NOT_ work for large MTU's; it will overwrite
	 * the end of the buffer.  E.g.: take this out for jumbograms,
	 * but then that breaks alignment.
	 */
	if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
		m_adj(m_new, ETHER_ALIGN);

	/* hand the buffer's physical address to the descriptor */
	gx->gx_cdata.gx_rx_chain[idx] = m_new;
	r = &gx->gx_rdata->gx_rx_ring[idx];
	r->rx_addr = vtophys(mtod(m_new, caddr_t));
	r->rx_staterr = 0;

	return (0);
}
1068
1069 /*
1070  * The receive ring can have up to 64K descriptors, which at 2K per mbuf
1071  * cluster, could add up to 128M of memory.  Due to alignment constraints,
1072  * the number of descriptors must be a multiple of 8.  For now, we
1073  * allocate 256 entries and hope that our CPU is fast enough to keep up
1074  * with the NIC.
1075  */
/*
 * Populate the RX ring with fresh mbuf clusters and program the ring
 * base/length/head/tail registers.  Returns 0 or an errno from
 * gx_newbuf().  The receiver is left out of reset but disabled.
 */
static int
gx_init_rx_ring(struct gx_softc *gx)
{
	int i, error;

	for (i = 0; i < GX_RX_RING_CNT; i++) {
		error = gx_newbuf(gx, i, NULL);
		if (error)
			return (error);
	}

	/* bring receiver out of reset state, leave disabled */
	CSR_WRITE_4(gx, GX_RX_CONTROL, 0);

	/* set up ring registers */
	CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
	    (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring));

	CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
	    GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
	/* tail starts at the last descriptor; gx_rxeof() advances it */
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
	gx->gx_rx_tail_idx = 0;

	return (0);
}
1102
1103 static void
1104 gx_free_rx_ring(struct gx_softc *gx)
1105 {
1106         struct mbuf **mp;
1107         int i;
1108
1109         mp = gx->gx_cdata.gx_rx_chain;
1110         for (i = 0; i < GX_RX_RING_CNT; i++, mp++) {
1111                 if (*mp != NULL) {
1112                         m_freem(*mp);
1113                         *mp = NULL;
1114                 }
1115         }
1116         bzero((void *)gx->gx_rdata->gx_rx_ring,
1117             GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1118
1119         /* release any partially-received packet chain */
1120         if (gx->gx_pkthdr != NULL) {
1121                 m_freem(gx->gx_pkthdr);
1122                 gx->gx_pkthdr = NULL;
1123         }
1124 }
1125
/*
 * Program the TX ring base/length registers, reset head/tail to an empty
 * ring, and clear the cached checksum-offload context.  Always returns 0.
 * The transmitter is left out of reset but disabled.
 */
static int
gx_init_tx_ring(struct gx_softc *gx)
{

	/* bring transmitter out of reset state, leave disabled */
	CSR_WRITE_4(gx, GX_TX_CONTROL, 0);

	/* set up ring registers */
	CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
	    (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring));
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
	    GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
	gx->gx_tx_head_idx = 0;
	gx->gx_tx_tail_idx = 0;
	gx->gx_txcnt = 0;

	/* set up initial TX context */
	gx->gx_txcontext = GX_TXCONTEXT_NONE;

	return (0);
}
1149
1150 static void
1151 gx_free_tx_ring(struct gx_softc *gx)
1152 {
1153         struct mbuf **mp;
1154         int i;
1155
1156         mp = gx->gx_cdata.gx_tx_chain;
1157         for (i = 0; i < GX_TX_RING_CNT; i++, mp++) {
1158                 if (*mp != NULL) {
1159                         m_freem(*mp);
1160                         *mp = NULL;
1161                 }
1162         }
1163         bzero((void *)&gx->gx_rdata->gx_tx_ring,
1164             GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
1165 }
1166
1167 static void
1168 gx_setmulti(struct gx_softc *gx)
1169 {
1170         int i;
1171
1172         /* wipe out the multicast table */
1173         for (i = 1; i < 128; i++)
1174                 CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
1175 }
1176
/*
 * Receive-completion handler.  Walks the RX ring from gx_rx_tail_idx,
 * collecting completed descriptors: fragments of a multi-descriptor
 * packet are chained through gx->gx_pkthdr/gx_pktnextp until the
 * END_OF_PACKET descriptor arrives, then the assembled mbuf chain is
 * handed to the stack (or the vlan layer).  RX checksum-offload status
 * is translated into mbuf csum flags.  Finally the hardware tail
 * pointer is advanced to return the replenished descriptors.
 */
static void
gx_rxeof(struct gx_softc *gx)
{
	struct gx_rx_desc *rx;
	struct ifnet *ifp;
	int idx, staterr, len;
	struct mbuf *m;

	gx->gx_rx_interrupts++;

	ifp = &gx->arpcom.ac_if;
	idx = gx->gx_rx_tail_idx;

	while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {

		rx = &gx->gx_rdata->gx_rx_ring[idx];
		m = gx->gx_cdata.gx_rx_chain[idx];
		/*
		 * gx_newbuf overwrites status and length bits, so we
		 * make a copy of them here.
		 */
		len = rx->rx_len;
		staterr = rx->rx_staterr;

		if (staterr & GX_INPUT_ERROR)
			goto ierror;

		/* replenish the slot; on failure recycle m via ierror */
		if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
			goto ierror;

		GX_INC(idx, GX_RX_RING_CNT);

		if (staterr & GX_RXSTAT_INEXACT_MATCH) {
			/*
			 * multicast packet, must verify against
			 * multicast address.
			 */
		}

		/*
		 * Not the last descriptor of the packet: append this
		 * fragment to the pending chain and keep collecting.
		 */
		if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
			if (gx->gx_pkthdr == NULL) {
				m->m_len = len;
				m->m_pkthdr.len = len;
				gx->gx_pkthdr = m;
				gx->gx_pktnextp = &m->m_next;
			} else {
				m->m_len = len;
				gx->gx_pkthdr->m_pkthdr.len += len;
				*(gx->gx_pktnextp) = m;
				gx->gx_pktnextp = &m->m_next;
			}
			continue;
		}

		/* last descriptor: finish either a single- or multi-frag packet */
		if (gx->gx_pkthdr == NULL) {
			m->m_len = len;
			m->m_pkthdr.len = len;
		} else {
			m->m_len = len;
			gx->gx_pkthdr->m_pkthdr.len += len;
			*(gx->gx_pktnextp) = m;
			m = gx->gx_pkthdr;
			gx->gx_pkthdr = NULL;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

#define IP_CSMASK 	(GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
#define TCP_CSMASK \
    (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
		if (ifp->if_capenable & IFCAP_RXCSUM) {
#if 0
			/*
			 * Intel Erratum #23 indicates that the Receive IP
			 * Checksum offload feature has been completely
			 * disabled.
			 *
			 * NOTE(review): this disabled code refers to
			 * IP_CSUM_MASK but only IP_CSMASK is defined
			 * above -- it would not compile if re-enabled.
			 */
			if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((staterr & GX_RXERR_IP_CSUM) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			}
#endif
			if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (staterr & GX_RXSTAT_VLAN_PKT)
			VLAN_INPUT_TAG(m, rx->rx_special);
		else
			ifp->if_input(ifp, m);
		continue;

  ierror:
		/* input error: recycle this mbuf back into the same slot */
		ifp->if_ierrors++;
		gx_newbuf(gx, idx, m);

		/* 
		 * XXX
		 * this isn't quite right.  Suppose we have a packet that
		 * spans 5 descriptors (9K split into 2K buffers).  If
		 * the 3rd descriptor sets an error, we need to ignore
		 * the last two.  The way things stand now, the last two
		 * will be accepted as a single packet.
		 *
		 * we don't worry about this -- the chip may not set an
		 * error in this case, and the checksum of the upper layers
		 * will catch the error.
		 */
		if (gx->gx_pkthdr != NULL) {
			m_freem(gx->gx_pkthdr);
			gx->gx_pkthdr = NULL;
		}
		GX_INC(idx, GX_RX_RING_CNT);
	}

	/* return descriptors to the chip: hardware tail trails ours by one */
	gx->gx_rx_tail_idx = idx;
	if (--idx < 0)
		idx = GX_RX_RING_CNT - 1;
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
}
1305
/*
 * Transmit-completion handler.  Walks forward from gx_tx_head_idx over
 * the gx_txcnt in-flight descriptors, freeing each packet's mbuf chain
 * once its final (END_OF_PKT) descriptor reports DONE.  Stops at the
 * first unfinished packet and clears IFF_OACTIVE when the ring drains.
 */
static void
gx_txeof(struct gx_softc *gx)
{
	struct ifnet *ifp;
	int idx, cnt;

	gx->gx_tx_interrupts++;

	ifp = &gx->arpcom.ac_if;
	idx = gx->gx_tx_head_idx;
	cnt = gx->gx_txcnt;

	/*
	 * If the system chipset performs I/O write buffering, it is 
	 * possible for the PIO read of the head descriptor to bypass the
	 * memory write of the descriptor, resulting in reading a descriptor
	 * which has not been updated yet.
	 */
	while (cnt) {
		struct gx_tx_desc_old *tx;

		tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
		cnt--;

		/* intermediate descriptor of a packet: just skip over it */
		if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
			GX_INC(idx, GX_TX_RING_CNT);
			continue;
		}

		/* last descriptor not done yet: stop reclaiming here */
		if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
			break;

		ifp->if_opackets++;

		/* whole packet done: free the chain stored at its tail slot */
		m_freem(gx->gx_cdata.gx_tx_chain[idx]);
		gx->gx_cdata.gx_tx_chain[idx] = NULL;
		gx->gx_txcnt = cnt;
		ifp->if_timer = 0;

		GX_INC(idx, GX_TX_RING_CNT);
		gx->gx_tx_head_idx = idx;
	}

	if (gx->gx_txcnt == 0)
		ifp->if_flags &= ~IFF_OACTIVE;
}
1352
/*
 * Interrupt handler.  Masks chip interrupts, reads (and thereby clears)
 * the cause register, dispatches RX/TX completion processing and link
 * change reporting, re-enables interrupts, and kicks the transmit queue
 * if work is pending.
 */
static void
gx_intr(void *xsc)
{
	struct gx_softc *gx = xsc;
	struct ifnet *ifp = &gx->arpcom.ac_if;
	u_int32_t intr;

	gx->gx_interrupts++;

	/* Disable host interrupts. */
	CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

	/*
	 * find out why we're being bothered.
	 * reading this register automatically clears all bits.
	 */
	intr = CSR_READ_4(gx, GX_INT_READ);

	/* Check RX return ring producer/consumer */
	if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
		gx_rxeof(gx);

	/* Check TX ring producer/consumer */
	if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
		gx_txeof(gx);

	/*
	 * handle other interrupts here.
	 */

	/*
	 * Link change interrupts are not reliable; the interrupt may
	 * not be generated if the link is lost.  However, the register
	 * read is reliable, so check that.  Use SEQ errors to possibly
	 * indicate that the link has changed.
	 */
	if (intr & GX_INT_LINK_CHANGE) {
		if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
			device_printf(gx->gx_dev, "link down\n");
		} else {
			device_printf(gx->gx_dev, "link up\n");
		}
	}

	/* Turn interrupts on. */
	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

	/* restart transmission if packets queued up while we worked */
	if (ifp->if_flags & IFF_RUNNING && !ifq_is_empty(&ifp->if_snd))
		gx_start(ifp);
}
1403
1404 /*
1405  * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
1406  * pointers to descriptors.
1407  */
/*
 * Map the mbuf chain m_head onto TX ring descriptors.  Emits a checksum
 * context descriptor first when the required offload context differs
 * from the one last programmed (cached in gx->gx_txcontext), then one
 * data descriptor per non-empty mbuf.  Returns 0 on success or ENOBUFS
 * when the ring is too full; on success the hardware tail register is
 * bumped and m_head is recorded for gx_txeof() to free.
 */
static int
gx_encap(struct gx_softc *gx, struct mbuf *m_head)
{
	struct gx_tx_desc_data *tx = NULL;
	struct gx_tx_desc_ctx *tctx;
	struct mbuf *m;
	int idx, cnt, csumopts, txcontext;
	struct ifvlan *ifv = NULL;

	/* detect an mbuf handed down by the vlan layer (M_PROTO1 + L2VLAN) */
	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;

	/* work on local copies; committed only if the whole packet fits */
	cnt = gx->gx_txcnt;
	idx = gx->gx_tx_tail_idx;
	txcontext = gx->gx_txcontext;

	/*
	 * Ensure we have at least 4 descriptors pre-allocated.
	 */
	if (cnt >= GX_TX_RING_CNT - 4)
		return (ENOBUFS);

	/*
	 * Set up the appropriate offload context if necessary.
	 */
	csumopts = 0;
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csumopts |= GX_TXTCP_OPT_IP_CSUM;
		if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
			csumopts |= GX_TXTCP_OPT_TCP_CSUM;
			txcontext = GX_TXCONTEXT_TCPIP;
		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
			csumopts |= GX_TXTCP_OPT_TCP_CSUM;
			txcontext = GX_TXCONTEXT_UDPIP;
		} else if (txcontext == GX_TXCONTEXT_NONE)
			txcontext = GX_TXCONTEXT_TCPIP;
		/* chip already holds this context; no need to re-emit it */
		if (txcontext == gx->gx_txcontext)
			goto context_done;

		/* emit a context descriptor carrying the checksum offsets */
		tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
		tctx->tx_ip_csum_start = ETHER_HDR_LEN;
		tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
		tctx->tx_ip_csum_offset = 
		    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
		tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
		tctx->tx_tcp_csum_end = 0;
		if (txcontext == GX_TXCONTEXT_TCPIP)
			tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
			    sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
		else
			tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
			    sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
		tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
		tctx->tx_type = 0;
		tctx->tx_status = 0;
		GX_INC(idx, GX_TX_RING_CNT);
		cnt++;
	}
context_done:

	/*
	 * Start packing the mbufs in this chain into the transmit
	 * descriptors.  Stop when we run out of descriptors or hit
	 * the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		if (cnt == GX_TX_RING_CNT) {
printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
			return (ENOBUFS);
}

		tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
		tx->tx_addr = vtophys(mtod(m, vm_offset_t));
		tx->tx_status = 0;
		tx->tx_len = m->m_len;
		if (gx->arpcom.ac_if.if_hwassist) {
			tx->tx_type = 1;
			tx->tx_command = GX_TXTCP_EXTENSION;
			tx->tx_options = csumopts;
		} else {
			/*
			 * This is really a struct gx_tx_desc_old.
			 */
			tx->tx_command = 0;
		}
		GX_INC(idx, GX_TX_RING_CNT);
		cnt++;
	}

	if (tx != NULL) {
		/* mark the packet's final descriptor and commit the state */
		tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
		    GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
		if (ifv != NULL) {
			tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
			tx->tx_vlan = ifv->ifv_tag;
		}
		gx->gx_txcnt = cnt;
		gx->gx_tx_tail_idx = idx;
		gx->gx_txcontext = txcontext;
		/* store m_head at the packet's LAST slot; gx_txeof frees it there */
		idx = GX_PREV(idx, GX_TX_RING_CNT);
		gx->gx_cdata.gx_tx_chain[idx] = m_head;

		CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
	}
	
	return (0);
}
1521  
1522 /*
1523  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1524  * to the mbuf data regions directly in the transmit descriptors.
1525  */
static void
gx_start(struct ifnet *ifp)
{
	struct gx_softc *gx = ifp->if_softc;
	struct mbuf *m_head;

	for (;;) {
		/* peek first: the packet stays queued until encap succeeds */
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (gx_encap(gx, m_head) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		ifq_dequeue(&ifp->if_snd, m_head);

		BPF_MTAP(ifp, m_head);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 * gx_txeof() clears it; gx_watchdog() fires otherwise.
		 */
		ifp->if_timer = 5;
	}
}