Merge from vendor branch NTPD:
[dragonfly.git] / sys / dev / netif / gx / if_gx.c
1 /*-
2  * Copyright (c) 1999,2000,2001 Jonathan Lemon
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  * $FreeBSD: src/sys/dev/gx/if_gx.c,v 1.2.2.3 2001/12/14 19:51:39 jlemon Exp $
30  * $DragonFly: src/sys/dev/netif/gx/Attic/if_gx.c,v 1.13 2005/02/18 23:15:00 joerg Exp $
31  */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
36 #include <sys/mbuf.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/queue.h>
41
42 #include <net/if.h>
43 #include <net/ifq_var.h>
44 #include <net/if_arp.h>
45 #include <net/ethernet.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48
49 #include <net/bpf.h>
50 #include <net/if_types.h>
51 #include <net/vlan/if_vlan_var.h>
52
53 #include <netinet/in_systm.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58
59 #include <vm/vm.h>              /* for vtophys */
60 #include <vm/pmap.h>            /* for vtophys */
61 #include <machine/clock.h>      /* for DELAY */
62 #include <machine/bus_memio.h>
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <sys/bus.h>
66 #include <sys/rman.h>
67
68 #include <bus/pci/pcireg.h>
69 #include <bus/pci/pcivar.h>
70
71 #include "../mii_layer/mii.h"
72 #include "../mii_layer/miivar.h"
73
74 #include "if_gxreg.h"
75 #include "if_gxvar.h"
76
77 #include "miibus_if.h"
78
79 #define TUNABLE_TX_INTR_DELAY   100
80 #define TUNABLE_RX_INTR_DELAY   100
81
82 #define GX_CSUM_FEATURES        (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
83
84 /*
85  * Various supported device vendors/types and their names.
86  */
87 struct gx_device {
88         u_int16_t       vendor;
89         u_int16_t       device;
90         int             version_flags;
91         u_int32_t       version_ipg;
92         char            *name;
93 };
94
/*
 * Table of supported adapters.  The IPG field packs three timing
 * values: tx-ipg | rx-ipg1 << 10 | rx-ipg2 << 20 (see gx_init()).
 */
static struct gx_device gx_devs[] = {
	{ INTEL_VENDORID, DEVICEID_WISEMAN,
	    GXF_FORCE_TBI | GXF_OLD_REGS,
	    10 | 2 << 10 | 10 << 20,
	    "Intel Gigabit Ethernet (82542)" },
	{ INTEL_VENDORID, DEVICEID_LIVINGOOD_FIBER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    6 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82543GC-F)" },
	{ INTEL_VENDORID, DEVICEID_LIVINGOOD_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82543GC-T)" },
#if 0
/* notyet.. */
	{ INTEL_VENDORID, DEVICEID_CORDOVA_FIBER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    6 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544EI-F)" },
	{ INTEL_VENDORID, DEVICEID_CORDOVA_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544EI-T)" },
	{ INTEL_VENDORID, DEVICEID_CORDOVA2_COPPER,
	    GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
	    8 | 8 << 10 | 6 << 20,
	    "Intel Gigabit Ethernet (82544GC-T)" },
#endif
	/* End-of-table sentinel: gx_match() stops at a NULL name. */
	{ 0, 0, 0, NULL }
};
125
/*
 * Ring/DMA register offsets for the 82543-and-later layout.  One of
 * these two templates is copied into gx->gx_reg at attach time so the
 * rest of the driver is layout-independent.
 */
static struct gx_regs new_regs = {
	GX_RX_RING_BASE, GX_RX_RING_LEN,
	GX_RX_RING_HEAD, GX_RX_RING_TAIL,
	GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,

	GX_TX_RING_BASE, GX_TX_RING_LEN,
	GX_TX_RING_HEAD, GX_TX_RING_TAIL,
	GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
};
/*
 * Register offsets for the original 82542 layout, selected when the
 * device table entry carries GXF_OLD_REGS.
 */
static struct gx_regs old_regs = {
	GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
	GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
	GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,

	GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
	GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
	GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
};
144
/* Newbus device interface entry points. */
static int	gx_probe(device_t dev);
static int	gx_attach(device_t dev);
static int	gx_detach(device_t dev);
static void	gx_shutdown(device_t dev);

/* Interrupt handler and interface (re)initialization. */
static void	gx_intr(void *xsc);
static void	gx_init(void *xsc);

/* Probe helper, EEPROM access, media and MII bus callbacks. */
static struct	gx_device *gx_match(device_t dev);
static void	gx_eeprom_getword(struct gx_softc *gx, int addr,
		    u_int16_t *dest);
static int	gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
		    int cnt);
static int	gx_ifmedia_upd(struct ifnet *ifp);
static void	gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	gx_miibus_readreg(device_t dev, int phy, int reg);
static void	gx_miibus_writereg(device_t dev, int phy, int reg, int value);
static void	gx_miibus_statchg(device_t dev);

/* ifnet hooks and internal chip control. */
static int	gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data,
		    struct ucred *);
static void	gx_setmulti(struct gx_softc *gx);
static void	gx_reset(struct gx_softc *gx);
static void	gx_phy_reset(struct gx_softc *gx);
static void	gx_release(struct gx_softc *gx);
static void	gx_stop(struct gx_softc *gx);
static void	gx_watchdog(struct ifnet *ifp);
static void	gx_start(struct ifnet *ifp);

/* RX/TX descriptor ring setup and teardown. */
static int	gx_init_rx_ring(struct gx_softc *gx);
static void	gx_free_rx_ring(struct gx_softc *gx);
static int	gx_init_tx_ring(struct gx_softc *gx);
static void	gx_free_tx_ring(struct gx_softc *gx);
177
/* Newbus method dispatch table. */
static device_method_t gx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		gx_probe),
	DEVMETHOD(device_attach,	gx_attach),
	DEVMETHOD(device_detach,	gx_detach),
	DEVMETHOD(device_shutdown,	gx_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	gx_miibus_readreg),
	DEVMETHOD(miibus_writereg,	gx_miibus_writereg),
	DEVMETHOD(miibus_statchg,	gx_miibus_statchg),

	{ 0, 0 }	/* table terminator */
};

static driver_t gx_driver = {
	"gx",			/* driver name */
	gx_methods,
	sizeof(struct gx_softc)	/* size of per-instance softc */
};

static devclass_t gx_devclass;

/* Module glue: hang the driver off the PCI bus and pull in miibus. */
DECLARE_DUMMY_MODULE(if_gx);
MODULE_DEPEND(if_gx, miibus, 1, 1, 1);
DRIVER_MODULE(if_gx, pci, gx_driver, gx_devclass, 0, 0);
DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0);
205
206 static struct gx_device *
207 gx_match(device_t dev)
208 {
209         int i;
210
211         for (i = 0; gx_devs[i].name != NULL; i++) {
212                 if ((pci_get_vendor(dev) == gx_devs[i].vendor) &&
213                     (pci_get_device(dev) == gx_devs[i].device))
214                         return (&gx_devs[i]);
215         }
216         return (NULL);
217 }
218
219 static int
220 gx_probe(device_t dev)
221 {
222         struct gx_device *gx_dev;
223
224         gx_dev = gx_match(dev);
225         if (gx_dev == NULL)
226                 return (ENXIO);
227
228         device_set_desc(dev, gx_dev->name);
229         return (0);
230 }
231
232 static int
233 gx_attach(device_t dev)
234 {
235         struct gx_softc *gx;
236         struct gx_device *gx_dev;
237         struct ifnet *ifp;
238         u_int32_t command;
239         int rid, s;
240         int error = 0;
241
242         s = splimp();
243
244         gx = device_get_softc(dev);
245         bzero(gx, sizeof(struct gx_softc));
246         gx->gx_dev = dev;
247
248         gx_dev = gx_match(dev);
249         gx->gx_vflags = gx_dev->version_flags;
250         gx->gx_ipg = gx_dev->version_ipg;
251
252         mtx_init(&gx->gx_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
253
254         GX_LOCK(gx);
255
256         /*
257          * Map control/status registers.
258          */
259         command = pci_read_config(dev, PCIR_COMMAND, 4);
260         command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
261         if (gx->gx_vflags & GXF_ENABLE_MWI)
262                 command |= PCIM_CMD_MWIEN;
263         pci_write_config(dev, PCIR_COMMAND, command, 4);
264         command = pci_read_config(dev, PCIR_COMMAND, 4);
265
266 /* XXX check cache line size? */
267
268         if ((command & PCIM_CMD_MEMEN) == 0) {
269                 device_printf(dev, "failed to enable memory mapping!\n");
270                 error = ENXIO;
271                 goto fail;
272         }
273
274         rid = GX_PCI_LOMEM;
275         gx->gx_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
276             0, ~0, 1, RF_ACTIVE);
277 #if 0
278 /* support PIO mode */
279         rid = PCI_LOIO;
280         gx->gx_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
281             0, ~0, 1, RF_ACTIVE);
282 #endif
283
284         if (gx->gx_res == NULL) {
285                 device_printf(dev, "couldn't map memory\n");
286                 error = ENXIO;
287                 goto fail;
288         }
289
290         gx->gx_btag = rman_get_bustag(gx->gx_res);
291         gx->gx_bhandle = rman_get_bushandle(gx->gx_res);
292
293         /* Allocate interrupt */
294         rid = 0;
295         gx->gx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
296             RF_SHAREABLE | RF_ACTIVE);
297
298         if (gx->gx_irq == NULL) {
299                 device_printf(dev, "couldn't map interrupt\n");
300                 error = ENXIO;
301                 goto fail;
302         }
303
304         error = bus_setup_intr(dev, gx->gx_irq, INTR_TYPE_NET,
305            gx_intr, gx, &gx->gx_intrhand);
306         if (error) {
307                 device_printf(dev, "couldn't setup irq\n");
308                 goto fail;
309         }
310
311         /* compensate for different register mappings */
312         if (gx->gx_vflags & GXF_OLD_REGS)
313                 gx->gx_reg = old_regs;
314         else
315                 gx->gx_reg = new_regs;
316
317         if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
318             GX_EEMAP_MAC, 3)) {
319                 device_printf(dev, "failed to read station address\n");
320                 error = ENXIO;
321                 goto fail;
322         }
323
324         /* Allocate the ring buffers. */
325         gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF,
326             M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
327
328         if (gx->gx_rdata == NULL) {
329                 device_printf(dev, "no memory for list buffers!\n");
330                 error = ENXIO;
331                 goto fail;
332         }
333         bzero(gx->gx_rdata, sizeof(struct gx_ring_data));
334
335         /* Set default tuneable values. */
336         gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
337         gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;
338
339         /* Set up ifnet structure */
340         ifp = &gx->arpcom.ac_if;
341         ifp->if_softc = gx;
342         if_initname(ifp, "gx", device_get_unit(dev));
343         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
344         ifp->if_ioctl = gx_ioctl;
345         ifp->if_start = gx_start;
346         ifp->if_watchdog = gx_watchdog;
347         ifp->if_init = gx_init;
348         ifp->if_mtu = ETHERMTU;
349         ifq_set_maxlen(&ifp->if_snd, GX_TX_RING_CNT - 1);
350         ifq_set_ready(&ifp->if_snd);
351
352         /* see if we can enable hardware checksumming */
353         if (gx->gx_vflags & GXF_CSUM) {
354                 ifp->if_capabilities = IFCAP_HWCSUM;
355                 ifp->if_capenable = ifp->if_capabilities;
356         }
357
358         /* figure out transciever type */
359         if (gx->gx_vflags & GXF_FORCE_TBI ||
360             CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
361                 gx->gx_tbimode = 1;
362
363         if (gx->gx_tbimode) {
364                 /* SERDES transceiver */
365                 ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
366                     gx_ifmedia_sts);
367                 ifmedia_add(&gx->gx_media,
368                     IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
369                 ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
370                 ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
371         } else {
372                 /* GMII/MII transceiver */
373                 gx_phy_reset(gx);
374                 if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd,
375                     gx_ifmedia_sts)) {
376                         device_printf(dev, "GMII/MII, PHY not detected\n");
377                         error = ENXIO;
378                         goto fail;
379                 }
380         }
381
382         /*
383          * Call MI attach routines.
384          */
385         ether_ifattach(ifp, gx->arpcom.ac_enaddr);
386
387         GX_UNLOCK(gx);
388         splx(s);
389         return (0);
390
391 fail:
392         GX_UNLOCK(gx);
393         gx_release(gx);
394         splx(s);
395         return (error);
396 }
397
398 static void
399 gx_release(struct gx_softc *gx)
400 {
401
402         bus_generic_detach(gx->gx_dev);
403         if (gx->gx_miibus)
404                 device_delete_child(gx->gx_dev, gx->gx_miibus);
405
406         if (gx->gx_intrhand)
407                 bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);
408         if (gx->gx_irq)
409                 bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
410         if (gx->gx_res)
411                 bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
412                     GX_PCI_LOMEM, gx->gx_res);
413 }
414
/*
 * (Re)initialize the hardware and bring the interface up: reset the
 * chip, program the station address and multicast filter, set up the
 * RX/TX rings, receiver/transmitter control, flow control and
 * interrupt moderation, then restart the chip and select the media.
 * Installed as ifp->if_init and also called from the ioctl path.
 */
static void
gx_init(void *xsc)
{
	struct gx_softc *gx = (struct gx_softc *)xsc;
	struct ifmedia *ifm;
	struct ifnet *ifp;
	device_t dev;		/* NOTE(review): assigned below but never used */
	u_int16_t *m;
	u_int32_t ctrl;
	int s, i, tmp;

	dev = gx->gx_dev;
	ifp = &gx->arpcom.ac_if;

	s = splimp();
	GX_LOCK(gx);

	/* Disable host interrupts, halt chip. */
	gx_reset(gx);

	/* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
	gx_stop(gx);

	/* Load our MAC address, invalidate other 15 RX addresses. */
	m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
	CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
	CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
	for (i = 1; i < 16; i++)
		CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);

	/* Program multicast filter. */
	gx_setmulti(gx);

	/* Init RX ring. */
	gx_init_rx_ring(gx);

	/* Init TX ring. */
	gx_init_tx_ring(gx);

	if (gx->gx_vflags & GXF_DMA) {
		/* set up DMA control */
		CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
		CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
	}

	/* enable receiver */
	ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K;
	ctrl |= GX_RXC_BCAST_ACCEPT;

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC)
		ctrl |= GX_RXC_UNI_PROMISC;

	/* This is required if we want to accept jumbo frames */
	if (ifp->if_mtu > ETHERMTU)
		ctrl |= GX_RXC_LONG_PKT_ENABLE;

	/* setup receive checksum control */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
		    GX_CSUM_TCP/* | GX_CSUM_IP*/);

	/* setup transmit checksum control */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = GX_CSUM_FEATURES;

	ctrl |= GX_RXC_STRIP_ETHERCRC;		/* not on 82542? */
	CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);

	/* enable transmitter */
	ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;

	/* XXX we should support half-duplex here too... */
	ctrl |= GX_TXC_COLL_TIME_FDX;

	CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);

	/*
	 * set up recommended IPG times, which vary depending on chip type:
	 *	IPG transmit time:  80ns
	 *	IPG receive time 1: 20ns
	 *	IPG receive time 2: 80ns
	 */
	CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);

	/* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001);
	CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100);

	/* set up 802.3x MAC flow control type -- 88:08 */
	CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808);

	/* Set up tuneables (interrupt moderation delays) */
	CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
	CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);

	/*
	 * Configure chip for correct operation.
	 */
	ctrl = GX_CTRL_DUPLEX;
#if BYTE_ORDER == BIG_ENDIAN
	ctrl |= GX_CTRL_BIGENDIAN;
#endif
	ctrl |= GX_CTRL_VLAN_ENABLE;

	if (gx->gx_tbimode) {
		/*
		 * It seems that TXCW must be initialized from the EEPROM
		 * manually.
		 *
		 * XXX
		 * should probably read the eeprom and re-insert the
		 * values here.
		 */
#define TXCONFIG_WORD	0x000001A0
		CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);

		/* turn on hardware autonegotiate */
		GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
	} else {
		/*
		 * Auto-detect speed from PHY, instead of using direct
		 * indication.  The SLU bit doesn't force the link, but
		 * must be present for ASDE to work.
		 */
		gx_phy_reset(gx);
		ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
	}

	/*
	 * Take chip out of reset and start it running.
	 */
	CSR_WRITE_4(gx, GX_CTRL, ctrl);

	/* Turn interrupts on. */
	CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Set the current media.  For MII/GMII let the PHY layer do it;
	 * for SERDES, temporarily force the selected media through
	 * gx_ifmedia_upd() and restore the user's setting afterwards.
	 */
	if (gx->gx_miibus != NULL) {
		mii_mediachg(device_get_softc(gx->gx_miibus));
	} else {
		ifm = &gx->gx_media;
		tmp = ifm->ifm_media;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		gx_ifmedia_upd(ifp);
		ifm->ifm_media = tmp;
	}

	/*
	 * XXX
	 * Have the LINK0 flag force the link in TBI mode.
	 */
	if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
		GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
		GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
	}

#if 0
printf("66mhz: %s  64bit: %s\n",
	CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
	CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
#endif

	GX_UNLOCK(gx);
	splx(s);
}
586
587 /*
588  * Stop all chip I/O so that the kernel's probe routines don't
589  * get confused by errant DMAs when rebooting.
590  */
591 static void
592 gx_shutdown(device_t dev)
593 {
594         struct gx_softc *gx;
595
596         gx = device_get_softc(dev);
597         gx_reset(gx);
598         gx_stop(gx);
599 }
600
/*
 * Detach the interface: unhook from the network stack, quiesce the
 * chip, release bus resources and free the descriptor ring memory.
 * Always succeeds.
 */
static int
gx_detach(device_t dev)
{
	struct gx_softc *gx;
	struct ifnet *ifp;
	int s;

	s = splimp();

	gx = device_get_softc(dev);
	ifp = &gx->arpcom.ac_if;
	GX_LOCK(gx);

	ether_ifdetach(ifp);
	gx_reset(gx);
	gx_stop(gx);
	ifmedia_removeall(&gx->gx_media);
	gx_release(gx);

	/* Ring memory was contigmalloc()'d in gx_attach(). */
	contigfree(gx->gx_rdata, sizeof(struct gx_ring_data), M_DEVBUF);

	/* Drop the lock before destroying the mutex itself. */
	GX_UNLOCK(gx);
	mtx_destroy(&gx->gx_mtx);
	splx(s);

	return (0);
}
628
/*
 * Read one 16-bit word from the serial EEPROM by bit-banging the
 * EEPROM control register, and store it in *dest.  Each signal edge
 * is followed by a ~10us settle delay.
 */
static void
gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
{
	u_int16_t word = 0;
	u_int32_t base, reg;
	int x;

	/* Build the command: READ opcode followed by the word address. */
	addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
	    (addr & ((1 << GX_EE_ADDR_SIZE) - 1));

	/* Chip-select the EEPROM with data and clock lines idle. */
	base = CSR_READ_4(gx, GX_EEPROM_CTRL);
	base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
	base |= GX_EE_SELECT;

	CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);

	/* Shift the opcode + address out to the EEPROM, MSB first. */
	for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
		reg = base | (addr & x ? GX_EE_DATA_IN : 0);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
		DELAY(10);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
		DELAY(10);
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
		DELAY(10);
	}

	/* Clock the 16 data bits back in, MSB first. */
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
		DELAY(10);
		reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
		if (reg & GX_EE_DATA_OUT)
			word |= x;
		CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
		DELAY(10);
	}

	/* Deselect the EEPROM. */
	CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
	DELAY(10);

	*dest = word;
}
670         
671 static int
672 gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
673 {
674         u_int16_t *word;
675         int i;
676
677         word = (u_int16_t *)dest;
678         for (i = 0; i < cnt; i ++) {
679                 gx_eeprom_getword(gx, off + i, word);
680                 word++;
681         }
682         return (0);
683 }
684
685 /*
686  * Set media options.
687  */
688 static int
689 gx_ifmedia_upd(struct ifnet *ifp)
690 {
691         struct gx_softc *gx;
692         struct ifmedia *ifm;
693         struct mii_data *mii;
694
695         gx = ifp->if_softc;
696
697         if (gx->gx_tbimode) {
698                 ifm = &gx->gx_media;
699                 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
700                         return (EINVAL);
701                 switch (IFM_SUBTYPE(ifm->ifm_media)) {
702                 case IFM_AUTO:
703                         GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
704                         GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
705                         GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
706                         break;
707                 case IFM_1000_SX:
708                         device_printf(gx->gx_dev,
709                             "manual config not supported yet.\n");
710 #if 0
711                         GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
712                         config = /* bit symbols for 802.3z */0;
713                         ctrl |= GX_CTRL_SET_LINK_UP;
714                         if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
715                                 ctrl |= GX_CTRL_DUPLEX;
716 #endif
717                         break;
718                 default:
719                         return (EINVAL);
720                 }
721         } else {
722                 ifm = &gx->gx_media;
723
724                 /*
725                  * 1000TX half duplex does not work.
726                  */
727                 if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
728                     IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T &&
729                     (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
730                         return (EINVAL);
731                 mii = device_get_softc(gx->gx_miibus);
732                 mii_mediachg(mii);
733         }
734         return (0);
735 }
736
737 /*
738  * Report current media status.
739  */
740 static void
741 gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
742 {
743         struct gx_softc *gx;
744         struct mii_data *mii;
745         u_int32_t status;
746
747         gx = ifp->if_softc;
748
749         if (gx->gx_tbimode) {
750                 ifmr->ifm_status = IFM_AVALID;
751                 ifmr->ifm_active = IFM_ETHER;
752
753                 status = CSR_READ_4(gx, GX_STATUS);
754                 if ((status & GX_STAT_LINKUP) == 0)
755                         return;
756
757                 ifmr->ifm_status |= IFM_ACTIVE;
758                 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
759         } else {
760                 mii = device_get_softc(gx->gx_miibus);
761                 mii_pollstat(mii);
762                 if ((mii->mii_media_active & (IFM_1000_T | IFM_HDX)) ==
763                     (IFM_1000_T | IFM_HDX))
764                         mii->mii_media_active = IFM_ETHER | IFM_NONE;
765                 ifmr->ifm_active = mii->mii_media_active;
766                 ifmr->ifm_status = mii->mii_media_status;
767         }
768 }
769
/*
 * Clock `length' bits of `data' (MSB first) out to the PHY through
 * the bit-banged MII management pins in the control register.
 */
static void
gx_mii_shiftin(struct gx_softc *gx, int data, int length)
{
	u_int32_t reg, x;

	/*
	 * Set up default GPIO direction + PHY data out.
	 */
	reg = CSR_READ_4(gx, GX_CTRL);
	reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
	reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;

	/*
	 * Shift in data to PHY, one bit per clock pulse, ~10us edges.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg |= GX_CTRL_PHY_IO;
		else
			reg &= ~GX_CTRL_PHY_IO;
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
		CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
		DELAY(10);
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
	}
}
798
/*
 * Clock a 16-bit value (MSB first) in from the PHY through the
 * bit-banged MII management pins, bracketed by extra clock cycles
 * before and after the data bits.  Returns the word read.
 */
static u_int16_t
gx_mii_shiftout(struct gx_softc *gx)
{
	u_int32_t reg;
	u_int16_t data;
	int x;

	/*
	 * Set up default GPIO direction + PHY data in.
	 */
	reg = CSR_READ_4(gx, GX_CTRL);
	reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
	reg |= GX_CTRL_GPIO_DIR;

	/* One leading clock cycle before sampling data. */
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);
	/*
	 * Shift out data from PHY, sampling on the rising clock edge.
	 */
	data = 0;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
		DELAY(10);
		if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
			data |= x;
		CSR_WRITE_4(gx, GX_CTRL, reg);
		DELAY(10);
	}
	/* One trailing clock cycle to finish the frame. */
	CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
	DELAY(10);
	CSR_WRITE_4(gx, GX_CTRL, reg);
	DELAY(10);

	return (data);
}
838
839 static int
840 gx_miibus_readreg(device_t dev, int phy, int reg)
841 {
842         struct gx_softc *gx;
843
844         gx = device_get_softc(dev);
845         if (gx->gx_tbimode)
846                 return (0);
847
848         /*
849          * XXX
850          * Note: Cordova has a MDIC register. livingood and < have mii bits
851          */ 
852
853         gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
854         gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
855             (phy << 5) | reg, GX_PHY_READ_LEN);
856         return (gx_mii_shiftout(gx));
857 }
858
859 static void
860 gx_miibus_writereg(device_t dev, int phy, int reg, int value)
861 {
862         struct gx_softc *gx;
863
864         gx = device_get_softc(dev);
865         if (gx->gx_tbimode)
866                 return;
867
868         gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
869         gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
870             (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
871             (value & 0xffff), GX_PHY_WRITE_LEN);
872 }
873
874 static void
875 gx_miibus_statchg(device_t dev)
876 {
877         struct gx_softc *gx;
878         struct mii_data *mii;
879         int reg, s;
880
881         gx = device_get_softc(dev);
882         if (gx->gx_tbimode)
883                 return;
884
885         /*
886          * Set flow control behavior to mirror what PHY negotiated.
887          */
888         mii = device_get_softc(gx->gx_miibus);
889
890         s = splimp();
891         GX_LOCK(gx);
892
893         reg = CSR_READ_4(gx, GX_CTRL);
894         if (mii->mii_media_active & IFM_FLAG0)
895                 reg |= GX_CTRL_RX_FLOWCTRL;
896         else
897                 reg &= ~GX_CTRL_RX_FLOWCTRL;
898         if (mii->mii_media_active & IFM_FLAG1)
899                 reg |= GX_CTRL_TX_FLOWCTRL;
900         else
901                 reg &= ~GX_CTRL_TX_FLOWCTRL;
902         CSR_WRITE_4(gx, GX_CTRL, reg);
903
904         GX_UNLOCK(gx);
905         splx(s);
906 }
907
/*
 * Handle socket ioctls directed at the interface.
 *
 * Raises splimp() and holds the softc lock for the duration.
 * Returns 0 on success or an errno value.
 */
static int
gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
        struct gx_softc *gx = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct mii_data *mii;
        int s, mask, error = 0;

        s = splimp();
        GX_LOCK(gx);

        switch (command) {
        case SIOCSIFADDR:
        case SIOCGIFADDR:
                error = ether_ioctl(ifp, command, data);
                break;
        case SIOCSIFMTU:
                /* NOTE(review): no lower bound is enforced on the MTU. */
                if (ifr->ifr_mtu > GX_MAX_MTU) {
                        error = EINVAL;
                } else {
                        ifp->if_mtu = ifr->ifr_mtu;
                        /* Re-init so the chip picks up the new MTU. */
                        gx_init(gx);
                }
                break;
        case SIOCSIFFLAGS:
                if ((ifp->if_flags & IFF_UP) == 0) {
                        gx_stop(gx);
                } else if (ifp->if_flags & IFF_RUNNING &&
                    ((ifp->if_flags & IFF_PROMISC) != 
                    (gx->gx_if_flags & IFF_PROMISC))) {
                        /*
                         * Only the promiscuous bit changed while the
                         * interface is running: toggle it in hardware
                         * without a full re-init.
                         */
                        if (ifp->if_flags & IFF_PROMISC)
                                GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
                        else 
                                GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
                } else {
                        gx_init(gx);
                }
                /* Remember the flags so the next call can diff them. */
                gx->gx_if_flags = ifp->if_flags;
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING)
                        gx_setmulti(gx);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                /*
                 * Media requests go to the PHY's ifmedia if an MII bus
                 * is attached, otherwise to the driver's own (TBI)
                 * media handler.
                 */
                if (gx->gx_miibus != NULL) {
                        mii = device_get_softc(gx->gx_miibus);
                        error = ifmedia_ioctl(ifp, ifr,
                            &mii->mii_media, command);
                } else {
                        error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command);
                }
                break;
        case SIOCSIFCAP:
                /* Toggle hardware checksum offload; re-init if running. */
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_HWCSUM) {
                        if (IFCAP_HWCSUM & ifp->if_capenable)
                                ifp->if_capenable &= ~IFCAP_HWCSUM;
                        else
                                ifp->if_capenable |= IFCAP_HWCSUM;
                        if (ifp->if_flags & IFF_RUNNING)
                                gx_init(gx);
                }
                break;
        default:
                error = EINVAL;
                break;
        }

        GX_UNLOCK(gx);
        splx(s);
        return (error);
}
982
/*
 * Hard-reset the external PHY via the chip's GPIO pins in the
 * extended control register.
 */
static void
gx_phy_reset(struct gx_softc *gx)
{
        int reg;

        GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);

        /*
         * PHY reset is active low.
         */
        reg = CSR_READ_4(gx, GX_CTRL_EXT);
        reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
        reg |= GX_CTRLX_GPIO_DIR;

        /*
         * Pulse the active-low reset line: deassert, assert for 10us,
         * then deassert again, leaving the PHY out of reset.
         */
        CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
        DELAY(10);

#if 0
        /* post-livingood (cordova) only */
                GX_SETBIT(gx, GX_CTRL, 0x80000000);
                DELAY(1000);
                GX_CLRBIT(gx, GX_CTRL, 0x80000000);
#endif
}
1011
/*
 * Full device reset: mask all interrupt sources, then assert the
 * chip's global reset bit and give it 10us to settle.
 */
static void
gx_reset(struct gx_softc *gx)
{

        /* Disable host interrupts. */
        CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

        /* reset chip (THWAP!) */
        GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
        DELAY(10);
}
1023
/*
 * Stop the interface: reset both DMA engines, free all ring buffers,
 * and clear the RUNNING/OACTIVE flags.  Interrupts are not masked
 * here (gx_reset() does that).
 */
static void
gx_stop(struct gx_softc *gx)
{
        struct ifnet *ifp;

        ifp = &gx->arpcom.ac_if;

        /* reset and flush transmitter */
        CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);

        /* reset and flush receiver */
        CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);

        /* reset link (TBI/fiber mode only) */
        if (gx->gx_tbimode)
                GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);

        /* Free the RX lists. */
        gx_free_rx_ring(gx);

        /* Free TX buffers. */
        gx_free_tx_ring(gx);

        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
1049
1050 static void
1051 gx_watchdog(struct ifnet *ifp)
1052 {
1053         struct gx_softc *gx;
1054
1055         gx = ifp->if_softc;
1056
1057         device_printf(gx->gx_dev, "watchdog timeout -- resetting\n");
1058         gx_reset(gx);
1059         gx_init(gx);
1060
1061         ifp->if_oerrors++;
1062 }
1063
/*
 * Initialize a receive ring descriptor.
 *
 * If 'm' is NULL a fresh mbuf cluster is allocated; otherwise the
 * caller's mbuf is recycled in place (its lengths and data pointer
 * are reset).  Returns 0 on success or ENOBUFS on allocation failure.
 */
static int
gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct gx_rx_desc *r;

        if (m == NULL) {
                MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
                if (m_new == NULL) {
                        device_printf(gx->gx_dev, 
                            "mbuf allocation failed -- packet dropped\n");
                        return (ENOBUFS);
                }
                MCLGET(m_new, MB_DONTWAIT);
                if ((m_new->m_flags & M_EXT) == 0) {
                        device_printf(gx->gx_dev, 
                            "cluster allocation failed -- packet dropped\n");
                        m_freem(m_new);
                        return (ENOBUFS);
                }
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        } else {
                /* Recycle: rewind the data pointer to the cluster start. */
                m->m_len = m->m_pkthdr.len = MCLBYTES;
                m->m_data = m->m_ext.ext_buf;
                m->m_next = NULL;
                m_new = m;
        }

        /*
         * XXX
         * this will _NOT_ work for large MTU's; it will overwrite
         * the end of the buffer.  E.g.: take this out for jumbograms,
         * but then that breaks alignment.
         */
        if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
                m_adj(m_new, ETHER_ALIGN);

        /* Attach the mbuf to the slot and hand the descriptor to the chip. */
        gx->gx_cdata.gx_rx_chain[idx] = m_new;
        r = &gx->gx_rdata->gx_rx_ring[idx];
        r->rx_addr = vtophys(mtod(m_new, caddr_t));
        r->rx_staterr = 0;

        return (0);
}
1111
1112 /*
1113  * The receive ring can have up to 64K descriptors, which at 2K per mbuf
1114  * cluster, could add up to 128M of memory.  Due to alignment constraints,
1115  * the number of descriptors must be a multiple of 8.  For now, we
1116  * allocate 256 entries and hope that our CPU is fast enough to keep up
1117  * with the NIC.
1118  */
1119 static int
1120 gx_init_rx_ring(struct gx_softc *gx)
1121 {
1122         int i, error;
1123
1124         for (i = 0; i < GX_RX_RING_CNT; i++) {
1125                 error = gx_newbuf(gx, i, NULL);
1126                 if (error)
1127                         return (error);
1128         }
1129
1130         /* bring receiver out of reset state, leave disabled */
1131         CSR_WRITE_4(gx, GX_RX_CONTROL, 0);
1132
1133         /* set up ring registers */
1134         CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
1135             (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring));
1136
1137         CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
1138             GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1139         CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
1140         CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
1141         gx->gx_rx_tail_idx = 0;
1142
1143         return (0);
1144 }
1145
1146 static void
1147 gx_free_rx_ring(struct gx_softc *gx)
1148 {
1149         struct mbuf **mp;
1150         int i;
1151
1152         mp = gx->gx_cdata.gx_rx_chain;
1153         for (i = 0; i < GX_RX_RING_CNT; i++, mp++) {
1154                 if (*mp != NULL) {
1155                         m_freem(*mp);
1156                         *mp = NULL;
1157                 }
1158         }
1159         bzero((void *)gx->gx_rdata->gx_rx_ring,
1160             GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1161
1162         /* release any partially-received packet chain */
1163         if (gx->gx_pkthdr != NULL) {
1164                 m_freem(gx->gx_pkthdr);
1165                 gx->gx_pkthdr = NULL;
1166         }
1167 }
1168
/*
 * Prepare the transmit ring: take the transmitter out of reset (but
 * leave it disabled), program the ring base/length into the chip,
 * and clear all software ring state.  Always returns 0.
 */
static int
gx_init_tx_ring(struct gx_softc *gx)
{

        /* bring transmitter out of reset state, leave disabled */
        CSR_WRITE_4(gx, GX_TX_CONTROL, 0);

        /* set up ring registers */
        CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
            (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring));
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
            GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
        gx->gx_tx_head_idx = 0;
        gx->gx_tx_tail_idx = 0;
        gx->gx_txcnt = 0;

        /* set up initial TX context (no offload context loaded yet) */
        gx->gx_txcontext = GX_TXCONTEXT_NONE;

        return (0);
}
1192
1193 static void
1194 gx_free_tx_ring(struct gx_softc *gx)
1195 {
1196         struct mbuf **mp;
1197         int i;
1198
1199         mp = gx->gx_cdata.gx_tx_chain;
1200         for (i = 0; i < GX_TX_RING_CNT; i++, mp++) {
1201                 if (*mp != NULL) {
1202                         m_freem(*mp);
1203                         *mp = NULL;
1204                 }
1205         }
1206         bzero((void *)&gx->gx_rdata->gx_tx_ring,
1207             GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
1208 }
1209
1210 static void
1211 gx_setmulti(struct gx_softc *gx)
1212 {
1213         int i;
1214
1215         /* wipe out the multicast table */
1216         for (i = 1; i < 128; i++)
1217                 CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
1218 }
1219
/*
 * RX completion handler: drain all completed descriptors from the
 * receive ring, reassembling packets that span multiple descriptors
 * via the gx_pkthdr/gx_pktnextp partial chain, and pass finished
 * frames up the stack.  Finishes by advancing the chip's ring tail.
 */
static void
gx_rxeof(struct gx_softc *gx)
{
        struct gx_rx_desc *rx;
        struct ifnet *ifp;
        int idx, staterr, len;
        struct mbuf *m;

        gx->gx_rx_interrupts++;

        ifp = &gx->arpcom.ac_if;
        idx = gx->gx_rx_tail_idx;

        while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {

                rx = &gx->gx_rdata->gx_rx_ring[idx];
                m = gx->gx_cdata.gx_rx_chain[idx];
                /*
                 * gx_newbuf overwrites status and length bits, so we 
                 * make a copy of them here.
                 */
                len = rx->rx_len;
                staterr = rx->rx_staterr;

                if (staterr & GX_INPUT_ERROR)
                        goto ierror;

                /* Replenish the slot before we consume 'm'. */
                if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
                        goto ierror;

                GX_INC(idx, GX_RX_RING_CNT);

                if (staterr & GX_RXSTAT_INEXACT_MATCH) {
                        /*
                         * multicast packet, must verify against
                         * multicast address.
                         */
                }

                /*
                 * Not the final descriptor of the packet: append this
                 * buffer to the partial chain and keep scanning.
                 */
                if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
                        if (gx->gx_pkthdr == NULL) {
                                m->m_len = len;
                                m->m_pkthdr.len = len;
                                gx->gx_pkthdr = m;
                                gx->gx_pktnextp = &m->m_next;
                        } else {
                                m->m_len = len;
                                m->m_flags &= ~M_PKTHDR;
                                gx->gx_pkthdr->m_pkthdr.len += len;
                                *(gx->gx_pktnextp) = m;
                                gx->gx_pktnextp = &m->m_next;
                        }
                        continue;
                }

                /* Final (or only) descriptor: complete the chain. */
                if (gx->gx_pkthdr == NULL) {
                        m->m_len = len;
                        m->m_pkthdr.len = len;
                } else {
                        m->m_len = len;
                        m->m_flags &= ~M_PKTHDR;
                        gx->gx_pkthdr->m_pkthdr.len += len;
                        *(gx->gx_pktnextp) = m;
                        m = gx->gx_pkthdr;
                        gx->gx_pkthdr = NULL;
                }

                ifp->if_ipackets++;
                m->m_pkthdr.rcvif = ifp;

#define IP_CSMASK       (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
#define TCP_CSMASK \
    (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
                if (ifp->if_capenable & IFCAP_RXCSUM) {
#if 0
                        /*
                         * Intel Erratum #23 indicates that the Receive IP
                         * Checksum offload feature has been completely
                         * disabled.
                         */
                        if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) {
                                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                                if ((staterr & GX_RXERR_IP_CSUM) == 0)
                                        m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                        }
#endif
                        /* Mark TCP/UDP checksum as verified by hardware. */
                        if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
                                m->m_pkthdr.csum_flags |=
                                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                m->m_pkthdr.csum_data = 0xffff;
                        }
                }
                /*
                 * If we received a packet with a vlan tag, pass it
                 * to vlan_input() instead of ether_input().
                 */
                if (staterr & GX_RXSTAT_VLAN_PKT)
                        VLAN_INPUT_TAG(m, rx->rx_special);
                else
                        (*ifp->if_input)(ifp, m);
                continue;

  ierror:
                ifp->if_ierrors++;
                /* Recycle the mbuf back into the same descriptor slot. */
                gx_newbuf(gx, idx, m);

                /* 
                 * XXX
                 * this isn't quite right.  Suppose we have a packet that
                 * spans 5 descriptors (9K split into 2K buffers).  If
                 * the 3rd descriptor sets an error, we need to ignore
                 * the last two.  The way things stand now, the last two
                 * will be accepted as a single packet.
                 *
                 * we don't worry about this -- the chip may not set an
                 * error in this case, and the checksum of the upper layers
                 * will catch the error.
                 */
                if (gx->gx_pkthdr != NULL) {
                        m_freem(gx->gx_pkthdr);
                        gx->gx_pkthdr = NULL;
                }
                GX_INC(idx, GX_RX_RING_CNT);
        }

        /* Tell the chip how far we got; its tail lags one behind ours. */
        gx->gx_rx_tail_idx = idx;
        if (--idx < 0)
                idx = GX_RX_RING_CNT - 1;
        CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
}
1350
/*
 * TX completion handler: walk the transmit ring from the head index,
 * reclaiming descriptors and freeing the mbuf chains of packets the
 * chip has finished sending.  Clears OACTIVE when the ring drains.
 */
static void
gx_txeof(struct gx_softc *gx)
{
        struct ifnet *ifp;
        int idx, cnt;

        gx->gx_tx_interrupts++;

        ifp = &gx->arpcom.ac_if;
        idx = gx->gx_tx_head_idx;
        cnt = gx->gx_txcnt;

        /*
         * If the system chipset performs I/O write buffering, it is 
         * possible for the PIO read of the head descriptor to bypass the
         * memory write of the descriptor, resulting in reading a descriptor
         * which has not been updated yet.
         */
        while (cnt) {
                struct gx_tx_desc_old *tx;

                tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
                cnt--;

                /*
                 * Only the last descriptor of a packet carries the
                 * completion status; skip forward to it.
                 */
                if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
                        GX_INC(idx, GX_TX_RING_CNT);
                        continue;
                }

                /* Chip hasn't finished this packet yet; stop here. */
                if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
                        break;

                ifp->if_opackets++;

                /* gx_encap() stored the chain at the packet's last slot. */
                m_freem(gx->gx_cdata.gx_tx_chain[idx]);
                gx->gx_cdata.gx_tx_chain[idx] = NULL;
                gx->gx_txcnt = cnt;
                ifp->if_timer = 0;

                GX_INC(idx, GX_TX_RING_CNT);
                gx->gx_tx_head_idx = idx;
        }

        if (gx->gx_txcnt == 0)
                ifp->if_flags &= ~IFF_OACTIVE;
}
1397
/*
 * Interrupt handler: read (and thereby ack) the pending interrupt
 * causes, dispatch RX/TX completion processing, report link changes,
 * re-enable interrupts, and restart transmission if work is queued.
 */
static void
gx_intr(void *xsc)
{
        struct gx_softc *gx;
        struct ifnet *ifp;
        u_int32_t intr;
        int s;

        gx = xsc;
        ifp = &gx->arpcom.ac_if;

        s = splimp();

        gx->gx_interrupts++;

        /* Disable host interrupts. */
        CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

        /*
         * find out why we're being bothered.
         * reading this register automatically clears all bits.
         */
        intr = CSR_READ_4(gx, GX_INT_READ);

        /* Check RX return ring producer/consumer */
        if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
                gx_rxeof(gx);

        /* Check TX ring producer/consumer */
        if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
                gx_txeof(gx);

        /*
         * handle other interrupts here.
         */

        /*
         * Link change interrupts are not reliable; the interrupt may
         * not be generated if the link is lost.  However, the register
         * read is reliable, so check that.  Use SEQ errors to possibly
         * indicate that the link has changed.
         */
        if (intr & GX_INT_LINK_CHANGE) {
                if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
                        device_printf(gx->gx_dev, "link down\n");
                } else {
                        device_printf(gx->gx_dev, "link up\n");
                }
        }

        /* Turn interrupts on. */
        CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

        /* Kick the transmitter if packets are waiting to go out. */
        if (ifp->if_flags & IFF_RUNNING && !ifq_is_empty(&ifp->if_snd))
                gx_start(ifp);

        splx(s);
}
1456
/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 *
 * If checksum offload is requested and the required offload context
 * differs from the one currently loaded in the chip, a context
 * descriptor is emitted first.  Returns 0 on success or ENOBUFS when
 * the ring lacks room; on ENOBUFS the softc ring state is unchanged.
 */
static int
gx_encap(struct gx_softc *gx, struct mbuf *m_head)
{
        struct gx_tx_desc_data *tx = NULL;
        struct gx_tx_desc_ctx *tctx;
        struct mbuf *m;
        int idx, cnt, csumopts, txcontext;
        struct ifvlan *ifv = NULL;

        /* Detect a VLAN-tagged mbuf handed down by the vlan driver. */
        if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
            m_head->m_pkthdr.rcvif != NULL &&
            m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
                ifv = m_head->m_pkthdr.rcvif->if_softc;

        /* Work on local copies; commit to the softc only on success. */
        cnt = gx->gx_txcnt;
        idx = gx->gx_tx_tail_idx;
        txcontext = gx->gx_txcontext;

        /*
         * Insure we have at least 4 descriptors pre-allocated.
         */
        if (cnt >= GX_TX_RING_CNT - 4)
                return (ENOBUFS);

        /*
         * Set up the appropriate offload context if necessary.
         */
        csumopts = 0;
        if (m_head->m_pkthdr.csum_flags) {
                if (m_head->m_pkthdr.csum_flags & CSUM_IP)
                        csumopts |= GX_TXTCP_OPT_IP_CSUM;
                if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
                        csumopts |= GX_TXTCP_OPT_TCP_CSUM;
                        txcontext = GX_TXCONTEXT_TCPIP;
                } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
                        csumopts |= GX_TXTCP_OPT_TCP_CSUM;
                        txcontext = GX_TXCONTEXT_UDPIP;
                } else if (txcontext == GX_TXCONTEXT_NONE)
                        txcontext = GX_TXCONTEXT_TCPIP;
                /* Context already loaded from a prior packet: reuse it. */
                if (txcontext == gx->gx_txcontext)
                        goto context_done;

                /* Emit a context descriptor carrying checksum offsets. */
                tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
                tctx->tx_ip_csum_start = ETHER_HDR_LEN;
                tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
                tctx->tx_ip_csum_offset = 
                    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
                tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
                tctx->tx_tcp_csum_end = 0;
                if (txcontext == GX_TXCONTEXT_TCPIP)
                        tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
                            sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
                else
                        tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
                            sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
                tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
                tctx->tx_type = 0;
                tctx->tx_status = 0;
                GX_INC(idx, GX_TX_RING_CNT);
                cnt++;
        }
context_done:

        /*
         * Start packing the mbufs in this chain into the transmit
         * descriptors.  Stop when we run out of descriptors or hit
         * the end of the mbuf chain.
         */
        for (m = m_head; m != NULL; m = m->m_next) {
                if (m->m_len == 0)
                        continue;

                if (cnt == GX_TX_RING_CNT) {
printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
                        return (ENOBUFS);
}

                tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
                tx->tx_addr = vtophys(mtod(m, vm_offset_t));
                tx->tx_status = 0;
                tx->tx_len = m->m_len;
                if (gx->arpcom.ac_if.if_hwassist) {
                        tx->tx_type = 1;
                        tx->tx_command = GX_TXTCP_EXTENSION;
                        tx->tx_options = csumopts;
                } else {
                        /*
                         * This is really a struct gx_tx_desc_old.
                         */
                        tx->tx_command = 0;
                }
                GX_INC(idx, GX_TX_RING_CNT);
                cnt++;
        }

        /* Commit: finalize the last descriptor and publish ring state. */
        if (tx != NULL) {
                tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
                    GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
                if (ifv != NULL) {
                        tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
                        tx->tx_vlan = ifv->ifv_tag;
                }
                gx->gx_txcnt = cnt;
                gx->gx_tx_tail_idx = idx;
                gx->gx_txcontext = txcontext;
                /* Stash the mbuf chain at the packet's last descriptor. */
                idx = GX_PREV(idx, GX_TX_RING_CNT);
                gx->gx_cdata.gx_tx_chain[idx] = m_head;

                CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
        }
        
        return (0);
}
1574  
1575 /*
1576  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1577  * to the mbuf data regions directly in the transmit descriptors.
1578  */
1579 static void
1580 gx_start(struct ifnet *ifp)
1581 {
1582         struct gx_softc *gx;
1583         struct mbuf *m_head;
1584         int s;
1585
1586         s = splimp();
1587
1588         gx = ifp->if_softc;
1589
1590         for (;;) {
1591                 m_head = ifq_poll(&ifp->if_snd);
1592                 if (m_head == NULL)
1593                         break;
1594
1595                 /*
1596                  * Pack the data into the transmit ring. If we
1597                  * don't have room, set the OACTIVE flag and wait
1598                  * for the NIC to drain the ring.
1599                  */
1600                 if (gx_encap(gx, m_head) != 0) {
1601                         ifp->if_flags |= IFF_OACTIVE;
1602                         break;
1603                 }
1604                 m_head = ifq_dequeue(&ifp->if_snd);
1605
1606                 BPF_MTAP(ifp, m_head);
1607
1608                 /*
1609                  * Set a timeout in case the chip goes out to lunch.
1610                  */
1611                 ifp->if_timer = 5;
1612         }
1613
1614         splx(s);
1615 }