Eliminate use of curthread in if_ioctl functions by passing down the
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/gx/if_gx.c,v 1.2.2.3 2001/12/14 19:51:39 jlemon Exp $
 * $DragonFly: src/sys/dev/netif/gx/Attic/if_gx.c,v 1.7 2004/03/23 22:19:01 hsu Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <vm/vm.h>              /* for vtophys */
#include <vm/pmap.h>            /* for vtophys */
#include <machine/clock.h>      /* for DELAY */
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

#include "if_gxreg.h"
#include "if_gxvar.h"

#include "miibus_if.h"

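/*
 * Default TX/RX interrupt coalescing delays, programmed into the chip's
 * interrupt delay timer registers by gx_init().
 */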
#define TUNABLE_TX_INTR_DELAY   100
#define TUNABLE_RX_INTR_DELAY   100

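/*
 * Checksum offload features advertised in if_hwassist when the chip
 * supports checksumming (GXF_CSUM); see gx_init().
 */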
#define GX_CSUM_FEATURES        (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)

/*
 * Various supported device vendors/types and their names.
 */
struct gx_device {
        u_int16_t       vendor;
        u_int16_t       device;
        int             version_flags;
        u_int32_t       version_ipg;
        char            *name;
};

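/*
 * The version_ipg value packs the three inter-packet-gap timer fields
 * that gx_init() writes to GX_TX_IPG as a single register image:
 * IPG transmit time in bits 0-9, IPG receive time 1 in bits 10-19,
 * and IPG receive time 2 in bits 20-29.
 */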
static struct gx_device gx_devs[] = {
        { INTEL_VENDORID, DEVICEID_WISEMAN,
            GXF_FORCE_TBI | GXF_OLD_REGS,
            10 | 2 << 10 | 10 << 20,
            "Intel Gigabit Ethernet (82542)" },
        { INTEL_VENDORID, DEVICEID_LIVINGOOD_FIBER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            6 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82543GC-F)" },
        { INTEL_VENDORID, DEVICEID_LIVINGOOD_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82543GC-T)" },
#if 0
/* notyet.. */
        { INTEL_VENDORID, DEVICEID_CORDOVA_FIBER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            6 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544EI-F)" },
        { INTEL_VENDORID, DEVICEID_CORDOVA_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544EI-T)" },
        { INTEL_VENDORID, DEVICEID_CORDOVA2_COPPER,
            GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
            8 | 8 << 10 | 6 << 20,
            "Intel Gigabit Ethernet (82544GC-T)" },
#endif
        { 0, 0, 0, 0, NULL }
};

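/*
 * Descriptor ring register offsets differ between the 82542 and later
 * chips; gx_attach() copies the map matching the chip (GXF_OLD_REGS
 * selects old_regs) into the softc, and the rest of the driver goes
 * through gx->gx_reg.
 */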
static struct gx_regs new_regs = {
        GX_RX_RING_BASE, GX_RX_RING_LEN,
        GX_RX_RING_HEAD, GX_RX_RING_TAIL,
        GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,

        GX_TX_RING_BASE, GX_TX_RING_LEN,
        GX_TX_RING_HEAD, GX_TX_RING_TAIL,
        GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
};
static struct gx_regs old_regs = {
        GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
        GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
        GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,

        GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
        GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
        GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
};

static int      gx_probe(device_t dev);
static int      gx_attach(device_t dev);
static int      gx_detach(device_t dev);
static void     gx_shutdown(device_t dev);

static void     gx_intr(void *xsc);
static void     gx_init(void *xsc);

static struct   gx_device *gx_match(device_t dev);
static void     gx_eeprom_getword(struct gx_softc *gx, int addr,
                    u_int16_t *dest);
static int      gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
                    int cnt);
static int      gx_ifmedia_upd(struct ifnet *ifp);
static void     gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int      gx_miibus_readreg(device_t dev, int phy, int reg);
static void     gx_miibus_writereg(device_t dev, int phy, int reg, int value);
static void     gx_miibus_statchg(device_t dev);
static int      gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data,
                    struct ucred *);
static void     gx_setmulti(struct gx_softc *gx);
static void     gx_reset(struct gx_softc *gx);
static void     gx_phy_reset(struct gx_softc *gx);
static void     gx_release(struct gx_softc *gx);
static void     gx_stop(struct gx_softc *gx);
static void     gx_watchdog(struct ifnet *ifp);
static void     gx_start(struct ifnet *ifp);

static int      gx_init_rx_ring(struct gx_softc *gx);
static void     gx_free_rx_ring(struct gx_softc *gx);
static int      gx_init_tx_ring(struct gx_softc *gx);
static void     gx_free_tx_ring(struct gx_softc *gx);

static device_method_t gx_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         gx_probe),
        DEVMETHOD(device_attach,        gx_attach),
        DEVMETHOD(device_detach,        gx_detach),
        DEVMETHOD(device_shutdown,      gx_shutdown),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       gx_miibus_readreg),
        DEVMETHOD(miibus_writereg,      gx_miibus_writereg),
        DEVMETHOD(miibus_statchg,       gx_miibus_statchg),

        { 0, 0 }
};

static driver_t gx_driver = {
        "gx",
        gx_methods,
        sizeof(struct gx_softc)
};

static devclass_t gx_devclass;

DECLARE_DUMMY_MODULE(if_gx);
MODULE_DEPEND(if_gx, miibus, 1, 1, 1);
DRIVER_MODULE(if_gx, pci, gx_driver, gx_devclass, 0, 0);
DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0);

static struct gx_device *
gx_match(device_t dev)
{
        int i;

        for (i = 0; gx_devs[i].name != NULL; i++) {
                if ((pci_get_vendor(dev) == gx_devs[i].vendor) &&
                    (pci_get_device(dev) == gx_devs[i].device))
                        return (&gx_devs[i]);
        }
        return (NULL);
}

static int
gx_probe(device_t dev)
{
        struct gx_device *gx_dev;

        gx_dev = gx_match(dev);
        if (gx_dev == NULL)
                return (ENXIO);

        device_set_desc(dev, gx_dev->name);
        return (0);
}

static int
gx_attach(device_t dev)
{
        struct gx_softc *gx;
        struct gx_device *gx_dev;
        struct ifnet *ifp;
        u_int32_t command;
        int rid, s;
        int error = 0;

        s = splimp();

        gx = device_get_softc(dev);
        bzero(gx, sizeof(struct gx_softc));
        gx->gx_dev = dev;

        gx_dev = gx_match(dev);
        gx->gx_vflags = gx_dev->version_flags;
        gx->gx_ipg = gx_dev->version_ipg;

        mtx_init(&gx->gx_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);

        GX_LOCK(gx);

        /*
         * Map control/status registers.
         */
        command = pci_read_config(dev, PCIR_COMMAND, 4);
        command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
        if (gx->gx_vflags & GXF_ENABLE_MWI)
                command |= PCIM_CMD_MWIEN;
        pci_write_config(dev, PCIR_COMMAND, command, 4);
        command = pci_read_config(dev, PCIR_COMMAND, 4);

/* XXX check cache line size? */

        if ((command & PCIM_CMD_MEMEN) == 0) {
                device_printf(dev, "failed to enable memory mapping!\n");
                error = ENXIO;
                goto fail;
        }

        rid = GX_PCI_LOMEM;
        gx->gx_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
            0, ~0, 1, RF_ACTIVE);
#if 0
/* support PIO mode */
        rid = PCI_LOIO;
        gx->gx_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
            0, ~0, 1, RF_ACTIVE);
#endif

        if (gx->gx_res == NULL) {
                device_printf(dev, "couldn't map memory\n");
                error = ENXIO;
                goto fail;
        }

        gx->gx_btag = rman_get_bustag(gx->gx_res);
        gx->gx_bhandle = rman_get_bushandle(gx->gx_res);

        /* Allocate interrupt */
        rid = 0;
        gx->gx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
            RF_SHAREABLE | RF_ACTIVE);

        if (gx->gx_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        error = bus_setup_intr(dev, gx->gx_irq, INTR_TYPE_NET,
           gx_intr, gx, &gx->gx_intrhand);
        if (error) {
                device_printf(dev, "couldn't setup irq\n");
                goto fail;
        }

        /* compensate for different register mappings */
        if (gx->gx_vflags & GXF_OLD_REGS)
                gx->gx_reg = old_regs;
        else
                gx->gx_reg = new_regs;

        if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
            GX_EEMAP_MAC, 3)) {
                device_printf(dev, "failed to read station address\n");
                error = ENXIO;
                goto fail;
        }
        device_printf(dev, "Ethernet address: %6D\n",
            gx->arpcom.ac_enaddr, ":");

        /* Allocate the ring buffers. */
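        /*
         * The rings are handed to the chip via vtophys(), so the
         * allocation must be physically contiguous, hence contigmalloc().
         */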
        gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF,
            M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

        if (gx->gx_rdata == NULL) {
                device_printf(dev, "no memory for list buffers!\n");
                error = ENXIO;
                goto fail;
        }
        bzero(gx->gx_rdata, sizeof(struct gx_ring_data));

        /* Set default tuneable values. */
        gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
        gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;

        /* Set up ifnet structure */
        ifp = &gx->arpcom.ac_if;
        ifp->if_softc = gx;
        if_initname(ifp, "gx", device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = gx_ioctl;
        ifp->if_output = ether_output;
        ifp->if_start = gx_start;
        ifp->if_watchdog = gx_watchdog;
        ifp->if_init = gx_init;
        ifp->if_mtu = ETHERMTU;
        ifp->if_snd.ifq_maxlen = GX_TX_RING_CNT - 1;

        /* see if we can enable hardware checksumming */
        if (gx->gx_vflags & GXF_CSUM) {
                ifp->if_capabilities = IFCAP_HWCSUM;
                ifp->if_capenable = ifp->if_capabilities;
        }

        /* figure out transceiver type */
        if (gx->gx_vflags & GXF_FORCE_TBI ||
            CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
                gx->gx_tbimode = 1;

        if (gx->gx_tbimode) {
                /* SERDES transceiver */
                ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
                    gx_ifmedia_sts);
                ifmedia_add(&gx->gx_media,
                    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
                ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
                ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
        } else {
                /* GMII/MII transceiver */
                gx_phy_reset(gx);
                if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd,
                    gx_ifmedia_sts)) {
                        device_printf(dev, "GMII/MII, PHY not detected\n");
                        error = ENXIO;
                        goto fail;
                }
        }

        /*
         * Call MI attach routines.
         */
        ether_ifattach(ifp, gx->arpcom.ac_enaddr);

        GX_UNLOCK(gx);
        splx(s);
        return (0);

fail:
        GX_UNLOCK(gx);
        gx_release(gx);
        splx(s);
        return (error);
}

static void
gx_release(struct gx_softc *gx)
{

        bus_generic_detach(gx->gx_dev);
        if (gx->gx_miibus)
                device_delete_child(gx->gx_dev, gx->gx_miibus);

        if (gx->gx_intrhand)
                bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);
        if (gx->gx_irq)
                bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
        if (gx->gx_res)
                bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
                    GX_PCI_LOMEM, gx->gx_res);
}

static void
gx_init(void *xsc)
{
        struct gx_softc *gx = (struct gx_softc *)xsc;
        struct ifmedia *ifm;
        struct ifnet *ifp;
        device_t dev;
        u_int16_t *m;
        u_int32_t ctrl;
        int s, i, tmp;

        dev = gx->gx_dev;
        ifp = &gx->arpcom.ac_if;

        s = splimp();
        GX_LOCK(gx);

        /* Disable host interrupts, halt chip. */
        gx_reset(gx);

        /* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
        gx_stop(gx);

        /* Load our MAC address, invalidate other 15 RX addresses. */
        m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
        CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
        CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
        for (i = 1; i < 16; i++)
                CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);

        /* Program multicast filter. */
        gx_setmulti(gx);

        /* Init RX ring. */
        gx_init_rx_ring(gx);

        /* Init TX ring. */
        gx_init_tx_ring(gx);

        if (gx->gx_vflags & GXF_DMA) {
                /* set up DMA control */
                CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
                CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
        }

        /* enable receiver */
        ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K;
        ctrl |= GX_RXC_BCAST_ACCEPT;

        /* Enable or disable promiscuous mode as needed. */
        if (ifp->if_flags & IFF_PROMISC)
                ctrl |= GX_RXC_UNI_PROMISC;

        /* This is required if we want to accept jumbo frames */
        if (ifp->if_mtu > ETHERMTU)
                ctrl |= GX_RXC_LONG_PKT_ENABLE;

        /* setup receive checksum control */
        if (ifp->if_capenable & IFCAP_RXCSUM)
                CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
                    GX_CSUM_TCP/* | GX_CSUM_IP*/);

        /* setup transmit checksum control */
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist = GX_CSUM_FEATURES;

        ctrl |= GX_RXC_STRIP_ETHERCRC;          /* not on 82542? */
        CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);

        /* enable transmitter */
        ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;

        /* XXX we should support half-duplex here too... */
        ctrl |= GX_TXC_COLL_TIME_FDX;

        CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);

        /*
         * set up recommended IPG times, which vary depending on chip type:
         *      IPG transmit time:  80ns
         *      IPG receive time 1: 20ns
         *      IPG receive time 2: 80ns
         */
        CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);

        /* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
        CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001);
        CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100);

        /* set up 802.3x MAC flow control type -- 88:08 */
        CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808);

        /* Set up tuneables */
        CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);

        /*
         * Configure chip for correct operation.
         */
        ctrl = GX_CTRL_DUPLEX;
#if BYTE_ORDER == BIG_ENDIAN
        ctrl |= GX_CTRL_BIGENDIAN;
#endif
        ctrl |= GX_CTRL_VLAN_ENABLE;

        if (gx->gx_tbimode) {
                /*
                 * It seems that TXCW must be initialized from the EEPROM
                 * manually.
                 *
                 * XXX
                 * should probably read the eeprom and re-insert the
                 * values here.
                 */
#define TXCONFIG_WORD   0x000001A0
                CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);

                /* turn on hardware autonegotiate */
                GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
        } else {
                /*
                 * Auto-detect speed from PHY, instead of using direct
                 * indication.  The SLU bit doesn't force the link, but
                 * must be present for ASDE to work.
                 */
                gx_phy_reset(gx);
                ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
        }

        /*
         * Take chip out of reset and start it running.
         */
        CSR_WRITE_4(gx, GX_CTRL, ctrl);

        /* Turn interrupts on. */
        CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        /*
         * Set the current media.
         */
        if (gx->gx_miibus != NULL) {
                mii_mediachg(device_get_softc(gx->gx_miibus));
        } else {
                ifm = &gx->gx_media;
                tmp = ifm->ifm_media;
                ifm->ifm_media = ifm->ifm_cur->ifm_media;
                gx_ifmedia_upd(ifp);
                ifm->ifm_media = tmp;
        }

        /*
         * XXX
         * Have the LINK0 flag force the link in TBI mode.
         */
        if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
                GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
        }

#if 0
        printf("66mhz: %s  64bit: %s\n",
            CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
            CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
#endif

        GX_UNLOCK(gx);
        splx(s);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
gx_shutdown(device_t dev)
{
        struct gx_softc *gx;

        gx = device_get_softc(dev);
        gx_reset(gx);
        gx_stop(gx);
}

static int
gx_detach(device_t dev)
{
        struct gx_softc *gx;
        struct ifnet *ifp;
        int s;

        s = splimp();

        gx = device_get_softc(dev);
        ifp = &gx->arpcom.ac_if;
        GX_LOCK(gx);

        ether_ifdetach(ifp);
        gx_reset(gx);
        gx_stop(gx);
        ifmedia_removeall(&gx->gx_media);
        gx_release(gx);

        contigfree(gx->gx_rdata, sizeof(struct gx_ring_data), M_DEVBUF);

        GX_UNLOCK(gx);
        mtx_destroy(&gx->gx_mtx);
        splx(s);

        return (0);
}

static void
gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
{
        u_int16_t word = 0;
        u_int32_t base, reg;
        int x;

        addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
            (addr & ((1 << GX_EE_ADDR_SIZE) - 1));

        base = CSR_READ_4(gx, GX_EEPROM_CTRL);
        base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
        base |= GX_EE_SELECT;

        CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);

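        /* Clock the read opcode and address out to the EEPROM, MSB first. */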
        for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
                reg = base | (addr & x ? GX_EE_DATA_IN : 0);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
                DELAY(10);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
                DELAY(10);
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
                DELAY(10);
        }

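        /* Clock the 16 data bits back in, MSB first. */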
        for (x = 1 << 15; x; x >>= 1) {
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
                DELAY(10);
                reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
                if (reg & GX_EE_DATA_OUT)
                        word |= x;
                CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
                DELAY(10);
        }

        CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
        DELAY(10);

        *dest = word;
}

static int
gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
{
        u_int16_t *word;
        int i;

        word = (u_int16_t *)dest;
        for (i = 0; i < cnt; i ++) {
                gx_eeprom_getword(gx, off + i, word);
                word++;
        }
        return (0);
}

/*
 * Set media options.
 */
static int
gx_ifmedia_upd(struct ifnet *ifp)
{
        struct gx_softc *gx;
        struct ifmedia *ifm;
        struct mii_data *mii;

        gx = ifp->if_softc;

        if (gx->gx_tbimode) {
                ifm = &gx->gx_media;
                if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                        return (EINVAL);
                switch (IFM_SUBTYPE(ifm->ifm_media)) {
                case IFM_AUTO:
                        GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
                        GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                        GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
                        break;
                case IFM_1000_SX:
                        device_printf(gx->gx_dev,
                            "manual config not supported yet.\n");
#if 0
                        GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
                        config = /* bit symbols for 802.3z */0;
                        ctrl |= GX_CTRL_SET_LINK_UP;
                        if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
                                ctrl |= GX_CTRL_DUPLEX;
#endif
                        break;
                default:
                        return (EINVAL);
                }
        } else {
                ifm = &gx->gx_media;

                /*
                 * 1000TX half duplex does not work.
                 */
                if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
                    IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_TX &&
                    (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
                        return (EINVAL);
                mii = device_get_softc(gx->gx_miibus);
                mii_mediachg(mii);
        }
        return (0);
}

/*
 * Report current media status.
 */
static void
gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct gx_softc *gx;
        struct mii_data *mii;
        u_int32_t status;

        gx = ifp->if_softc;

        if (gx->gx_tbimode) {
                ifmr->ifm_status = IFM_AVALID;
                ifmr->ifm_active = IFM_ETHER;

                status = CSR_READ_4(gx, GX_STATUS);
                if ((status & GX_STAT_LINKUP) == 0)
                        return;

                ifmr->ifm_status |= IFM_ACTIVE;
                ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
        } else {
                mii = device_get_softc(gx->gx_miibus);
                mii_pollstat(mii);
                if ((mii->mii_media_active & (IFM_1000_TX | IFM_HDX)) ==
                    (IFM_1000_TX | IFM_HDX))
                        mii->mii_media_active = IFM_ETHER | IFM_NONE;
                ifmr->ifm_active = mii->mii_media_active;
                ifmr->ifm_status = mii->mii_media_status;
        }
}

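/*
 * Bit-bang a value out to the PHY over the MDIO GPIO pins in GX_CTRL;
 * chips before Cordova have no hardware MDIC register (see the note in
 * gx_miibus_readreg() below).
 */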
static void
gx_mii_shiftin(struct gx_softc *gx, int data, int length)
{
        u_int32_t reg, x;

        /*
         * Set up default GPIO direction + PHY data out.
         */
        reg = CSR_READ_4(gx, GX_CTRL);
        reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
        reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;

        /*
         * Shift in data to PHY.
         */
        for (x = 1 << (length - 1); x; x >>= 1) {
                if (data & x)
                        reg |= GX_CTRL_PHY_IO;
                else
                        reg &= ~GX_CTRL_PHY_IO;
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
                CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
                DELAY(10);
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
        }
}

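/*
 * Turn the MDIO data line around and clock the PHY's 16-bit reply back
 * in, MSB first.
 */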
static u_int16_t
gx_mii_shiftout(struct gx_softc *gx)
{
        u_int32_t reg;
        u_int16_t data;
        int x;

        /*
         * Set up default GPIO direction + PHY data in.
         */
        reg = CSR_READ_4(gx, GX_CTRL);
        reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
        reg |= GX_CTRL_GPIO_DIR;

        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);
        /*
         * Shift out data from PHY.
         */
        data = 0;
        for (x = 1 << 15; x; x >>= 1) {
                CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
                DELAY(10);
                if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
                        data |= x;
                CSR_WRITE_4(gx, GX_CTRL, reg);
                DELAY(10);
        }
        CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL, reg);
        DELAY(10);

        return (data);
}

static int
gx_miibus_readreg(device_t dev, int phy, int reg)
{
        struct gx_softc *gx;

        gx = device_get_softc(dev);
        if (gx->gx_tbimode)
                return (0);

        /*
         * XXX
         * Note: Cordova has an MDIC register; Livingood and earlier
         * chips only have the MII bit-bang interface used here.
         */

        gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
        gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
            (phy << 5) | reg, GX_PHY_READ_LEN);
        return (gx_mii_shiftout(gx));
}

static void
gx_miibus_writereg(device_t dev, int phy, int reg, int value)
{
        struct gx_softc *gx;

        gx = device_get_softc(dev);
        if (gx->gx_tbimode)
                return;

        gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
        gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
            (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
            (value & 0xffff), GX_PHY_WRITE_LEN);
}

static void
gx_miibus_statchg(device_t dev)
{
        struct gx_softc *gx;
        struct mii_data *mii;
        int reg, s;

        gx = device_get_softc(dev);
        if (gx->gx_tbimode)
                return;

        /*
         * Set flow control behavior to mirror what PHY negotiated.
         */
        mii = device_get_softc(gx->gx_miibus);

        s = splimp();
        GX_LOCK(gx);

        reg = CSR_READ_4(gx, GX_CTRL);
        if (mii->mii_media_active & IFM_FLAG0)
                reg |= GX_CTRL_RX_FLOWCTRL;
        else
                reg &= ~GX_CTRL_RX_FLOWCTRL;
        if (mii->mii_media_active & IFM_FLAG1)
                reg |= GX_CTRL_TX_FLOWCTRL;
        else
                reg &= ~GX_CTRL_TX_FLOWCTRL;
        CSR_WRITE_4(gx, GX_CTRL, reg);

        GX_UNLOCK(gx);
        splx(s);
}

static int
gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
        struct gx_softc *gx = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct mii_data *mii;
        int s, mask, error = 0;

        s = splimp();
        GX_LOCK(gx);

        switch (command) {
        case SIOCSIFADDR:
        case SIOCGIFADDR:
                error = ether_ioctl(ifp, command, data);
                break;
        case SIOCSIFMTU:
                if (ifr->ifr_mtu > GX_MAX_MTU) {
                        error = EINVAL;
                } else {
                        ifp->if_mtu = ifr->ifr_mtu;
                        gx_init(gx);
                }
                break;
        case SIOCSIFFLAGS:
                if ((ifp->if_flags & IFF_UP) == 0) {
                        gx_stop(gx);
                } else if (ifp->if_flags & IFF_RUNNING &&
                    ((ifp->if_flags & IFF_PROMISC) !=
                    (gx->gx_if_flags & IFF_PROMISC))) {
                        if (ifp->if_flags & IFF_PROMISC)
                                GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
                        else
                                GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
                } else {
                        gx_init(gx);
                }
                gx->gx_if_flags = ifp->if_flags;
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING)
                        gx_setmulti(gx);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                if (gx->gx_miibus != NULL) {
                        mii = device_get_softc(gx->gx_miibus);
                        error = ifmedia_ioctl(ifp, ifr,
                            &mii->mii_media, command);
                } else {
                        error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command);
                }
                break;
        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
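                /*
                 * RX and TX checksum offload are toggled as a unit;
                 * reinitialize if running so gx_init() reprograms the
                 * checksum control registers.
                 */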
                if (mask & IFCAP_HWCSUM) {
                        if (IFCAP_HWCSUM & ifp->if_capenable)
                                ifp->if_capenable &= ~IFCAP_HWCSUM;
                        else
                                ifp->if_capenable |= IFCAP_HWCSUM;
                        if (ifp->if_flags & IFF_RUNNING)
                                gx_init(gx);
                }
                break;
        default:
                error = EINVAL;
                break;
        }

        GX_UNLOCK(gx);
        splx(s);
        return (error);
}

static void
gx_phy_reset(struct gx_softc *gx)
{
        int reg;

        GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);

        /*
         * PHY reset is active low.
         */
        reg = CSR_READ_4(gx, GX_CTRL_EXT);
        reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
        reg |= GX_CTRLX_GPIO_DIR;

        CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
        DELAY(10);
        CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
        DELAY(10);

#if 0
        /* post-livingood (cordova) only */
        GX_SETBIT(gx, GX_CTRL, 0x80000000);
        DELAY(1000);
        GX_CLRBIT(gx, GX_CTRL, 0x80000000);
#endif
}

static void
gx_reset(struct gx_softc *gx)
{

        /* Disable host interrupts. */
        CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

        /* reset chip (THWAP!) */
        GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
        DELAY(10);
}

static void
gx_stop(struct gx_softc *gx)
{
        struct ifnet *ifp;

        ifp = &gx->arpcom.ac_if;

        /* reset and flush transmitter */
        CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);

        /* reset and flush receiver */
        CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);

        /* reset link */
        if (gx->gx_tbimode)
                GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);

        /* Free the RX lists. */
        gx_free_rx_ring(gx);

        /* Free TX buffers. */
        gx_free_tx_ring(gx);

        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

static void
gx_watchdog(struct ifnet *ifp)
{
        struct gx_softc *gx;

        gx = ifp->if_softc;

        device_printf(gx->gx_dev, "watchdog timeout -- resetting\n");
        gx_reset(gx);
        gx_init(gx);

        ifp->if_oerrors++;
}

/*
 * Initialize a receive ring descriptor.
 */
static int
gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
{
        struct mbuf *m_new = NULL;
        struct gx_rx_desc *r;

        if (m == NULL) {
                MGETHDR(m_new, M_DONTWAIT, MT_DATA);
                if (m_new == NULL) {
                        device_printf(gx->gx_dev,
                            "mbuf allocation failed -- packet dropped\n");
                        return (ENOBUFS);
                }
                MCLGET(m_new, M_DONTWAIT);
                if ((m_new->m_flags & M_EXT) == 0) {
                        device_printf(gx->gx_dev,
                            "cluster allocation failed -- packet dropped\n");
                        m_freem(m_new);
                        return (ENOBUFS);
                }
                m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
        } else {
                m->m_len = m->m_pkthdr.len = MCLBYTES;
                m->m_data = m->m_ext.ext_buf;
                m->m_next = NULL;
                m_new = m;
        }

        /*
         * XXX
         * This will _NOT_ work for large MTUs; the adjustment below
         * would let a jumbo frame overwrite the end of the buffer.
         * Skipping it for jumbograms avoids that, but then breaks
         * the alignment.
         */
        if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
                m_adj(m_new, ETHER_ALIGN);

        gx->gx_cdata.gx_rx_chain[idx] = m_new;
        r = &gx->gx_rdata->gx_rx_ring[idx];
        r->rx_addr = vtophys(mtod(m_new, caddr_t));
        r->rx_staterr = 0;

        return (0);
}

/*
 * The receive ring can have up to 64K descriptors, which at 2K per mbuf
 * cluster, could add up to 128M of memory.  Due to alignment constraints,
 * the number of descriptors must be a multiple of 8.  For now, we
 * allocate 256 entries and hope that our CPU is fast enough to keep up
 * with the NIC.
 */
static int
gx_init_rx_ring(struct gx_softc *gx)
{
        int i, error;

        for (i = 0; i < GX_RX_RING_CNT; i++) {
                error = gx_newbuf(gx, i, NULL);
                if (error)
                        return (error);
        }

        /* bring receiver out of reset state, leave disabled */
        CSR_WRITE_4(gx, GX_RX_CONTROL, 0);

        /* set up ring registers */
        CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
            (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring));

        CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
            GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
        CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
        CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
        gx->gx_rx_tail_idx = 0;

        return (0);
}

static void
gx_free_rx_ring(struct gx_softc *gx)
{
        struct mbuf **mp;
        int i;

        mp = gx->gx_cdata.gx_rx_chain;
        for (i = 0; i < GX_RX_RING_CNT; i++, mp++) {
                if (*mp != NULL) {
                        m_freem(*mp);
                        *mp = NULL;
                }
        }
        bzero((void *)gx->gx_rdata->gx_rx_ring,
            GX_RX_RING_CNT * sizeof(struct gx_rx_desc));

        /* release any partially-received packet chain */
        if (gx->gx_pkthdr != NULL) {
                m_freem(gx->gx_pkthdr);
                gx->gx_pkthdr = NULL;
        }
}

static int
gx_init_tx_ring(struct gx_softc *gx)
{

        /* bring transmitter out of reset state, leave disabled */
        CSR_WRITE_4(gx, GX_TX_CONTROL, 0);

        /* set up ring registers */
        CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
            (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring));
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
            GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
        CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
        gx->gx_tx_head_idx = 0;
        gx->gx_tx_tail_idx = 0;
        gx->gx_txcnt = 0;

        /* set up initial TX context */
        gx->gx_txcontext = GX_TXCONTEXT_NONE;

        return (0);
}

static void
gx_free_tx_ring(struct gx_softc *gx)
{
        struct mbuf **mp;
        int i;

        mp = gx->gx_cdata.gx_tx_chain;
        for (i = 0; i < GX_TX_RING_CNT; i++, mp++) {
                if (*mp != NULL) {
                        m_freem(*mp);
                        *mp = NULL;
                }
        }
        bzero((void *)&gx->gx_rdata->gx_tx_ring,
            GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
}

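/*
 * XXX multicast filtering is not really implemented; all this does is
 * clear out the multicast hash table.
 */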
static void
gx_setmulti(struct gx_softc *gx)
{
        int i;

        /* wipe out the multicast table */
        for (i = 1; i < 128; i++)
                CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
}

static void
gx_rxeof(struct gx_softc *gx)
{
        struct ether_header *eh;
        struct gx_rx_desc *rx;
        struct ifnet *ifp;
        int idx, staterr, len;
        struct mbuf *m;

        gx->gx_rx_interrupts++;

        ifp = &gx->arpcom.ac_if;
        idx = gx->gx_rx_tail_idx;

        while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {

                rx = &gx->gx_rdata->gx_rx_ring[idx];
                m = gx->gx_cdata.gx_rx_chain[idx];
                /*
                 * gx_newbuf overwrites status and length bits, so we
                 * make a copy of them here.
                 */
                len = rx->rx_len;
                staterr = rx->rx_staterr;

                if (staterr & GX_INPUT_ERROR)
                        goto ierror;

                if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
                        goto ierror;

                GX_INC(idx, GX_RX_RING_CNT);

                if (staterr & GX_RXSTAT_INEXACT_MATCH) {
                        /*
                         * multicast packet, must verify against
                         * multicast address.
                         */
                }

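                /*
                 * A frame larger than one 2K cluster spans several
                 * descriptors; accumulate the pieces on gx_pkthdr until
                 * the END_OF_PACKET descriptor arrives.
                 */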
                if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
                        if (gx->gx_pkthdr == NULL) {
                                m->m_len = len;
                                m->m_pkthdr.len = len;
                                gx->gx_pkthdr = m;
                                gx->gx_pktnextp = &m->m_next;
                        } else {
                                m->m_len = len;
                                m->m_flags &= ~M_PKTHDR;
                                gx->gx_pkthdr->m_pkthdr.len += len;
                                *(gx->gx_pktnextp) = m;
                                gx->gx_pktnextp = &m->m_next;
                        }
                        continue;
                }

                if (gx->gx_pkthdr == NULL) {
                        m->m_len = len;
                        m->m_pkthdr.len = len;
                } else {
                        m->m_len = len;
                        m->m_flags &= ~M_PKTHDR;
                        gx->gx_pkthdr->m_pkthdr.len += len;
                        *(gx->gx_pktnextp) = m;
                        m = gx->gx_pkthdr;
                        gx->gx_pkthdr = NULL;
                }

                ifp->if_ipackets++;
                eh = mtod(m, struct ether_header *);
                m->m_pkthdr.rcvif = ifp;

                /* Remove header from mbuf and pass it on. */
                m_adj(m, sizeof(struct ether_header));

#define IP_CSMASK       (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
#define TCP_CSMASK \
    (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
                if (ifp->if_capenable & IFCAP_RXCSUM) {
#if 0
                        /*
                         * Intel Erratum #23 indicates that the Receive IP
                         * Checksum offload feature has been completely
                         * disabled.
                         */
                        if ((staterr & IP_CSMASK) == GX_RXSTAT_HAS_IP_CSUM) {
                                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                                if ((staterr & GX_RXERR_IP_CSUM) == 0)
                                        m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                        }
#endif
                        if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
                                m->m_pkthdr.csum_flags |=
                                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                m->m_pkthdr.csum_data = 0xffff;
                        }
                }
                /*
                 * If we received a packet with a vlan tag, pass it
                 * to vlan_input() instead of ether_input().
                 */
                if (staterr & GX_RXSTAT_VLAN_PKT) {
                        VLAN_INPUT_TAG(eh, m, rx->rx_special);
                        continue;
                }
                ether_input(ifp, eh, m);
                continue;

  ierror:
                ifp->if_ierrors++;
                gx_newbuf(gx, idx, m);

                /*
                 * XXX
                 * this isn't quite right.  Suppose we have a packet that
                 * spans 5 descriptors (9K split into 2K buffers).  If
                 * the 3rd descriptor sets an error, we need to ignore
                 * the last two.  The way things stand now, the last two
                 * will be accepted as a single packet.
                 *
                 * we don't worry about this -- the chip may not set an
                 * error in this case, and the checksum of the upper layers
                 * will catch the error.
                 */
                if (gx->gx_pkthdr != NULL) {
                        m_freem(gx->gx_pkthdr);
                        gx->gx_pkthdr = NULL;
                }
                GX_INC(idx, GX_RX_RING_CNT);
        }

        gx->gx_rx_tail_idx = idx;
        if (--idx < 0)
                idx = GX_RX_RING_CNT - 1;
        CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
}

static void
gx_txeof(struct gx_softc *gx)
{
        struct ifnet *ifp;
        int idx, cnt;

        gx->gx_tx_interrupts++;

        ifp = &gx->arpcom.ac_if;
        idx = gx->gx_tx_head_idx;
        cnt = gx->gx_txcnt;

        /*
         * If the system chipset performs I/O write buffering, it is
         * possible for the PIO read of the head descriptor to bypass the
         * memory write of the descriptor, resulting in reading a descriptor
         * which has not been updated yet.
         */
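        /*
         * Walk forward from the head of the ring, freeing packets as
         * they complete; only the end-of-packet descriptor carries a
         * meaningful DONE bit.
         */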
        while (cnt) {
                struct gx_tx_desc_old *tx;

                tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
                cnt--;

                if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
                        GX_INC(idx, GX_TX_RING_CNT);
                        continue;
                }

                if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
                        break;

                ifp->if_opackets++;

                m_freem(gx->gx_cdata.gx_tx_chain[idx]);
                gx->gx_cdata.gx_tx_chain[idx] = NULL;
                gx->gx_txcnt = cnt;
                ifp->if_timer = 0;

                GX_INC(idx, GX_TX_RING_CNT);
                gx->gx_tx_head_idx = idx;
        }

        if (gx->gx_txcnt == 0)
                ifp->if_flags &= ~IFF_OACTIVE;
}

static void
gx_intr(void *xsc)
{
        struct gx_softc *gx;
        struct ifnet *ifp;
        u_int32_t intr;
        int s;

        gx = xsc;
        ifp = &gx->arpcom.ac_if;

        s = splimp();

        gx->gx_interrupts++;

        /* Disable host interrupts. */
        CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);

        /*
         * find out why we're being bothered.
         * reading this register automatically clears all bits.
         */
        intr = CSR_READ_4(gx, GX_INT_READ);

        /* Check RX return ring producer/consumer */
        if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
                gx_rxeof(gx);

        /* Check TX ring producer/consumer */
        if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
                gx_txeof(gx);

        /*
         * handle other interrupts here.
         */

        /*
         * Link change interrupts are not reliable; the interrupt may
         * not be generated if the link is lost.  However, the register
         * read is reliable, so check that.  Use SEQ errors to possibly
         * indicate that the link has changed.
         */
        if (intr & GX_INT_LINK_CHANGE) {
                if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
                        device_printf(gx->gx_dev, "link down\n");
                } else {
                        device_printf(gx->gx_dev, "link up\n");
                }
        }

        /* Turn interrupts on. */
        CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);

        if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
                gx_start(ifp);

        splx(s);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
gx_encap(struct gx_softc *gx, struct mbuf *m_head)
{
        struct gx_tx_desc_data *tx = NULL;
        struct gx_tx_desc_ctx *tctx;
        struct mbuf *m;
        int idx, cnt, csumopts, txcontext;
        struct ifvlan *ifv = NULL;

        if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
            m_head->m_pkthdr.rcvif != NULL &&
            m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
                ifv = m_head->m_pkthdr.rcvif->if_softc;

        cnt = gx->gx_txcnt;
        idx = gx->gx_tx_tail_idx;
        txcontext = gx->gx_txcontext;

        /*
         * Ensure we have at least 4 descriptors pre-allocated.
         */
        if (cnt >= GX_TX_RING_CNT - 4)
                return (ENOBUFS);

        /*
         * Set up the appropriate offload context if necessary.
         */
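        /*
         * The chip holds one checksum context at a time, so a context
         * descriptor is queued only when the required context differs
         * from the one last loaded (tracked in gx_txcontext).
         */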
        csumopts = 0;
        if (m_head->m_pkthdr.csum_flags) {
                if (m_head->m_pkthdr.csum_flags & CSUM_IP)
                        csumopts |= GX_TXTCP_OPT_IP_CSUM;
                if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
                        csumopts |= GX_TXTCP_OPT_TCP_CSUM;
                        txcontext = GX_TXCONTEXT_TCPIP;
                } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
                        csumopts |= GX_TXTCP_OPT_TCP_CSUM;
                        txcontext = GX_TXCONTEXT_UDPIP;
                } else if (txcontext == GX_TXCONTEXT_NONE)
                        txcontext = GX_TXCONTEXT_TCPIP;
                if (txcontext == gx->gx_txcontext)
                        goto context_done;

                tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
                tctx->tx_ip_csum_start = ETHER_HDR_LEN;
                tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
                tctx->tx_ip_csum_offset =
                    ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
                tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
                tctx->tx_tcp_csum_end = 0;
                if (txcontext == GX_TXCONTEXT_TCPIP)
                        tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
                            sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
                else
                        tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
                            sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
                tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
                tctx->tx_type = 0;
                tctx->tx_status = 0;
                GX_INC(idx, GX_TX_RING_CNT);
                cnt++;
        }
context_done:

        /*
         * Start packing the mbufs in this chain into the transmit
         * descriptors.  Stop when we run out of descriptors or hit
         * the end of the mbuf chain.
         */
        for (m = m_head; m != NULL; m = m->m_next) {
                if (m->m_len == 0)
                        continue;

                if (cnt == GX_TX_RING_CNT) {
                        printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
                        return (ENOBUFS);
                }

                tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
                tx->tx_addr = vtophys(mtod(m, vm_offset_t));
                tx->tx_status = 0;
                tx->tx_len = m->m_len;
                if (gx->arpcom.ac_if.if_hwassist) {
                        tx->tx_type = 1;
                        tx->tx_command = GX_TXTCP_EXTENSION;
                        tx->tx_options = csumopts;
                } else {
                        /*
                         * This is really a struct gx_tx_desc_old.
                         */
                        tx->tx_command = 0;
                }
                GX_INC(idx, GX_TX_RING_CNT);
                cnt++;
        }

        if (tx != NULL) {
                tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
                    GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
                if (ifv != NULL) {
                        tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
                        tx->tx_vlan = ifv->ifv_tag;
                }
                gx->gx_txcnt = cnt;
                gx->gx_tx_tail_idx = idx;
                gx->gx_txcontext = txcontext;
                idx = GX_PREV(idx, GX_TX_RING_CNT);
                gx->gx_cdata.gx_tx_chain[idx] = m_head;

                CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
        }

        return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
gx_start(struct ifnet *ifp)
{
        struct gx_softc *gx;
        struct mbuf *m_head;
        int s;

        s = splimp();

        gx = ifp->if_softc;

        for (;;) {
                IF_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                /*
                 * Pack the data into the transmit ring. If we
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
                if (gx_encap(gx, m_head) != 0) {
                        IF_PREPEND(&ifp->if_snd, m_head);
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp, m_head);

                /*
                 * Set a timeout in case the chip goes out to lunch.
                 */
                ifp->if_timer = 5;
        }

        splx(s);
}