2 * Copyright (c) 1999,2000,2001 Jonathan Lemon
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * $FreeBSD: src/sys/dev/gx/if_gx.c,v 1.2.2.3 2001/12/14 19:51:39 jlemon Exp $
30 * $DragonFly: src/sys/dev/netif/gx/Attic/if_gx.c,v 1.9 2004/07/02 17:42:17 joerg Exp $
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/sockio.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
40 #include <sys/queue.h>
43 #include <net/if_arp.h>
44 #include <net/ethernet.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
49 #include <net/if_types.h>
50 #include <net/vlan/if_vlan_var.h>
52 #include <netinet/in_systm.h>
53 #include <netinet/in.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <netinet/udp.h>
58 #include <vm/vm.h> /* for vtophys */
59 #include <vm/pmap.h> /* for vtophys */
60 #include <machine/clock.h> /* for DELAY */
61 #include <machine/bus_memio.h>
62 #include <machine/bus.h>
63 #include <machine/resource.h>
67 #include <bus/pci/pcireg.h>
68 #include <bus/pci/pcivar.h>
70 #include "../mii_layer/mii.h"
71 #include "../mii_layer/miivar.h"
76 #include "miibus_if.h"
/*
 * Default interrupt-coalescing delays written to the chip's TX/RX delay
 * registers at attach time, and the set of checksum-offload capabilities
 * advertised via if_hwassist when TX checksumming is enabled.
 */
78 #define TUNABLE_TX_INTR_DELAY 100
79 #define TUNABLE_RX_INTR_DELAY 100
81 #define GX_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)
84 * Various supported device vendors/types and their names.
90 u_int32_t version_ipg;
/*
 * Table of supported Intel gigabit controllers: PCI vendor/device IDs,
 * per-chip feature flags (GXF_*), packed IPG timings
 * (ipgt | ipgr1 << 10 | ipgr2 << 20), and a human-readable description.
 * gx_match() walks this table until it hits an entry whose name is NULL,
 * so the table must end with a NULL-name sentinel (elided in this fragment).
 */
94 static struct gx_device gx_devs[] = {
95 { INTEL_VENDORID, DEVICEID_WISEMAN,
96 GXF_FORCE_TBI | GXF_OLD_REGS,
97 10 | 2 << 10 | 10 << 20,
98 "Intel Gigabit Ethernet (82542)" },
99 { INTEL_VENDORID, DEVICEID_LIVINGOOD_FIBER,
100 GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
101 6 | 8 << 10 | 6 << 20,
102 "Intel Gigabit Ethernet (82543GC-F)" },
103 { INTEL_VENDORID, DEVICEID_LIVINGOOD_COPPER,
104 GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
105 8 | 8 << 10 | 6 << 20,
106 "Intel Gigabit Ethernet (82543GC-T)" },
109 { INTEL_VENDORID, DEVICEID_CORDOVA_FIBER,
110 GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
111 6 | 8 << 10 | 6 << 20,
112 "Intel Gigabit Ethernet (82544EI-F)" },
113 { INTEL_VENDORID, DEVICEID_CORDOVA_COPPER,
114 GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
115 8 | 8 << 10 | 6 << 20,
116 "Intel Gigabit Ethernet (82544EI-T)" },
117 { INTEL_VENDORID, DEVICEID_CORDOVA2_COPPER,
118 GXF_DMA | GXF_ENABLE_MWI | GXF_CSUM,
119 8 | 8 << 10 | 6 << 20,
120 "Intel Gigabit Ethernet (82544GC-T)" },
/*
 * Register-offset map for post-82542 chips: RX then TX ring base/length,
 * head/tail pointers, interrupt delay, and DMA control registers.
 * Copied into gx->gx_reg at attach time (closing brace elided in fragment).
 */
125 static struct gx_regs new_regs = {
126 GX_RX_RING_BASE, GX_RX_RING_LEN,
127 GX_RX_RING_HEAD, GX_RX_RING_TAIL,
128 GX_RX_INTR_DELAY, GX_RX_DMA_CTRL,
130 GX_TX_RING_BASE, GX_TX_RING_LEN,
131 GX_TX_RING_HEAD, GX_TX_RING_TAIL,
132 GX_TX_INTR_DELAY, GX_TX_DMA_CTRL,
/*
 * Register-offset map for the older 82542 (GXF_OLD_REGS) layout; same
 * field order as new_regs but with the chip's legacy register offsets.
 */
134 static struct gx_regs old_regs = {
135 GX_RX_OLD_RING_BASE, GX_RX_OLD_RING_LEN,
136 GX_RX_OLD_RING_HEAD, GX_RX_OLD_RING_TAIL,
137 GX_RX_OLD_INTR_DELAY, GX_RX_OLD_DMA_CTRL,
139 GX_TX_OLD_RING_BASE, GX_TX_OLD_RING_LEN,
140 GX_TX_OLD_RING_HEAD, GX_TX_OLD_RING_TAIL,
141 GX_TX_OLD_INTR_DELAY, GX_TX_OLD_DMA_CTRL,
/*
 * Forward declarations for the newbus entry points (probe/attach/detach/
 * shutdown), the interrupt and ifnet handlers, MII bus accessors, and the
 * internal ring-management helpers. Some prototypes span elided lines.
 */
144 static int gx_probe(device_t dev);
145 static int gx_attach(device_t dev);
146 static int gx_detach(device_t dev);
147 static void gx_shutdown(device_t dev);
149 static void gx_intr(void *xsc);
150 static void gx_init(void *xsc);
152 static struct gx_device *gx_match(device_t dev);
153 static void gx_eeprom_getword(struct gx_softc *gx, int addr,
155 static int gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off,
157 static int gx_ifmedia_upd(struct ifnet *ifp);
158 static void gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
159 static int gx_miibus_readreg(device_t dev, int phy, int reg);
160 static void gx_miibus_writereg(device_t dev, int phy, int reg, int value);
161 static void gx_miibus_statchg(device_t dev);
162 static int gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data,
164 static void gx_setmulti(struct gx_softc *gx);
165 static void gx_reset(struct gx_softc *gx);
166 static void gx_phy_reset(struct gx_softc *gx);
167 static void gx_release(struct gx_softc *gx);
168 static void gx_stop(struct gx_softc *gx);
169 static void gx_watchdog(struct ifnet *ifp);
170 static void gx_start(struct ifnet *ifp);
172 static int gx_init_rx_ring(struct gx_softc *gx);
173 static void gx_free_rx_ring(struct gx_softc *gx);
174 static int gx_init_tx_ring(struct gx_softc *gx);
175 static void gx_free_tx_ring(struct gx_softc *gx);
/*
 * Newbus/miibus glue: the device method table, driver descriptor, and
 * module declarations that register if_gx on the PCI bus and attach a
 * miibus child for copper PHYs.
 */
177 static device_method_t gx_methods[] = {
178 /* Device interface */
179 DEVMETHOD(device_probe, gx_probe),
180 DEVMETHOD(device_attach, gx_attach),
181 DEVMETHOD(device_detach, gx_detach),
182 DEVMETHOD(device_shutdown, gx_shutdown),
/* MII bus interface, forwarded to the bit-banged accessors below. */
185 DEVMETHOD(miibus_readreg, gx_miibus_readreg),
186 DEVMETHOD(miibus_writereg, gx_miibus_writereg),
187 DEVMETHOD(miibus_statchg, gx_miibus_statchg),
192 static driver_t gx_driver = {
195 sizeof(struct gx_softc)
198 static devclass_t gx_devclass;
200 DECLARE_DUMMY_MODULE(if_gx);
201 MODULE_DEPEND(if_gx, miibus, 1, 1, 1);
202 DRIVER_MODULE(if_gx, pci, gx_driver, gx_devclass, 0, 0);
203 DRIVER_MODULE(miibus, gx, miibus_driver, miibus_devclass, 0, 0);
/*
 * gx_match: look up the device's PCI vendor/device IDs in gx_devs[] and
 * return the matching table entry. The no-match return path (presumably
 * NULL, given gx_probe's use) is elided from this fragment.
 */
205 static struct gx_device *
206 gx_match(device_t dev)
210 for (i = 0; gx_devs[i].name != NULL; i++) {
211 if ((pci_get_vendor(dev) == gx_devs[i].vendor) &&
212 (pci_get_device(dev) == gx_devs[i].device))
213 return (&gx_devs[i]);
/*
 * gx_probe: newbus probe method. Match the PCI IDs against gx_devs[]
 * and set the device description on success; the failure return is
 * elided from this fragment.
 */
219 gx_probe(device_t dev)
221 struct gx_device *gx_dev;
223 gx_dev = gx_match(dev);
227 device_set_desc(dev, gx_dev->name);
/*
 * gx_attach: newbus attach method. Enables PCI memory/busmaster (and MWI
 * when GXF_ENABLE_MWI), maps the register BAR (with a PIO fallback),
 * hooks the shared interrupt, selects the old/new register map, reads the
 * station address from the EEPROM, contigmalloc()s the descriptor rings,
 * fills in the ifnet, probes the media (TBI/SERDES vs. GMII PHY), and
 * calls ether_ifattach. NOTE(review): the error-unwind paths between the
 * failure printfs and this fragment's end are elided.
 */
232 gx_attach(device_t dev)
235 struct gx_device *gx_dev;
243 gx = device_get_softc(dev);
244 bzero(gx, sizeof(struct gx_softc));
247 gx_dev = gx_match(dev);
248 gx->gx_vflags = gx_dev->version_flags;
249 gx->gx_ipg = gx_dev->version_ipg;
251 mtx_init(&gx->gx_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
256 * Map control/status registers.
258 command = pci_read_config(dev, PCIR_COMMAND, 4);
259 command |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
260 if (gx->gx_vflags & GXF_ENABLE_MWI)
261 command |= PCIM_CMD_MWIEN;
262 pci_write_config(dev, PCIR_COMMAND, command, 4);
/* Re-read to verify the chip actually latched memory-space enable. */
263 command = pci_read_config(dev, PCIR_COMMAND, 4);
265 /* XXX check cache line size? */
267 if ((command & PCIM_CMD_MEMEN) == 0) {
268 device_printf(dev, "failed to enable memory mapping!\n");
274 gx->gx_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
275 0, ~0, 1, RF_ACTIVE);
277 /* support PIO mode */
279 gx->gx_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
280 0, ~0, 1, RF_ACTIVE);
283 if (gx->gx_res == NULL) {
284 device_printf(dev, "couldn't map memory\n");
289 gx->gx_btag = rman_get_bustag(gx->gx_res);
290 gx->gx_bhandle = rman_get_bushandle(gx->gx_res);
292 /* Allocate interrupt */
294 gx->gx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
295 RF_SHAREABLE | RF_ACTIVE);
297 if (gx->gx_irq == NULL) {
298 device_printf(dev, "couldn't map interrupt\n");
303 error = bus_setup_intr(dev, gx->gx_irq, INTR_TYPE_NET,
304 gx_intr, gx, &gx->gx_intrhand);
306 device_printf(dev, "couldn't setup irq\n");
310 /* compensate for different register mappings */
311 if (gx->gx_vflags & GXF_OLD_REGS)
312 gx->gx_reg = old_regs;
314 gx->gx_reg = new_regs;
316 if (gx_read_eeprom(gx, (caddr_t)&gx->arpcom.ac_enaddr,
318 device_printf(dev, "failed to read station address\n");
323 /* Allocate the ring buffers. */
324 gx->gx_rdata = contigmalloc(sizeof(struct gx_ring_data), M_DEVBUF,
325 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
327 if (gx->gx_rdata == NULL) {
328 device_printf(dev, "no memory for list buffers!\n");
332 bzero(gx->gx_rdata, sizeof(struct gx_ring_data));
334 /* Set default tuneable values. */
335 gx->gx_tx_intr_delay = TUNABLE_TX_INTR_DELAY;
336 gx->gx_rx_intr_delay = TUNABLE_RX_INTR_DELAY;
338 /* Set up ifnet structure */
339 ifp = &gx->arpcom.ac_if;
341 if_initname(ifp, "gx", device_get_unit(dev));
342 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
343 ifp->if_ioctl = gx_ioctl;
344 ifp->if_output = ether_output;
345 ifp->if_start = gx_start;
346 ifp->if_watchdog = gx_watchdog;
347 ifp->if_init = gx_init;
348 ifp->if_mtu = ETHERMTU;
349 ifp->if_snd.ifq_maxlen = GX_TX_RING_CNT - 1;
351 /* see if we can enable hardware checksumming */
352 if (gx->gx_vflags & GXF_CSUM) {
353 ifp->if_capabilities = IFCAP_HWCSUM;
354 ifp->if_capenable = ifp->if_capabilities;
357 /* figure out transciever type */
358 if (gx->gx_vflags & GXF_FORCE_TBI ||
359 CSR_READ_4(gx, GX_STATUS) & GX_STAT_TBIMODE)
362 if (gx->gx_tbimode) {
363 /* SERDES transceiver */
364 ifmedia_init(&gx->gx_media, IFM_IMASK, gx_ifmedia_upd,
366 ifmedia_add(&gx->gx_media,
367 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
368 ifmedia_add(&gx->gx_media, IFM_ETHER|IFM_AUTO, 0, NULL);
369 ifmedia_set(&gx->gx_media, IFM_ETHER|IFM_AUTO);
371 /* GMII/MII transceiver */
373 if (mii_phy_probe(dev, &gx->gx_miibus, gx_ifmedia_upd,
375 device_printf(dev, "GMII/MII, PHY not detected\n");
382 * Call MI attach routines.
384 ether_ifattach(ifp, gx->arpcom.ac_enaddr);
/*
 * gx_release: tear down bus resources acquired by gx_attach -- detach
 * children (miibus), remove the miibus child device, release the IRQ
 * handler/resource, and release the register BAR. Conditionals guarding
 * each release (e.g. NULL checks) are elided from this fragment.
 */
398 gx_release(struct gx_softc *gx)
401 bus_generic_detach(gx->gx_dev);
403 device_delete_child(gx->gx_dev, gx->gx_miibus);
406 bus_teardown_intr(gx->gx_dev, gx->gx_irq, gx->gx_intrhand);
408 bus_release_resource(gx->gx_dev, SYS_RES_IRQ, 0, gx->gx_irq);
410 bus_release_resource(gx->gx_dev, SYS_RES_MEMORY,
411 GX_PCI_LOMEM, gx->gx_res);
/*
 * gx_init body (function header elided; prototype: static void
 * gx_init(void *xsc)): full chip (re)initialization. Halts the chip,
 * programs the station address and invalidates the other 15 RX address
 * slots, sets up DMA, RX/TX control, checksum offload, IPG timing,
 * 802.3x flow-control address/type, interrupt-delay tuneables, and the
 * device control register (TBI autoneg vs. PHY auto-speed-detect), then
 * takes the chip out of reset, unmasks interrupts, and marks the
 * interface RUNNING.
 */
417 struct gx_softc *gx = (struct gx_softc *)xsc;
426 ifp = &gx->arpcom.ac_if;
431 /* Disable host interrupts, halt chip. */
434 /* disable I/O, flush RX/TX FIFOs, and free RX/TX buffers */
437 /* Load our MAC address, invalidate other 15 RX addresses. */
438 m = (u_int16_t *)&gx->arpcom.ac_enaddr[0];
439 CSR_WRITE_4(gx, GX_RX_ADDR_BASE, (m[1] << 16) | m[0]);
440 CSR_WRITE_4(gx, GX_RX_ADDR_BASE + 4, m[2] | GX_RA_VALID);
441 for (i = 1; i < 16; i++)
442 CSR_WRITE_8(gx, GX_RX_ADDR_BASE + i * 8, (u_quad_t)0);
444 /* Program multicast filter. */
453 if (gx->gx_vflags & GXF_DMA) {
454 /* set up DMA control */
455 CSR_WRITE_4(gx, gx->gx_reg.r_rx_dma_ctrl, 0x00010000);
456 CSR_WRITE_4(gx, gx->gx_reg.r_tx_dma_ctrl, 0x00000000);
459 /* enable receiver */
460 ctrl = GX_RXC_ENABLE | GX_RXC_RX_THOLD_EIGHTH | GX_RXC_RX_BSIZE_2K;
461 ctrl |= GX_RXC_BCAST_ACCEPT;
463 /* Enable or disable promiscuous mode as needed. */
464 if (ifp->if_flags & IFF_PROMISC)
465 ctrl |= GX_RXC_UNI_PROMISC;
467 /* This is required if we want to accept jumbo frames */
468 if (ifp->if_mtu > ETHERMTU)
469 ctrl |= GX_RXC_LONG_PKT_ENABLE;
471 /* setup receive checksum control */
472 if (ifp->if_capenable & IFCAP_RXCSUM)
473 CSR_WRITE_4(gx, GX_RX_CSUM_CONTROL,
474 GX_CSUM_TCP/* | GX_CSUM_IP*/);
476 /* setup transmit checksum control */
477 if (ifp->if_capenable & IFCAP_TXCSUM)
478 ifp->if_hwassist = GX_CSUM_FEATURES;
480 ctrl |= GX_RXC_STRIP_ETHERCRC; /* not on 82542? */
481 CSR_WRITE_4(gx, GX_RX_CONTROL, ctrl);
483 /* enable transmitter */
484 ctrl = GX_TXC_ENABLE | GX_TXC_PAD_SHORT_PKTS | GX_TXC_COLL_RETRY_16;
486 /* XXX we should support half-duplex here too... */
487 ctrl |= GX_TXC_COLL_TIME_FDX;
489 CSR_WRITE_4(gx, GX_TX_CONTROL, ctrl);
492 * set up recommended IPG times, which vary depending on chip type:
493 * IPG transmit time: 80ns
494 * IPG receive time 1: 20ns
495 * IPG receive time 2: 80ns
497 CSR_WRITE_4(gx, GX_TX_IPG, gx->gx_ipg);
499 /* set up 802.3x MAC flow control address -- 01:80:c2:00:00:01 */
500 CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE, 0x00C28001);
501 CSR_WRITE_4(gx, GX_FLOW_CTRL_BASE+4, 0x00000100);
503 /* set up 802.3x MAC flow control type -- 88:08 */
504 CSR_WRITE_4(gx, GX_FLOW_CTRL_TYPE, 0x8808);
506 /* Set up tuneables */
507 CSR_WRITE_4(gx, gx->gx_reg.r_rx_delay, gx->gx_rx_intr_delay);
508 CSR_WRITE_4(gx, gx->gx_reg.r_tx_delay, gx->gx_tx_intr_delay);
511 * Configure chip for correct operation.
513 ctrl = GX_CTRL_DUPLEX;
514 #if BYTE_ORDER == BIG_ENDIAN
515 ctrl |= GX_CTRL_BIGENDIAN;
517 ctrl |= GX_CTRL_VLAN_ENABLE;
519 if (gx->gx_tbimode) {
521 * It seems that TXCW must be initialized from the EEPROM
525 * should probably read the eeprom and re-insert the
528 #define TXCONFIG_WORD 0x000001A0
529 CSR_WRITE_4(gx, GX_TX_CONFIG, TXCONFIG_WORD);
531 /* turn on hardware autonegotiate */
532 GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
535 * Auto-detect speed from PHY, instead of using direct
536 * indication. The SLU bit doesn't force the link, but
537 * must be present for ASDE to work.
540 ctrl |= GX_CTRL_SET_LINK_UP | GX_CTRL_AUTOSPEED;
544 * Take chip out of reset and start it running.
546 CSR_WRITE_4(gx, GX_CTRL, ctrl);
548 /* Turn interrupts on. */
549 CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);
551 ifp->if_flags |= IFF_RUNNING;
552 ifp->if_flags &= ~IFF_OACTIVE;
555 * Set the current media.
557 if (gx->gx_miibus != NULL) {
558 mii_mediachg(device_get_softc(gx->gx_miibus));
/* TBI path: temporarily swap in the current media word for the change. */
561 tmp = ifm->ifm_media;
562 ifm->ifm_media = ifm->ifm_cur->ifm_media;
564 ifm->ifm_media = tmp;
569 * Have the LINK0 flag force the link in TBI mode.
571 if (gx->gx_tbimode && ifp->if_flags & IFF_LINK0) {
572 GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
573 GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
/* Debug aid: report the PCI bus mode the chip negotiated. */
577 printf("66mhz: %s 64bit: %s\n",
578 CSR_READ_4(gx, GX_STATUS) & GX_STAT_PCI66 ? "yes" : "no",
579 CSR_READ_4(gx, GX_STATUS) & GX_STAT_BUS64 ? "yes" : "no");
/*
 * gx_shutdown: newbus shutdown method.
 */
587 * Stop all chip I/O so that the kernel's probe routines don't
588 * get confused by errant DMAs when rebooting.
591 gx_shutdown(device_t dev)
595 gx = device_get_softc(dev);
/*
 * gx_detach: newbus detach method. Removes the TBI ifmedia entries,
 * frees the contiguous ring memory, and destroys the softc mutex.
 * The ether_ifdetach/gx_stop calls between these lines are elided.
 */
601 gx_detach(device_t dev)
609 gx = device_get_softc(dev);
610 ifp = &gx->arpcom.ac_if;
616 ifmedia_removeall(&gx->gx_media);
619 contigfree(gx->gx_rdata, sizeof(struct gx_ring_data), M_DEVBUF);
622 mtx_destroy(&gx->gx_mtx);
/*
 * gx_eeprom_getword: bit-bang one 16-bit word out of the serial EEPROM.
 * Builds a read opcode + address, clocks it into the EEPROM via the
 * EEPROM control register, then clocks out 16 data bits. The DELAY()s
 * between clock edges are elided from this fragment. Result is stored
 * through *dest (store elided).
 */
629 gx_eeprom_getword(struct gx_softc *gx, int addr, u_int16_t *dest)
/* Combine the READ opcode with the masked word address. */
635 addr = (GX_EE_OPC_READ << GX_EE_ADDR_SIZE) |
636 (addr & ((1 << GX_EE_ADDR_SIZE) - 1));
638 base = CSR_READ_4(gx, GX_EEPROM_CTRL);
639 base &= ~(GX_EE_DATA_OUT | GX_EE_DATA_IN | GX_EE_CLOCK);
640 base |= GX_EE_SELECT;
642 CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
/* Shift the opcode+address into the EEPROM, MSB first. */
644 for (x = 1 << ((GX_EE_OPC_SIZE + GX_EE_ADDR_SIZE) - 1); x; x >>= 1) {
645 reg = base | (addr & x ? GX_EE_DATA_IN : 0);
646 CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
648 CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg | GX_EE_CLOCK);
650 CSR_WRITE_4(gx, GX_EEPROM_CTRL, reg);
/* Clock 16 result bits back out, MSB first. */
654 for (x = 1 << 15; x; x >>= 1) {
655 CSR_WRITE_4(gx, GX_EEPROM_CTRL, base | GX_EE_CLOCK);
657 reg = CSR_READ_4(gx, GX_EEPROM_CTRL);
658 if (reg & GX_EE_DATA_OUT)
660 CSR_WRITE_4(gx, GX_EEPROM_CTRL, base);
/* Deselect the EEPROM when done. */
664 CSR_WRITE_4(gx, GX_EEPROM_CTRL, base & ~GX_EE_SELECT);
/*
 * gx_read_eeprom: read cnt consecutive 16-bit words starting at EEPROM
 * offset off into dest, one word at a time via gx_eeprom_getword.
 */
671 gx_read_eeprom(struct gx_softc *gx, caddr_t dest, int off, int cnt)
676 word = (u_int16_t *)dest;
677 for (i = 0; i < cnt; i ++) {
678 gx_eeprom_getword(gx, off + i, word);
/*
 * gx_ifmedia_upd: set the media. In TBI mode, program autonegotiation
 * or (unsupported) manual 1000SX config directly on the chip; otherwise
 * hand off to the MII layer. Rejects half-duplex 1000TX, which does not
 * work on this hardware.
 */
688 gx_ifmedia_upd(struct ifnet *ifp)
692 struct mii_data *mii;
696 if (gx->gx_tbimode) {
698 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
700 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* IFM_AUTO: pulse LINK_RESET and enable hardware autoneg. */
702 GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
703 GX_SETBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
704 GX_CLRBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
707 device_printf(gx->gx_dev,
708 "manual config not supported yet.\n");
/* Dead code path for manual 1000SX config, kept for reference. */
710 GX_CLRBIT(gx, GX_TX_CONFIG, GX_TXCFG_AUTONEG);
711 config = /* bit symbols for 802.3z */0;
712 ctrl |= GX_CTRL_SET_LINK_UP;
713 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
714 ctrl |= GX_CTRL_DUPLEX;
724 * 1000TX half duplex does not work.
726 if (IFM_TYPE(ifm->ifm_media) == IFM_ETHER &&
727 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_TX &&
728 (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) == 0)
730 mii = device_get_softc(gx->gx_miibus);
737 * Report current media status.
/*
 * gx_ifmedia_sts: fill in ifmr from either the chip's STATUS register
 * (TBI/SERDES mode) or the MII layer. A half-duplex 1000TX report from
 * the PHY is rewritten to IFM_NONE since that mode is unsupported.
 */
740 gx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
743 struct mii_data *mii;
748 if (gx->gx_tbimode) {
749 ifmr->ifm_status = IFM_AVALID;
750 ifmr->ifm_active = IFM_ETHER;
752 status = CSR_READ_4(gx, GX_STATUS);
753 if ((status & GX_STAT_LINKUP) == 0)
756 ifmr->ifm_status |= IFM_ACTIVE;
757 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
759 mii = device_get_softc(gx->gx_miibus);
761 if ((mii->mii_media_active & (IFM_1000_TX | IFM_HDX)) ==
762 (IFM_1000_TX | IFM_HDX))
763 mii->mii_media_active = IFM_ETHER | IFM_NONE;
764 ifmr->ifm_active = mii->mii_media_active;
765 ifmr->ifm_status = mii->mii_media_status;
/*
 * gx_mii_shiftin: bit-bang `length` bits of `data` (MSB first) out to
 * the PHY over the GPIO-driven MDIO lines, toggling GX_CTRL_PHY_CLK for
 * each bit. DELAY()s between edges are elided from this fragment.
 */
770 gx_mii_shiftin(struct gx_softc *gx, int data, int length)
775 * Set up default GPIO direction + PHY data out.
777 reg = CSR_READ_4(gx, GX_CTRL);
778 reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
779 reg |= GX_CTRL_GPIO_DIR | GX_CTRL_PHY_IO_DIR;
782 * Shift in data to PHY.
784 for (x = 1 << (length - 1); x; x >>= 1) {
786 reg |= GX_CTRL_PHY_IO;
788 reg &= ~GX_CTRL_PHY_IO;
789 CSR_WRITE_4(gx, GX_CTRL, reg);
791 CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
793 CSR_WRITE_4(gx, GX_CTRL, reg);
/*
 * gx_mii_shiftout: bit-bang a 16-bit value in from the PHY. Sets the
 * MDIO data line to input, issues turnaround clocks, then samples
 * GX_CTRL_PHY_IO on each of 16 clock pulses (MSB first). The
 * accumulation of the sampled bit and the return statement are elided
 * from this fragment.
 */
799 gx_mii_shiftout(struct gx_softc *gx)
806 * Set up default GPIO direction + PHY data in.
808 reg = CSR_READ_4(gx, GX_CTRL);
809 reg &= ~(GX_CTRL_GPIO_DIR_MASK | GX_CTRL_PHY_IO | GX_CTRL_PHY_CLK);
810 reg |= GX_CTRL_GPIO_DIR;
812 CSR_WRITE_4(gx, GX_CTRL, reg);
814 CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
816 CSR_WRITE_4(gx, GX_CTRL, reg);
819 * Shift out data from PHY.
822 for (x = 1 << 15; x; x >>= 1) {
823 CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
825 if (CSR_READ_4(gx, GX_CTRL) & GX_CTRL_PHY_IO)
827 CSR_WRITE_4(gx, GX_CTRL, reg)
829 }
830 CSR_WRITE_4(gx, GX_CTRL, reg | GX_CTRL_PHY_CLK);
832 CSR_WRITE_4(gx, GX_CTRL, reg);
/*
 * gx_miibus_readreg: miibus read method. Shifts the MDIO preamble and
 * a read frame (SOF, op, phy, reg) into the PHY, then shifts the 16-bit
 * result back out.
 */
839 gx_miibus_readreg(device_t dev, int phy, int reg)
843 gx = device_get_softc(dev);
849 * Note: Cordova has a MDIC register. livingood and < have mii bits
852 gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
853 gx_mii_shiftin(gx, (GX_PHY_SOF << 12) | (GX_PHY_OP_READ << 10) |
854 (phy << 5) | reg, GX_PHY_READ_LEN);
855 return (gx_mii_shiftout(gx));
/*
 * gx_miibus_writereg: miibus write method. Shifts the preamble and a
 * full write frame (SOF, op, phy, reg, turnaround, 16-bit value) into
 * the PHY.
 */
859 gx_miibus_writereg(device_t dev, int phy, int reg, int value)
863 gx = device_get_softc(dev);
867 gx_mii_shiftin(gx, GX_PHY_PREAMBLE, GX_PHY_PREAMBLE_LEN);
868 gx_mii_shiftin(gx, (GX_PHY_SOF << 30) | (GX_PHY_OP_WRITE << 28) |
869 (phy << 23) | (reg << 18) | (GX_PHY_TURNAROUND << 16) |
870 (value & 0xffff), GX_PHY_WRITE_LEN);
/*
 * gx_miibus_statchg: miibus status-change callback. Mirrors the flow
 * control the PHY negotiated (IFM_FLAG0 = RX pause, IFM_FLAG1 = TX
 * pause) into the chip's control register.
 */
874 gx_miibus_statchg(device_t dev)
877 struct mii_data *mii;
880 gx = device_get_softc(dev);
885 * Set flow control behavior to mirror what PHY negotiated.
887 mii = device_get_softc(gx->gx_miibus);
892 reg = CSR_READ_4(gx, GX_CTRL);
893 if (mii->mii_media_active & IFM_FLAG0)
894 reg |= GX_CTRL_RX_FLOWCTRL;
896 reg &= ~GX_CTRL_RX_FLOWCTRL;
897 if (mii->mii_media_active & IFM_FLAG1)
898 reg |= GX_CTRL_TX_FLOWCTRL;
900 reg &= ~GX_CTRL_TX_FLOWCTRL;
901 CSR_WRITE_4(gx, GX_CTRL, reg);
/*
 * gx_ioctl: ifnet ioctl handler. Handles address/MTU changes, IFF_UP /
 * promiscuous toggling (reprogramming only the promisc bit when the
 * interface is already running), multicast list updates, media ioctls
 * (delegated to MII or the local TBI ifmedia), and hardware-checksum
 * capability toggling. The case labels and default handling are elided
 * from this fragment.
 */
908 gx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
910 struct gx_softc *gx = ifp->if_softc;
911 struct ifreq *ifr = (struct ifreq *)data;
912 struct mii_data *mii;
913 int s, mask, error = 0;
921 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: reject MTUs above the chip's maximum. */
924 if (ifr->ifr_mtu > GX_MAX_MTU) {
927 ifp->if_mtu = ifr->ifr_mtu;
932 if ((ifp->if_flags & IFF_UP) == 0) {
934 } else if (ifp->if_flags & IFF_RUNNING &&
935 ((ifp->if_flags & IFF_PROMISC) !=
936 (gx->gx_if_flags & IFF_PROMISC))) {
937 if (ifp->if_flags & IFF_PROMISC)
938 GX_SETBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
940 GX_CLRBIT(gx, GX_RX_CONTROL, GX_RXC_UNI_PROMISC);
/* Remember flags so the next SIOCSIFFLAGS can detect changes. */
944 gx->gx_if_flags = ifp->if_flags;
948 if (ifp->if_flags & IFF_RUNNING)
953 if (gx->gx_miibus != NULL) {
954 mii = device_get_softc(gx->gx_miibus);
955 error = ifmedia_ioctl(ifp, ifr,
956 &mii->mii_media, command);
958 error = ifmedia_ioctl(ifp, ifr, &gx->gx_media, command);
/* SIOCSIFCAP: toggle HWCSUM and reinit if running. */
962 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
963 if (mask & IFCAP_HWCSUM) {
964 if (IFCAP_HWCSUM & ifp->if_capenable)
965 ifp->if_capenable &= ~IFCAP_HWCSUM;
967 ifp->if_capenable |= IFCAP_HWCSUM;
968 if (ifp->if_flags & IFF_RUNNING)
/*
 * gx_phy_reset: reset the external PHY. Asserts SLU, then pulses the
 * active-low GPIO-driven PHY reset line (deassert-assert-deassert with
 * DELAY()s elided), and finally toggles bit 31 of GX_CTRL, which applies
 * only to Cordova (82544) parts.
 */
983 gx_phy_reset(struct gx_softc *gx)
987 GX_SETBIT(gx, GX_CTRL, GX_CTRL_SET_LINK_UP);
990 * PHY reset is active low.
992 reg = CSR_READ_4(gx, GX_CTRL_EXT);
993 reg &= ~(GX_CTRLX_GPIO_DIR_MASK | GX_CTRLX_PHY_RESET);
994 reg |= GX_CTRLX_GPIO_DIR;
996 CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
998 CSR_WRITE_4(gx, GX_CTRL_EXT, reg);
1000 CSR_WRITE_4(gx, GX_CTRL_EXT, reg | GX_CTRLX_PHY_RESET);
1004 /* post-livingood (cordova) only */
1005 GX_SETBIT(gx, GX_CTRL, 0x80000000);
1007 GX_CLRBIT(gx, GX_CTRL, 0x80000000);
/*
 * gx_reset: mask all interrupts and issue a full device reset via the
 * control register. The post-reset settle delay is elided from this
 * fragment.
 */
1012 gx_reset(struct gx_softc *gx)
1015 /* Disable host interrupts. */
1016 CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);
1018 /* reset chip (THWAP!) */
1019 GX_SETBIT(gx, GX_CTRL, GX_CTRL_DEVICE_RESET);
/*
 * gx_stop: halt all chip I/O. Resets and flushes the transmitter and
 * receiver, resets the link, frees both descriptor rings' mbufs, and
 * clears RUNNING/OACTIVE so the stack stops handing us packets.
 */
1024 gx_stop(struct gx_softc *gx)
1028 ifp = &gx->arpcom.ac_if;
1030 /* reset and flush transmitter */
1031 CSR_WRITE_4(gx, GX_TX_CONTROL, GX_TXC_RESET);
1033 /* reset and flush receiver */
1034 CSR_WRITE_4(gx, GX_RX_CONTROL, GX_RXC_RESET);
1038 GX_SETBIT(gx, GX_CTRL, GX_CTRL_LINK_RESET);
1040 /* Free the RX lists. */
1041 gx_free_rx_ring(gx);
1043 /* Free TX buffers. */
1044 gx_free_tx_ring(gx);
1046 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
/*
 * gx_watchdog: transmit watchdog. Logs the timeout and (in elided
 * lines) resets and reinitializes the chip.
 */
1050 gx_watchdog(struct ifnet *ifp)
1052 struct gx_softc *gx;
1056 device_printf(gx->gx_dev, "watchdog timeout -- resetting\n");
/* Initialize a receive ring descriptor (comment below kept verbatim). */
1064 * Intialize a receive ring descriptor.
/*
 * gx_newbuf: attach an mbuf cluster to RX descriptor `idx`. When m is
 * NULL a fresh header+cluster is allocated (dropping the packet on
 * failure); otherwise the caller's mbuf is recycled by resetting its
 * length and data pointer. The buffer is 2-byte aligned for the IP
 * header only at standard MTU -- see the jumbo-frame caveat below.
 */
1067 gx_newbuf(struct gx_softc *gx, int idx, struct mbuf *m)
1069 struct mbuf *m_new = NULL;
1070 struct gx_rx_desc *r;
1073 MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
1074 if (m_new == NULL) {
1075 device_printf(gx->gx_dev,
1076 "mbuf allocation failed -- packet dropped\n");
1079 MCLGET(m_new, MB_DONTWAIT);
1080 if ((m_new->m_flags & M_EXT) == 0) {
1081 device_printf(gx->gx_dev,
1082 "cluster allocation failed -- packet dropped\n");
1086 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
/* Recycle path: reset the caller-supplied mbuf instead of allocating. */
1088 m->m_len = m->m_pkthdr.len = MCLBYTES;
1089 m->m_data = m->m_ext.ext_buf;
1096 * this will _NOT_ work for large MTU's; it will overwrite
1097 * the end of the buffer. E.g.: take this out for jumbograms,
1098 * but then that breaks alignment.
1100 if (gx->arpcom.ac_if.if_mtu <= ETHERMTU)
1101 m_adj(m_new, ETHER_ALIGN);
1103 gx->gx_cdata.gx_rx_chain[idx] = m_new;
1104 r = &gx->gx_rdata->gx_rx_ring[idx];
1105 r->rx_addr = vtophys(mtod(m_new, caddr_t));
/*
 * gx_init_rx_ring: populate every RX descriptor with a fresh mbuf
 * cluster via gx_newbuf, then program the ring base (physical address),
 * length, and head/tail registers, leaving the receiver disabled.
 */
1112 * The receive ring can have up to 64K descriptors, which at 2K per mbuf
1113 * cluster, could add up to 128M of memory. Due to alignment constraints,
1114 * the number of descriptors must be a multiple of 8. For now, we
1115 * allocate 256 entries and hope that our CPU is fast enough to keep up
1119 gx_init_rx_ring(struct gx_softc *gx)
1123 for (i = 0; i < GX_RX_RING_CNT; i++) {
1124 error = gx_newbuf(gx, i, NULL);
1129 /* bring receiver out of reset state, leave disabled */
1130 CSR_WRITE_4(gx, GX_RX_CONTROL, 0);
1132 /* set up ring registers */
1133 CSR_WRITE_8(gx, gx->gx_reg.r_rx_base,
1134 (u_quad_t)vtophys(gx->gx_rdata->gx_rx_ring));
1136 CSR_WRITE_4(gx, gx->gx_reg.r_rx_length,
1137 GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1138 CSR_WRITE_4(gx, gx->gx_reg.r_rx_head, 0);
1139 CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, GX_RX_RING_CNT - 1);
1140 gx->gx_rx_tail_idx = 0;
/*
 * gx_free_rx_ring: release every mbuf attached to the RX ring (frees
 * elided between these lines), zero the descriptor array, and drop any
 * partially-assembled multi-descriptor packet chain.
 */
1146 gx_free_rx_ring(struct gx_softc *gx)
1151 mp = gx->gx_cdata.gx_rx_chain;
1152 for (i = 0; i < GX_RX_RING_CNT; i++, mp++) {
1158 bzero((void *)gx->gx_rdata->gx_rx_ring,
1159 GX_RX_RING_CNT * sizeof(struct gx_rx_desc));
1161 /* release any partially-received packet chain */
1162 if (gx->gx_pkthdr != NULL) {
1163 m_freem(gx->gx_pkthdr);
1164 gx->gx_pkthdr = NULL;
/*
 * gx_init_tx_ring: program the TX ring base/length/head/tail registers
 * (transmitter left disabled), reset the software head/tail indices,
 * and mark that no checksum-offload context has been loaded yet.
 */
1169 gx_init_tx_ring(struct gx_softc *gx)
1172 /* bring transmitter out of reset state, leave disabled */
1173 CSR_WRITE_4(gx, GX_TX_CONTROL, 0);
1175 /* set up ring registers */
1176 CSR_WRITE_8(gx, gx->gx_reg.r_tx_base,
1177 (u_quad_t)vtophys(gx->gx_rdata->gx_tx_ring));
1178 CSR_WRITE_4(gx, gx->gx_reg.r_tx_length,
1179 GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
1180 CSR_WRITE_4(gx, gx->gx_reg.r_tx_head, 0);
1181 CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, 0);
1182 gx->gx_tx_head_idx = 0;
1183 gx->gx_tx_tail_idx = 0;
1186 /* set up initial TX context */
1187 gx->gx_txcontext = GX_TXCONTEXT_NONE;
/*
 * gx_free_tx_ring: release every mbuf still attached to the TX ring
 * (frees elided between these lines) and zero the descriptor array.
 */
1193 gx_free_tx_ring(struct gx_softc *gx)
1198 mp = gx->gx_cdata.gx_tx_chain;
1199 for (i = 0; i < GX_TX_RING_CNT; i++, mp++) {
1205 bzero((void *)&gx->gx_rdata->gx_tx_ring,
1206 GX_TX_RING_CNT * sizeof(struct gx_tx_desc));
/*
 * gx_setmulti: program the multicast filter. This fragment shows only
 * the table wipe (starting at entry 1 -- presumably entry 0 is handled
 * elsewhere or the loop bound is deliberate; verify against full
 * source); hash programming from the multicast list is elided.
 */
1210 gx_setmulti(struct gx_softc *gx)
1214 /* wipe out the multicast table */
1215 for (i = 1; i < 128; i++)
1216 CSR_WRITE_4(gx, GX_MULTICAST_BASE + i * 4, 0);
/*
 * gx_rxeof: drain completed RX descriptors. For each descriptor with
 * GX_RXSTAT_COMPLETED set: snapshot status/length (gx_newbuf clobbers
 * them), replace the buffer, and either append the mbuf to an
 * in-progress multi-descriptor chain (gx_pkthdr / gx_pktnextp) or, on
 * END_OF_PACKET, finish the chain, translate hardware checksum status
 * into mbuf csum flags, and hand the packet to the VLAN or ethernet
 * input path. Finally write the new tail index back to the chip.
 */
1220 gx_rxeof(struct gx_softc *gx)
1222 struct ether_header *eh;
1223 struct gx_rx_desc *rx;
1225 int idx, staterr, len;
1228 gx->gx_rx_interrupts++;
1230 ifp = &gx->arpcom.ac_if;
1231 idx = gx->gx_rx_tail_idx;
1233 while (gx->gx_rdata->gx_rx_ring[idx].rx_staterr & GX_RXSTAT_COMPLETED) {
1235 rx = &gx->gx_rdata->gx_rx_ring[idx];
1236 m = gx->gx_cdata.gx_rx_chain[idx];
1238 * gx_newbuf overwrites status and length bits, so we
1239 * make a copy of them here.
1242 staterr = rx->rx_staterr;
1244 if (staterr & GX_INPUT_ERROR)
1247 if (gx_newbuf(gx, idx, NULL) == ENOBUFS)
1250 GX_INC(idx, GX_RX_RING_CNT);
1252 if (staterr & GX_RXSTAT_INEXACT_MATCH) {
1254 * multicast packet, must verify against
1255 * multicast address.
/* Mid-packet descriptor: start or extend the partial chain. */
1259 if ((staterr & GX_RXSTAT_END_OF_PACKET) == 0) {
1260 if (gx->gx_pkthdr == NULL) {
1262 m->m_pkthdr.len = len;
1264 gx->gx_pktnextp = &m->m_next;
1267 m->m_flags &= ~M_PKTHDR;
1268 gx->gx_pkthdr->m_pkthdr.len += len;
1269 *(gx->gx_pktnextp) = m;
1270 gx->gx_pktnextp = &m->m_next;
/* Final descriptor: close out the chain (or single-mbuf packet). */
1275 if (gx->gx_pkthdr == NULL) {
1277 m->m_pkthdr.len = len;
1280 m->m_flags &= ~M_PKTHDR;
1281 gx->gx_pkthdr->m_pkthdr.len += len;
1282 *(gx->gx_pktnextp) = m;
1284 gx->gx_pkthdr = NULL;
1288 eh = mtod(m, struct ether_header *);
1289 m->m_pkthdr.rcvif = ifp;
1291 /* Remove header from mbuf and pass it on. */
1292 m_adj(m, sizeof(struct ether_header));
1294 #define IP_CSMASK (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_IP_CSUM)
1295 #define TCP_CSMASK \
1296 (GX_RXSTAT_IGNORE_CSUM | GX_RXSTAT_HAS_TCP_CSUM | GX_RXERR_TCP_CSUM)
1297 if (ifp->if_capenable & IFCAP_RXCSUM) {
1300 * Intel Erratum #23 indicates that the Receive IP
1301 * Checksum offload feature has been completely
1304 if ((staterr & IP_CSUM_MASK) == GX_RXSTAT_HAS_IP_CSUM) {
1305 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1306 if ((staterr & GX_RXERR_IP_CSUM) == 0)
1307 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1310 if ((staterr & TCP_CSMASK) == GX_RXSTAT_HAS_TCP_CSUM) {
1311 m->m_pkthdr.csum_flags |=
1312 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1313 m->m_pkthdr.csum_data = 0xffff;
1317 * If we received a packet with a vlan tag, pass it
1318 * to vlan_input() instead of ether_input().
1320 if (staterr & GX_RXSTAT_VLAN_PKT) {
1321 VLAN_INPUT_TAG(eh, m, rx->rx_special);
1324 ether_input(ifp, eh, m);
/* Error path: recycle the mbuf back into this descriptor slot. */
1329 gx_newbuf(gx, idx, m);
1333 * this isn't quite right. Suppose we have a packet that
1334 * spans 5 descriptors (9K split into 2K buffers). If
1335 * the 3rd descriptor sets an error, we need to ignore
1336 * the last two. The way things stand now, the last two
1337 * will be accepted as a single packet.
1339 * we don't worry about this -- the chip may not set an
1340 * error in this case, and the checksum of the upper layers
1341 * will catch the error.
1343 if (gx->gx_pkthdr != NULL) {
1344 m_freem(gx->gx_pkthdr);
1345 gx->gx_pkthdr = NULL;
1347 GX_INC(idx, GX_RX_RING_CNT);
/* Hand the chip our new tail (one behind the next-to-fill slot). */
1350 gx->gx_rx_tail_idx = idx;
1352 idx = GX_RX_RING_CNT - 1;
1353 CSR_WRITE_4(gx, gx->gx_reg.r_rx_tail, idx);
/*
 * gx_txeof: reclaim completed TX descriptors. Walks from the software
 * head index, skipping non-end-of-packet descriptors, stopping at the
 * first packet the chip has not marked done, and freeing the mbuf chain
 * for each completed packet. Clears IFF_OACTIVE once the ring is empty.
 */
1357 gx_txeof(struct gx_softc *gx)
1362 gx->gx_tx_interrupts++;
1364 ifp = &gx->arpcom.ac_if;
1365 idx = gx->gx_tx_head_idx;
1369 * If the system chipset performs I/O write buffering, it is
1370 * possible for the PIO read of the head descriptor to bypass the
1371 * memory write of the descriptor, resulting in reading a descriptor
1372 * which has not been updated yet.
1375 struct gx_tx_desc_old *tx;
1377 tx = (struct gx_tx_desc_old *)&gx->gx_rdata->gx_tx_ring[idx];
1380 if ((tx->tx_command & GX_TXOLD_END_OF_PKT) == 0) {
1381 GX_INC(idx, GX_TX_RING_CNT);
1385 if ((tx->tx_status & GX_TXSTAT_DONE) == 0)
1390 m_freem(gx->gx_cdata.gx_tx_chain[idx]);
1391 gx->gx_cdata.gx_tx_chain[idx] = NULL;
1395 GX_INC(idx, GX_TX_RING_CNT);
1396 gx->gx_tx_head_idx = idx;
1399 if (gx->gx_txcnt == 0)
1400 ifp->if_flags &= ~IFF_OACTIVE;
/*
 * gx_intr body (function header elided; prototype: static void
 * gx_intr(void *xsc)): interrupt handler. Masks interrupts, reads the
 * cause register (which clears it), dispatches RX/TX completion, logs
 * link changes by re-reading STATUS (the link-change interrupt alone is
 * unreliable), re-enables interrupts, and kicks gx_start if the send
 * queue is non-empty.
 */
1406 struct gx_softc *gx;
1412 ifp = &gx->arpcom.ac_if;
1416 gx->gx_interrupts++;
1418 /* Disable host interrupts. */
1419 CSR_WRITE_4(gx, GX_INT_MASK_CLR, GX_INT_ALL);
1422 * find out why we're being bothered.
1423 * reading this register automatically clears all bits.
1425 intr = CSR_READ_4(gx, GX_INT_READ);
1427 /* Check RX return ring producer/consumer */
1428 if (intr & (GX_INT_RCV_TIMER | GX_INT_RCV_THOLD | GX_INT_RCV_OVERRUN))
1431 /* Check TX ring producer/consumer */
1432 if (intr & (GX_INT_XMIT_DONE | GX_INT_XMIT_EMPTY))
1436 * handle other interrupts here.
1440 * Link change interrupts are not reliable; the interrupt may
1441 * not be generated if the link is lost. However, the register
1442 * read is reliable, so check that. Use SEQ errors to possibly
1443 * indicate that the link has changed.
1445 if (intr & GX_INT_LINK_CHANGE) {
1446 if ((CSR_READ_4(gx, GX_STATUS) & GX_STAT_LINKUP) == 0) {
1447 device_printf(gx->gx_dev, "link down\n");
1449 device_printf(gx->gx_dev, "link up\n");
1453 /* Turn interrupts on. */
1454 CSR_WRITE_4(gx, GX_INT_MASK_SET, GX_INT_WANTED);
1456 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
1463 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
1464 * pointers to descriptors.
/*
 * gx_encap: map m_head's mbuf chain onto TX descriptors. Detects a VLAN
 * tag via the M_PROTO1 rcvif convention, requires 4 free descriptors as
 * headroom, emits a checksum-offload context descriptor when the needed
 * TX context (TCP/IP vs UDP/IP) differs from the one currently loaded,
 * then emits one data descriptor per mbuf. The last descriptor gets the
 * end-of-packet/report-status/VLAN bits, the head mbuf is recorded on
 * that slot for later reclaim by gx_txeof, and the new tail index is
 * written to the chip.
 */
1467 gx_encap(struct gx_softc *gx, struct mbuf *m_head)
1469 struct gx_tx_desc_data *tx = NULL;
1470 struct gx_tx_desc_ctx *tctx;
1472 int idx, cnt, csumopts, txcontext;
1473 struct ifvlan *ifv = NULL;
1475 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1476 m_head->m_pkthdr.rcvif != NULL &&
1477 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1478 ifv = m_head->m_pkthdr.rcvif->if_softc;
1481 idx = gx->gx_tx_tail_idx;
1482 txcontext = gx->gx_txcontext;
1485 * Insure we have at least 4 descriptors pre-allocated.
1487 if (cnt >= GX_TX_RING_CNT - 4)
1491 * Set up the appropriate offload context if necessary.
1494 if (m_head->m_pkthdr.csum_flags) {
1495 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1496 csumopts |= GX_TXTCP_OPT_IP_CSUM;
1497 if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
1498 csumopts |= GX_TXTCP_OPT_TCP_CSUM;
1499 txcontext = GX_TXCONTEXT_TCPIP;
1500 } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
1501 csumopts |= GX_TXTCP_OPT_TCP_CSUM;
1502 txcontext = GX_TXCONTEXT_UDPIP;
1503 } else if (txcontext == GX_TXCONTEXT_NONE)
1504 txcontext = GX_TXCONTEXT_TCPIP;
/* Context already loaded in hardware: skip the context descriptor. */
1505 if (txcontext == gx->gx_txcontext)
1508 tctx = (struct gx_tx_desc_ctx *)&gx->gx_rdata->gx_tx_ring[idx];
1509 tctx->tx_ip_csum_start = ETHER_HDR_LEN;
1510 tctx->tx_ip_csum_end = ETHER_HDR_LEN + sizeof(struct ip) - 1;
1511 tctx->tx_ip_csum_offset =
1512 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
1513 tctx->tx_tcp_csum_start = ETHER_HDR_LEN + sizeof(struct ip);
1514 tctx->tx_tcp_csum_end = 0;
1515 if (txcontext == GX_TXCONTEXT_TCPIP)
1516 tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
1517 sizeof(struct ip) + offsetof(struct tcphdr, th_sum);
1519 tctx->tx_tcp_csum_offset = ETHER_HDR_LEN +
1520 sizeof(struct ip) + offsetof(struct udphdr, uh_sum);
1521 tctx->tx_command = GX_TXCTX_EXTENSION | GX_TXCTX_INT_DELAY;
1523 tctx->tx_status = 0;
1524 GX_INC(idx, GX_TX_RING_CNT);
1530 * Start packing the mbufs in this chain into the transmit
1531 * descriptors. Stop when we run out of descriptors or hit
1532 * the end of the mbuf chain.
1534 for (m = m_head; m != NULL; m = m->m_next) {
1538 if (cnt == GX_TX_RING_CNT) {
1539 printf("overflow(2): %d, %d\n", cnt, GX_TX_RING_CNT);
1543 tx = (struct gx_tx_desc_data *)&gx->gx_rdata->gx_tx_ring[idx];
1544 tx->tx_addr = vtophys(mtod(m, vm_offset_t));
1546 tx->tx_len = m->m_len;
1547 if (gx->arpcom.ac_if.if_hwassist) {
1549 tx->tx_command = GX_TXTCP_EXTENSION;
1550 tx->tx_options = csumopts;
1553 * This is really a struct gx_tx_desc_old.
1557 GX_INC(idx, GX_TX_RING_CNT);
/* Finish the last descriptor of the packet. */
1562 tx->tx_command |= GX_TXTCP_REPORT_STATUS | GX_TXTCP_INT_DELAY |
1563 GX_TXTCP_ETHER_CRC | GX_TXTCP_END_OF_PKT;
1565 tx->tx_command |= GX_TXTCP_VLAN_ENABLE;
1566 tx->tx_vlan = ifv->ifv_tag;
1569 gx->gx_tx_tail_idx = idx;
1570 gx->gx_txcontext = txcontext;
/* Record the head mbuf on the packet's last slot for gx_txeof. */
1571 idx = GX_PREV(idx, GX_TX_RING_CNT);
1572 gx->gx_cdata.gx_tx_chain[idx] = m_head;
1574 CSR_WRITE_4(gx, gx->gx_reg.r_tx_tail, gx->gx_tx_tail_idx);
1581 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1582 * to the mbuf data regions directly in the transmit descriptors.
1585 gx_start(struct ifnet *ifp)
1587 struct gx_softc *gx;
1588 struct mbuf *m_head;
1596 IF_DEQUEUE(&ifp->if_snd, m_head);
1601 * Pack the data into the transmit ring. If we
1602 * don't have room, set the OACTIVE flag and wait
1603 * for the NIC to drain the ring.
1605 if (gx_encap(gx, m_head) != 0) {
1606 IF_PREPEND(&ifp->if_snd, m_head);
1607 ifp->if_flags |= IFF_OACTIVE;
1612 * If there's a BPF listener, bounce a copy of this frame
1616 bpf_mtap(ifp, m_head);
1619 * Set a timeout in case the chip goes out to lunch.