 * Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/vge/if_vge.c,v 1.24 2006/02/14 12:44:56 glebius Exp $
 */
/*
 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * The VIA Networking VT6122 is a 32-bit, 33/66MHz PCI device that
 * combines a tri-speed ethernet MAC and PHY, with the following
 * features:
 *
 *	o Jumbo frame support up to 16K
 *	o Transmit and receive flow control
 *	o IPv4 checksum offload
 *	o VLAN tag insertion and stripping
 *	o TCP large send
 *	o 64-bit multicast hash table filter
 *	o 64 entry CAM filter
 *	o 16K RX FIFO and 48K TX FIFO memory
 *	o Interrupt moderation
 *
 * The VT6122 supports up to four transmit DMA queues. The descriptors
 * in the transmit ring can address up to 7 data fragments; frames which
 * span more than 7 data buffers must be coalesced, but in general the
 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
 * long. The receive descriptors address only a single buffer.
 *
 * There are two peculiar design issues with the VT6122. One is that
 * receive data buffers must be aligned on a 32-bit boundary. This is
 * not a problem where the VT6122 is used as a LOM device in x86-based
 * systems, but on architectures that generate unaligned access traps, we
 * have to do some copying.
 *
 * The other issue has to do with the way 64-bit addresses are handled.
 * The DMA descriptors only allow you to specify 48 bits of addressing
 * information. The remaining 16 bits are specified using one of the
 * I/O registers. If you only have a 32-bit system, then this isn't
 * an issue, but if you have a 64-bit system and more than 4GB of
 * memory, you must make sure your network data buffers reside
 * in the same 48-bit 'segment.'
 *
 * Special thanks to Ryan Fu at VIA Networking for providing documentation
 * and sample NICs for testing.
 */
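
/*
 * Illustrative sketch (not part of the original driver): one way to
 * honor the 48-bit segment restriction described above on a 64-bit
 * machine with more than 4GB of RAM would be to reject any buffer
 * whose upper 16 address bits differ from the value programmed into
 * the chip, along the lines of:
 *
 *	if (VGE_ADDR_HI(busaddr) != seg_hi)	('seg_hi' is hypothetical)
 *		return EFBIG;
 *
 * This driver sidesteps the problem entirely by restricting its DMA
 * tags to BUS_SPACE_MAXADDR_32BIT, so every buffer lives in the first
 * 4GB and the upper 16 bits are always zero.
 */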
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/interrupt.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/bpf.h>

#ifndef __NO_STRICT_ALIGNMENT
#define VGE_FIXUP_RX
#endif

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include "miibus_if.h"

#include <dev/netif/vge/if_vgereg.h>
#include <dev/netif/vge/if_vgevar.h>
#define VGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static const struct vge_type vge_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X,
	  "VIA Networking Gigabit Ethernet" },
	{ 0, 0, NULL }
};
static int vge_probe(device_t);
static int vge_attach(device_t);
static int vge_detach(device_t);

static int vge_encap(struct vge_softc *, struct mbuf *, int);

static void vge_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static void vge_dma_map_rx_desc(void *, bus_dma_segment_t *, int,
				bus_size_t, int);
static void vge_dma_map_tx_desc(void *, bus_dma_segment_t *, int,
				bus_size_t, int);
static int vge_dma_alloc(device_t);
static void vge_dma_free(struct vge_softc *);
static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
static int vge_rx_list_init(struct vge_softc *);
static int vge_tx_list_init(struct vge_softc *);
static __inline void vge_fixup_rx(struct mbuf *);
static void vge_rxeof(struct vge_softc *, int);
static void vge_txeof(struct vge_softc *);
static void vge_intr(void *);
static void vge_tick(struct vge_softc *);
static void vge_start(struct ifnet *);
static int vge_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void vge_init(void *);
static void vge_stop(struct vge_softc *);
static void vge_watchdog(struct ifnet *);
static int vge_suspend(device_t);
static int vge_resume(device_t);
static void vge_shutdown(device_t);
static int vge_ifmedia_upd(struct ifnet *);
static void vge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

#ifdef VGE_EEPROM
static void vge_eeprom_getword(struct vge_softc *, int, uint16_t *);
#endif
static void vge_read_eeprom(struct vge_softc *, uint8_t *, int, int, int);

static void vge_miipoll_start(struct vge_softc *);
static void vge_miipoll_stop(struct vge_softc *);
static int vge_miibus_readreg(device_t, int, int);
static int vge_miibus_writereg(device_t, int, int, int);
static void vge_miibus_statchg(device_t);

static void vge_cam_clear(struct vge_softc *);
static int vge_cam_set(struct vge_softc *, uint8_t *);
static void vge_setmulti(struct vge_softc *);
static void vge_reset(struct vge_softc *);

#ifdef DEVICE_POLLING
static void vge_poll(struct ifnet *, enum poll_cmd, int);
static void vge_disable_intr(struct vge_softc *);
#endif
static void vge_enable_intr(struct vge_softc *, uint32_t);
#define VGE_PCI_LOIO	0x10
#define VGE_PCI_LOMEM	0x14
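
/*
 * Note: VGE_PCI_LOIO and VGE_PCI_LOMEM above are the PCI configuration
 * space offsets of the chip's I/O-space and memory-space BARs.  This
 * driver maps the registers through the memory BAR; see the
 * bus_alloc_resource_any(SYS_RES_MEMORY, ...) call in vge_attach().
 */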
static device_method_t vge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vge_probe),
	DEVMETHOD(device_attach,	vge_attach),
	DEVMETHOD(device_detach,	vge_detach),
	DEVMETHOD(device_suspend,	vge_suspend),
	DEVMETHOD(device_resume,	vge_resume),
	DEVMETHOD(device_shutdown,	vge_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vge_miibus_statchg),

	{ 0, 0 }
};

static driver_t vge_driver = {
	"vge",
	vge_methods,
	sizeof(struct vge_softc)
};

static devclass_t vge_devclass;

DECLARE_DUMMY_MODULE(if_vge);
MODULE_DEPEND(if_vge, miibus, 1, 1, 1);
DRIVER_MODULE(if_vge, pci, vge_driver, vge_devclass, NULL, NULL);
DRIVER_MODULE(if_vge, cardbus, vge_driver, vge_devclass, NULL, NULL);
DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, NULL, NULL);
#ifdef VGE_EEPROM
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
{
	uint16_t word = 0;
	int i;

	/*
	 * Enter EEPROM embedded programming mode. In order to
	 * access the EEPROM at all, we first have to set the
	 * EELOAD bit in the CHIPCFG2 register.
	 */
	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);

	/* Select the address of the word we want to read */
	CSR_WRITE_1(sc, VGE_EEADDR, addr);

	/* Issue read command */
	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);

	/* Wait for the done bit to be set. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
			break;
	}
	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM read timed out\n");
		*dest = 0;
		return;
	}

	/* Read the result */
	word = CSR_READ_2(sc, VGE_EERDDAT);

	/* Turn off EEPROM access mode. */
	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);

	*dest = word;
}
#endif	/* VGE_EEPROM */
/*
 * Read a sequence of words from the EEPROM.
 */
static void
vge_read_eeprom(struct vge_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
#ifdef VGE_EEPROM
	uint16_t word = 0, *ptr;
	int i;

	for (i = 0; i < cnt; i++) {
		vge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
#else
	int i;

	/* Without EEPROM access, read the station address registers. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
#endif
}
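
/*
 * Usage note: vge_attach() calls
 * vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0) to pull the station
 * address out of the EEPROM as three little-endian words, i.e. the
 * full six-byte Ethernet address, with no byte swapping.
 */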
static void
vge_miipoll_stop(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_MIICMD, 0);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
}

static void
vge_miipoll_start(struct vge_softc *sc)
{
	int i;

	/* First, make sure we're idle. */
	CSR_WRITE_1(sc, VGE_MIICMD, 0);
	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
		return;
	}

	/* Now enable auto poll mode. */
	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);

	/* And make sure it started. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "failed to start MII autopoll\n");
}
static int
vge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vge_softc *sc;
	int i;
	uint16_t rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return 0;

	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "MII read timed out\n");

	rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);

	return rval;
}

static int
vge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);

	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return 0;

	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);

	return rval;
}
static void
vge_cam_clear(struct vge_softc *sc)
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * Desired entries will be enabled as we fill the filter in.
	 */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	/* Clear the VLAN filter too. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
	for (i = 0; i < 8; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);

	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	sc->vge_camidx = 0;
}
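
/*
 * Note on the register layout (summarized from the code above): the
 * single CAM0-CAM7 register window is multiplexed between the CAM mask
 * page, the CAM data page and the multicast hash (MAR) page, selected
 * via the VGE_CAMCTL page-select bits.  Every routine that touches the
 * CAM must therefore restore VGE_PAGESEL_MAR when it is done, as both
 * vge_cam_clear() and vge_cam_set() do on their way out.
 */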
static int
vge_cam_set(struct vge_softc *sc, uint8_t *addr)
{
	int i, error = 0;

	if (sc->vge_camidx == VGE_CAM_MAXADDRS)
		return ENOSPC;

	/* Select the CAM data page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);

	/* Set the filter entry we want to update and enable writing. */
	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);

	/* Write the address to the CAM registers */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);

	/* Issue a write command. */
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);

	/* Wait for it to clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "setting CAM filter failed\n");
		error = EIO;
		goto fail;
	}

	/* Select the CAM mask page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);

	/* Set the mask bit that enables this filter. */
	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
	    1<<(sc->vge_camidx & 7));

	sc->vge_camidx++;

fail:
	/* Turn off access to CAM. */
	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

	return error;
}
/*
 * Program the multicast filter. We use the 64-entry CAM filter
 * for perfect filtering. If there are more than 64 multicast addresses,
 * we use the hash filter instead.
 */
static void
vge_setmulti(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;
	struct ifmultiaddr *ifma;
	uint32_t h, hashes[2] = { 0, 0 };

	/* First, zot all the multicast entries. */
	vge_cam_clear(sc);
	CSR_WRITE_4(sc, VGE_MAR0, 0);
	CSR_WRITE_4(sc, VGE_MAR1, 0);

	/*
	 * If the user wants allmulti or promisc mode, enable reception
	 * of all multicast frames.
	 */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		error = vge_cam_set(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (error)
			break;
	}

	/* If there were too many addresses, use the hash filter. */
	if (error) {
		vge_cam_clear(sc);

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
}
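
/*
 * The hash fallback above works as follows: the big-endian CRC32 of
 * each multicast address is computed and its top six bits (>> 26)
 * select one of 64 bit positions, split across the two 32-bit MAR
 * registers (bits 0-31 in VGE_MAR0, bits 32-63 in VGE_MAR1).  The
 * chip then accepts any frame whose destination hashes to a set bit,
 * so the filter is imperfect but never drops a subscribed group.
 */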
static void
vge_reset(struct vge_softc *sc)
{
	int i;

	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "soft reset timed out\n");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}
	if (i == VGE_TIMEOUT) {
		if_printf(&sc->arpcom.ac_if, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
}
/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vge_probe(device_t dev)
{
	const struct vge_type *t;
	uint16_t did, vid;

	did = pci_get_device(dev);
	vid = pci_get_vendor(dev);
	for (t = vge_devs; t->vge_name != NULL; ++t) {
		if (vid == t->vge_vid && did == t->vge_did) {
			device_set_desc(dev, t->vge_name);
			return 0;
		}
	}
	return ENXIO;
}
static void
vge_dma_map_rx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_rx_desc *d = NULL;

	ctx = arg;
	if (error)
		return;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */
	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		if_printf(&ctx->sc->arpcom.ac_if,
			  "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;
}
static void
vge_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		    bus_size_t mapsize, int error)
{
	struct vge_dmaload_arg *ctx;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int i = 0;

	ctx = arg;
	if (error)
		return;

	/* Signal error to caller if there are too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */
	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
		ctx->vge_maxsegs = 0;
		return;
	}

	for (i = 0; i < nseg; i++) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
	}

	/* Argh. This chip does not autopad short frames */
	if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
		f = &d->vge_frag[i];
		f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
		    ctx->vge_m0->m_pkthdr.len));
		f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
		f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
		ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
		i++;
	}

	/*
	 * When telling the chip how many segments there are, we
	 * must use nsegs + 1 instead of just nsegs. Darned if I
	 * know why.
	 */
	i++;

	d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
	d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;

	if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
		d->vge_ctl |= VGE_TDCTL_JUMBO;

	ctx->vge_maxsegs = nseg;
}
/*
 * Map a single buffer address.
 */
static void
vge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	*((bus_addr_t *)arg) = segs->ds_addr;
}
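
/*
 * Usage note: vge_dma_map_addr() is handed to bus_dmamap_load() as the
 * completion callback when the RX and TX descriptor rings are loaded
 * in vge_dma_alloc() below; its only job is to copy the ring's single
 * bus address back through the opaque 'arg' pointer.
 */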
static int
vge_dma_alloc(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	int error, nseg, i, tx_pos = 0, rx_pos = 0;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define VGE_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			MAXBSIZE, VGE_NSEG_NEW,	/* maxsize, nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			BUS_DMA_ALLOCNOW,	/* flags */
			&sc->vge_parent_tag);
	if (error) {
		device_printf(dev, "can't create parent dma tag\n");
		return error;
	}

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
			BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
			NULL, NULL,
			MCLBYTES * nseg, nseg, MCLBYTES,
			BUS_DMA_ALLOCNOW, &sc->vge_ldata.vge_mtag);
	if (error) {
		device_printf(dev, "could not allocate mbuf dma tag\n");
		return error;
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
			NULL, NULL,
			VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ,
			BUS_DMA_ALLOCNOW,
			&sc->vge_ldata.vge_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate tx list dma tag\n");
		return error;
	}

	/* Allocate DMA'able memory for the TX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
				 (void **)&sc->vge_ldata.vge_tx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate tx list dma memory\n");
		return error;
	}

	/* Load the map for the TX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list_map,
				sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_tx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load tx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
		sc->vge_ldata.vge_tx_list = NULL;
		return error;
	}

	/* Create DMA maps for TX buffers */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			tx_pos = i;
			goto map_fail;
		}
	}
	tx_pos = VGE_TX_DESC_CNT;
	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
			NULL, NULL,
			VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ,
			BUS_DMA_ALLOCNOW,
			&sc->vge_ldata.vge_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate rx list dma tag\n");
		goto map_fail;
	}

	/* Allocate DMA'able memory for the RX ring */
	error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
				 (void **)&sc->vge_ldata.vge_rx_list,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &sc->vge_ldata.vge_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate rx list dma memory\n");
		goto map_fail;
	}

	/* Load the map for the RX ring. */
	error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list_map,
				sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ,
				vge_dma_map_addr,
				&sc->vge_ldata.vge_rx_list_addr,
				BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "could not load rx list\n");
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
		sc->vge_ldata.vge_rx_list = NULL;
		goto map_fail;
	}

	/* Create DMA maps for RX buffers */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
					  &sc->vge_ldata.vge_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			rx_pos = i;
			goto map_fail;
		}
	}
	return 0;

map_fail:
	for (i = 0; i < tx_pos; ++i) {
		error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
	}
	for (i = 0; i < rx_pos; ++i) {
		error = bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
	}
	bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	sc->vge_ldata.vge_mtag = NULL;

	return error;
}
static void
vge_dma_free(struct vge_softc *sc)
{
	/* Unload and free the RX DMA ring memory and map */
	if (sc->vge_ldata.vge_rx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
				  sc->vge_ldata.vge_rx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
				sc->vge_ldata.vge_rx_list,
				sc->vge_ldata.vge_rx_list_map);
	}

	if (sc->vge_ldata.vge_rx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);

	/* Unload and free the TX DMA ring memory and map */
	if (sc->vge_ldata.vge_tx_list_tag) {
		bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
				  sc->vge_ldata.vge_tx_list_map);
		bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
				sc->vge_ldata.vge_tx_list,
				sc->vge_ldata.vge_tx_list_map);
	}

	if (sc->vge_ldata.vge_tx_list_tag)
		bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);

	/* Destroy all the RX and TX buffer maps */
	if (sc->vge_ldata.vge_mtag) {
		int i;

		for (i = 0; i < VGE_TX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_tx_dmamap[i]);
		}
		for (i = 0; i < VGE_RX_DESC_CNT; i++) {
			bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
					   sc->vge_ldata.vge_rx_dmamap[i]);
		}
		bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
	}

	if (sc->vge_parent_tag)
		bus_dma_tag_destroy(sc->vge_parent_tag);
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct vge_softc *sc;
	struct ifnet *ifp;
	int error = 0;

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;

	/* Initialize if_xname early, so if_printf() can be used */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	sc->vge_res_rid = VGE_PCI_LOMEM;
	sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					     &sc->vge_res_rid, RF_ACTIVE);
	if (sc->vge_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		return ENXIO;
	}

	sc->vge_btag = rman_get_bustag(sc->vge_res);
	sc->vge_bhandle = rman_get_bushandle(sc->vge_res);

	/* Allocate interrupt */
	sc->vge_irq_rid = 0;
	sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vge_irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->vge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	vge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0);

	/* Allocate DMA related stuff */
	error = vge_dma_alloc(dev);
	if (error)
		goto fail;

	/* Do MII setup */
	error = mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd,
			      vge_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy!\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vge_init;
	ifp->if_start = vge_start;
	ifp->if_watchdog = vge_watchdog;
	ifp->if_ioctl = vge_ioctl;
#ifdef DEVICE_POLLING
	ifp->if_poll = vge_poll;
#endif
	ifp->if_hwassist = VGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_VLAN_MTU |
			       IFCAP_HWCSUM |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->vge_irq, INTR_MPSAFE, vge_intr, sc,
			       &sc->vge_intrhand, ifp->if_serializer);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->vge_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	vge_detach(dev);
	return error;
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vge_detach(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);

		vge_stop(sc);
		bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to vge_ioctl(),
		 * which will try to call vge_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the vge_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->vge_miibus)
		device_delete_child(dev, sc->vge_miibus);
	bus_generic_detach(dev);

	if (sc->vge_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->vge_irq_rid,
				     sc->vge_irq);
	}

	if (sc->vge_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->vge_res_rid,
				     sc->vge_res);
	}

	vge_dma_free(sc);

	return 0;
}
static int
vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
{
	struct vge_dmaload_arg arg;
	struct mbuf *n = NULL;
	int error;

	if (m == NULL) {
		n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return ENOBUFS;
		m = n;
	} else {
		m->m_data = m->m_ext.ext_buf;
	}

#ifdef VGE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The VIA chip requires RX buffers to be aligned on 32-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back two bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
	m_adj(m, VGE_ETHER_ALIGN);
#else
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#endif

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_maxsegs = 1;
	arg.vge_flags = 0;

	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
				     sc->vge_ldata.vge_rx_dmamap[idx], m,
				     vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT);
	if (error || arg.vge_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		return ENOMEM;
	}

	/*
	 * Note: the manual fails to document the fact that for
	 * proper operation, the driver needs to replenish the RX
	 * DMA ring 4 descriptors at a time (rather than one at a
	 * time, like most chips). We can allocate the new buffers
	 * but we should not set the OWN bits until we're ready
	 * to hand back 4 of them in one shot.
	 */
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		int i;

		for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		}
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
			sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD);

	return 0;
}
static int
vge_tx_list_init(struct vge_softc *sc)
{
	bzero(sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_tx_mbuf,
	      (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return 0;
}
static int
vge_rx_list_init(struct vge_softc *sc)
{
	int i;

	bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero(&sc->vge_ldata.vge_rx_mbuf,
	      VGE_RX_DESC_CNT * sizeof(struct mbuf *));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return ENOBUFS;
	}

	/* Flush the RX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return 0;
}
#ifdef VGE_FIXUP_RX
static __inline void
vge_fixup_rx(struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif	/* VGE_FIXUP_RX */
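
/*
 * Illustrative walk-through of vge_fixup_rx(): vge_newbuf() loaded the
 * buffer with a two-byte (VGE_ETHER_ALIGN) pad in front of the 32-bit
 * aligned DMA area, so the 14-byte Ethernet header leaves the IP
 * header misaligned on strict-alignment machines.  Copying every
 * 16-bit word back by one slot and then backing m_data up two bytes
 * restores IP-header alignment without allocating a second buffer.
 */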
/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
 */
static void
vge_rxeof(struct vge_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	int i, total_len, lim = 0;
	struct vge_rx_desc *cur_rx;
	uint32_t rxstat, rxctl;

	ASSERT_SERIALIZED(ifp->if_serializer);

	i = sc->vge_ldata.vge_rx_prodidx;

	/* Invalidate the descriptor memory */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD);

	while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif

		cur_rx = &sc->vge_ldata.vge_rx_list[i];
		m = sc->vge_ldata.vge_rx_mbuf[i];
		total_len = VGE_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->vge_sts);
		rxctl = le32toh(cur_rx->vge_ctl);

		/* Invalidate the RX mbuf and unload its map */
		bus_dmamap_sync(sc->vge_ldata.vge_mtag,
				sc->vge_ldata.vge_rx_dmamap[i],
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_rx_dmamap[i]);

		/*
		 * If the 'start of frame' bit is set, this indicates
		 * either the first fragment in a multi-fragment receive,
		 * or an intermediate fragment. Either way, we want to
		 * accumulate the buffers.
		 */
		if (rxstat & VGE_RXPKT_SOF) {
			m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
			if (sc->vge_head == NULL) {
				sc->vge_head = sc->vge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
				sc->vge_tail = m;
			}
			vge_newbuf(sc, i, NULL);
			lim++;
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * Bad/error frames will have the RXOK bit cleared.
		 * However, there's one error case we want to allow:
		 * if a VLAN tagged frame arrives and the chip can't
		 * match it against the CAM filter, it considers this
		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
		 * We don't want to drop the frame though: our VLAN
		 * filtering is done in software.
		 */
		if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) &&
		    !(rxstat & VGE_RDSTS_CSUMERR)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			lim++;
			VGE_RX_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */
		if (vge_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->vge_head != NULL) {
				m_freem(sc->vge_head);
				sc->vge_head = sc->vge_tail = NULL;
			}
			vge_newbuf(sc, i, m);
			lim++;
			VGE_RX_DESC_INC(i);
			continue;
		}

		VGE_RX_DESC_INC(i);

		if (sc->vge_head != NULL) {
			m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->vge_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->vge_tail->m_next = m;
			}
			m = sc->vge_head;
			sc->vge_head = sc->vge_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else {
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);
		}

#ifdef VGE_FIXUP_RX
		vge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Check IP header checksum */
			if (rxctl & VGE_RDCTL_IPPKT)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (rxctl & VGE_RDCTL_IPCSUMOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
			    rxctl & VGE_RDCTL_PROTOCSUMOK) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
				    CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		if (rxstat & VGE_RDSTS_VTAG) {
			m->m_flags |= M_VLANTAG;
			m->m_pkthdr.ether_vlantag =
			    ntohs((rxctl & VGE_RDCTL_VLANID));
		}
		ifp->if_input(ifp, m);

		lim++;
		if (lim == VGE_RX_DESC_CNT)
			break;
	}

	/* Flush the RX DMA ring */
	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
			sc->vge_ldata.vge_rx_list_map,
			BUS_DMASYNC_PREWRITE);

	sc->vge_ldata.vge_rx_prodidx = i;
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
}
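
/*
 * Note on the write above: VGE_RXDESC_RESIDUECNT appears to tell the
 * chip how many RX descriptors have been returned to it; vge_init()
 * primes it with the full ring size and vge_rxeof() adds back the
 * number of descriptors it just recycled ('lim').
 */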
static void
vge_txeof(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t txstat;
	int idx;

	idx = sc->vge_ldata.vge_tx_considx;

	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD);

	while (idx != sc->vge_ldata.vge_tx_prodidx) {
		txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
		if (txstat & VGE_TDSTS_OWN)
			break;

		m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
		sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
		bus_dmamap_unload(sc->vge_ldata.vge_mtag,
				  sc->vge_ldata.vge_tx_dmamap[idx]);
		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
			ifp->if_collisions++;
		if (txstat & VGE_TDSTS_TXERR)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		sc->vge_ldata.vge_tx_free++;
		VGE_TX_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */
	if (idx != sc->vge_ldata.vge_tx_considx) {
		sc->vge_ldata.vge_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet,
	 * reload the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
}
static void
vge_tick(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	mii = device_get_softc(sc->vge_miibus);

	mii_tick(mii);
	if (sc->vge_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->vge_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->vge_link = 1;
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
}
#ifdef DEVICE_POLLING
static void
vge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vge_softc *sc = ifp->if_softc;

	sc->rxcycles = count;

	switch (cmd) {
	case POLL_REGISTER:
		vge_disable_intr(sc);
		break;
	case POLL_DEREGISTER:
		vge_enable_intr(sc, 0xffffffff);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		vge_rxeof(sc, count);
		vge_txeof(sc);

		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);

		/* XXX copy & paste from vge_intr */
		if (cmd == POLL_AND_CHECK_STATUS) {
			uint32_t status = 0;

			status = CSR_READ_4(sc, VGE_ISR);
			if (status == 0xffffffff)
				break;

			if (status)
				CSR_WRITE_4(sc, VGE_ISR, status);

			if (status & (VGE_ISR_TXDMA_STALL |
				      VGE_ISR_RXDMA_STALL))
				vge_init(sc);

			if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
				ifp->if_ierrors++;
				CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
				CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
			}
		}
		break;
	}
}
#endif	/* DEVICE_POLLING */
static void
vge_intr(void *arg)
{
	struct vge_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	if (sc->suspended || !(ifp->if_flags & IFF_UP))
		return;

	/* Disable interrupts */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);

	for (;;) {
		status = CSR_READ_4(sc, VGE_ISR);
		/* If the card has gone away the read returns 0xffff. */
		if (status == 0xFFFFFFFF)
			break;

		if (status)
			CSR_WRITE_4(sc, VGE_ISR, status);

		if ((status & VGE_INTRS) == 0)
			break;

		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
			vge_rxeof(sc, -1);

		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
			vge_rxeof(sc, -1);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
		}

		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
			vge_txeof(sc);

		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
			vge_init(sc);

		if (status & VGE_ISR_LINKSTS)
			vge_tick(sc);
	}

	/* Re-enable interrupts */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
{
	struct vge_dmaload_arg arg;
	bus_dmamap_t map;
	int error;

	arg.vge_flags = 0;

	if (m_head->m_pkthdr.csum_flags & CSUM_IP)
		arg.vge_flags |= VGE_TDCTL_IPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
		arg.vge_flags |= VGE_TDCTL_TCPCSUM;
	if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
		arg.vge_flags |= VGE_TDCTL_UDPCSUM;

	arg.sc = sc;
	arg.vge_idx = idx;
	arg.vge_m0 = m_head;
	arg.vge_maxsegs = VGE_TX_FRAGS;

	map = sc->vge_ldata.vge_tx_dmamap[idx];
	error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head,
				     vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n",
			  error);
		goto fail;
	}

	/* Too many segments to map, coalesce into a single mbuf */
	if (error || arg.vge_maxsegs == 0) {
		struct mbuf *m_new;

		m_new = m_defrag(m_head, MB_DONTWAIT);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_head = m_new;

		arg.sc = sc;
		arg.vge_m0 = m_head;
		arg.vge_idx = idx;
		arg.vge_maxsegs = 1;

		error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
					     m_head, vge_dma_map_tx_desc, &arg,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "can't map mbuf (error %d)\n", error);
			goto fail;
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */
	if (m_head->m_flags & M_VLANTAG) {
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(m_head->m_pkthdr.ether_vlantag) |
			    VGE_TDCTL_VTAG);
	}

	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
	return 0;

fail:
	m_freem(m_head);
	return error;
}
/*
 * Main transmit routine.
 */
static void
vge_start(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx, pidx = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc->vge_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	idx = sc->vge_ldata.vge_tx_prodidx;

	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
		if (sc->vge_ldata.vge_tx_free <= 2) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
			/* If vge_encap() failed, it will free m_head for us */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to the listener.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx)
		return;

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
			sc->vge_ldata.vge_tx_list_map,
			BUS_DMASYNC_PREWRITE);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
static void
vge_init(void *xsc)
{
	struct vge_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->vge_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */
	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST)
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */
	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 500 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);

	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
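
	/*
	 * Arithmetic note: the holdoff timer ticks once every 20 usecs,
	 * so the value 10 written above yields roughly 10 * 20 = 200
	 * usecs of interrupt holdoff, matching the '~200 usecs'
	 * annotation.
	 */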
	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);

#ifdef DEVICE_POLLING
	/* Disable intr if polling(4) is enabled */
	if (ifp->if_flags & IFF_POLLING)
		vge_disable_intr(sc);
	else
#endif
	vge_enable_intr(sc, 0);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vge_if_flags = 0;
	sc->vge_link = 0;
}
/*
 * Set media options.
 */
static int
vge_ifmedia_upd(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_mediachg(mii);

	return 0;
}

/*
 * Report current media status.
 */
static void
vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->vge_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static void
vge_miibus_statchg(device_t dev)
{
	struct vge_softc *sc;
	struct mii_data *mii;
	struct ifmedia_entry *ife;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vge_miibus);
	ife = mii->mii_media.ifm_cur;

	/*
	 * If the user manually selects a media mode, we need to turn
	 * on the forced MAC mode bit in the DIAGCTL register. If the
	 * user happens to choose a full duplex mode, we also need to
	 * set the 'force full duplex' bit. This applies only to
	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
	 * mode is disabled, and in 1000baseT mode, full duplex is
	 * always implied, so we turn on the forced mode bit but leave
	 * the FDX bit cleared.
	 */
	switch (IFM_SUBTYPE(ife->ifm_media)) {
	case IFM_AUTO:
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_1000_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	case IFM_100_TX:
	case IFM_10_T:
		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		else
			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
		break;
	default:
		device_printf(dev, "unknown media type: %x\n",
			      IFM_SUBTYPE(ife->ifm_media));
		break;
	}
}
static int
vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct vge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) &&
			    (ifp->if_flags & IFF_PROMISC) &&
			    !(sc->vge_if_flags & IFF_PROMISC)) {
				CSR_SETBIT_1(sc, VGE_RXCTL,
				    VGE_RXCTL_RX_PROMISC);
				vge_setmulti(sc);
			} else if ((ifp->if_flags & IFF_RUNNING) &&
				   !(ifp->if_flags & IFF_PROMISC) &&
				   (sc->vge_if_flags & IFF_PROMISC)) {
				CSR_CLRBIT_1(sc, VGE_RXCTL,
					     VGE_RXCTL_RX_PROMISC);
			} else {
				vge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vge_stop(sc);
		}
		sc->vge_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vge_setmulti(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	    {
		uint32_t mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM);
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist = VGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
			if (ifp->if_flags & IFF_RUNNING)
				vge_init(sc);
		}
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
static void
vge_watchdog(struct ifnet *ifp)
{
	struct vge_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	vge_txeof(sc);
	vge_init(sc);
}
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vge_stop(struct vge_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
					  sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
					  sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}
}
/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vge_stop(sc);
	sc->suspended = 1;
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	lwkt_serialize_enter(ifp->if_serializer);
	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(sc);
	sc->suspended = 0;
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(device_t dev)
{
	struct vge_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vge_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
vge_enable_intr(struct vge_softc *sc, uint32_t isr)
{
	CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
	CSR_WRITE_4(sc, VGE_ISR, isr);
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
}
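
/*
 * Note: vge_enable_intr() acknowledges whatever interrupt causes are
 * passed in 'isr' (vge_poll() hands it 0xffffffff when polling is
 * deregistered, clearing everything) before unmasking VGE_INTRS and
 * reopening the global interrupt gate via VGE_CR3_INT_GMSK.
 */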
#ifdef DEVICE_POLLING
static void
vge_disable_intr(struct vge_softc *sc)
{
	CSR_WRITE_4(sc, VGE_IMR, 0);
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
}
#endif	/* DEVICE_POLLING */