1 /* $FreeBSD: src/sys/pci/if_wx.c,v 1.5.2.12 2003/03/05 18:42:34 njl Exp $ */
2 /* $DragonFly: src/sys/dev/netif/wx/Attic/if_wx.c,v 1.8 2004/03/23 22:19:05 hsu Exp $ */
4 * Principal Author: Matthew Jacob <mjacob@feral.com>
5 * Copyright (c) 1999, 2001 by Traakan Software
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Additional Copyright (c) 2001 by Parag Patel
31 * under same licence for MII PHY code.
35 * Intel Gigabit Ethernet (82452/82453) Driver.
36 * Inspired by fxp driver by David Greenman for FreeBSD, and by
37 * Bill Paul's work in other FreeBSD network drivers.
41 * Many bug fixes gratefully acknowledged from:
43 * The folks at Sitara Networks
51 * Use only every other 16 byte receive descriptor, leaving the ones
52 * in between empty. This card is most efficient at reading/writing
53 * 32 byte cache lines, so avoid all the (not working for early rev
54 * cards) MWI and/or READ/MODIFY/WRITE cycles updating one descriptor
57 * This isn't debugged yet.
59 /* #define PADDED_CELL 1 */
62 * Since the includes are a mess, they'll all be in if_wxvar.h
69 #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va))
70 #endif /* __alpha__ */
73  * Function Prototypes, yadda yadda...
76 static int wx_intr(void *);
77 static void wx_handle_link_intr(wx_softc_t *);
78 static void wx_check_link(wx_softc_t *);
79 static void wx_handle_rxint(wx_softc_t *);
80 static void wx_gc(wx_softc_t *);
81 static void wx_start(struct ifnet *);
82 static int wx_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t, struct ucred *);
83 static int wx_ifmedia_upd(struct ifnet *);
84 static void wx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
85 static int wx_init(void *);
86 static void wx_hw_stop(wx_softc_t *);
87 static void wx_set_addr(wx_softc_t *, int, u_int8_t *);
88 static int wx_hw_initialize(wx_softc_t *);
89 static void wx_stop(wx_softc_t *);
90 static void wx_txwatchdog(struct ifnet *);
91 static int wx_get_rbuf(wx_softc_t *, rxpkt_t *);
92 static void wx_rxdma_map(wx_softc_t *, rxpkt_t *, struct mbuf *);
94 static INLINE void wx_eeprom_raise_clk(wx_softc_t *, u_int32_t);
95 static INLINE void wx_eeprom_lower_clk(wx_softc_t *, u_int32_t);
96 static INLINE void wx_eeprom_sobits(wx_softc_t *, u_int16_t, u_int16_t);
97 static INLINE u_int16_t wx_eeprom_sibits(wx_softc_t *);
98 static INLINE void wx_eeprom_cleanup(wx_softc_t *);
99 static INLINE u_int16_t wx_read_eeprom_word(wx_softc_t *, int);
100 static void wx_read_eeprom(wx_softc_t *, u_int16_t *, int, int);
102 static int wx_attach_common(wx_softc_t *);
103 static void wx_watchdog(void *);
105 static INLINE void wx_mwi_whackon(wx_softc_t *);
106 static INLINE void wx_mwi_unwhack(wx_softc_t *);
107 static int wx_dring_setup(wx_softc_t *);
108 static void wx_dring_teardown(wx_softc_t *);
110 static int wx_attach_phy(wx_softc_t *);
111 static int wx_miibus_readreg(void *, int, int);
112 static int wx_miibus_writereg(void *, int, int, int);
113 static void wx_miibus_statchg(void *);
114 static void wx_miibus_mediainit(void *);
116 static u_int32_t wx_mii_shift_in(wx_softc_t *);
117 static void wx_mii_shift_out(wx_softc_t *, u_int32_t, u_int32_t);
119 #define WX_DISABLE_INT(sc) WRITE_CSR(sc, WXREG_IMCLR, WXDISABLE)
120 #define WX_ENABLE_INT(sc) WRITE_CSR(sc, WXREG_IMASK, sc->wx_ienable)
123 * Until we do a bit more work, we can get no bigger than MCLBYTES
126 #define WX_MAXMTU (WX_MAX_PKT_SIZE_JUMBO - sizeof (struct ether_header))
128 #define WX_MAXMTU (MCLBYTES - sizeof (struct ether_header))
131 #define DPRINTF(sc, x) if (sc->wx_debug) printf x
132 #define IPRINTF(sc, x) if (sc->wx_verbose) printf x
134 static const char ldn[] = "%s: link down\n";
135 static const char lup[] = "%s: link up\n";
136 static const char sqe[] = "%s: receive sequence error\n";
137 static const char ane[] = "%s: /C/ ordered sets seen- enabling ANE\n";
138 static const char inane[] = "%s: no /C/ ordered sets seen- disabling ANE\n";
140 static int wx_txint_delay = 5000; /* ~5ms */
141 TUNABLE_INT("hw.wx.txint_delay", &wx_txint_delay);
143 SYSCTL_NODE(_hw, OID_AUTO, wx, CTLFLAG_RD, 0, "WX driver parameters");
144 SYSCTL_INT(_hw_wx, OID_AUTO, txint_delay, CTLFLAG_RW,
145 &wx_txint_delay, 0, "");
146 static int wx_dump_stats = -1;
147 SYSCTL_INT(_hw_wx, OID_AUTO, dump_stats, CTLFLAG_RW,
148 &wx_dump_stats, 0, "");
149 static int wx_clr_stats = -1;
150 SYSCTL_INT(_hw_wx, OID_AUTO, clear_stats, CTLFLAG_RW,
151 &wx_clr_stats, 0, "");
154 * Program multicast addresses.
156 * This function must be called at splimp, but it may sleep.
159 wx_mc_setup(wx_softc_t *sc)
161 struct ifnet *ifp = &sc->wx_if;
162 struct ifmultiaddr *ifma;
165 * XXX: drain TX queue
173 if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
175 return (wx_init(sc));
179 for (ifma = ifp->if_multiaddrs.lh_first, sc->wx_nmca = 0;
180 ifma != NULL; ifma = ifma->ifma_link.le_next) {
182 if (ifma->ifma_addr->sa_family != AF_LINK) {
185 if (sc->wx_nmca >= WX_RAL_TAB_SIZE-1) {
190 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
191 (void *) &sc->wx_mcaddr[sc->wx_nmca++][0], 6);
193 return (wx_init(sc));
197  * Return identification string if this device is ours.
200 wx_probe(device_t dev)
202 if (pci_get_vendor(dev) != WX_VENDOR_INTEL) {
205 switch (pci_get_device(dev)) {
206 case WX_PRODUCT_82452:
207 device_set_desc(dev, "Intel PRO/1000 Gigabit (WISEMAN)");
209 case WX_PRODUCT_LIVENGOOD:
210 device_set_desc(dev, "Intel PRO/1000 (LIVENGOOD)");
212 case WX_PRODUCT_82452_SC:
213 device_set_desc(dev, "Intel PRO/1000 F Gigabit Ethernet");
215 case WX_PRODUCT_82543:
216 device_set_desc(dev, "Intel PRO/1000 T Gigabit Ethernet");
225 wx_attach(device_t dev)
228 wx_softc_t *sc = device_get_softc(dev);
233 bzero(sc, sizeof (wx_softc_t));
235 callout_handle_init(&sc->w.sch);
241 if (getenv_int ("wx_debug", &rid)) {
242 if (rid & (1 << device_get_unit(dev))) {
247 if (getenv_int("wx_no_ilos", &rid)) {
248 if (rid & (1 << device_get_unit(dev))) {
253 if (getenv_int("wx_ilos", &rid)) {
254 if (rid & (1 << device_get_unit(dev))) {
259 if (getenv_int("wx_no_flow", &rid)) {
260 if (rid & (1 << device_get_unit(dev))) {
266 mtx_init(&sc->wx_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
270 * get revision && id...
272 sc->wx_idnrev = (pci_get_device(dev) << 16) | (pci_get_revid(dev));
275 * Enable bus mastering, make sure that the cache line size is right.
277 pci_enable_busmaster(dev);
278 pci_enable_io(dev, SYS_RES_MEMORY);
279 val = pci_read_config(dev, PCIR_COMMAND, 4);
280 if ((val & PCIM_CMD_MEMEN) == 0) {
281 device_printf(dev, "failed to enable memory mapping\n");
287 	 * Let the BIOS do its job- but check for sanity.
289 val = pci_read_config(dev, PCIR_CACHELNSZ, 1);
290 if (val < 4 || val > 32) {
291 pci_write_config(dev, PCIR_CACHELNSZ, 8, 1);
295 * Map control/status registers.
298 sc->w.mem = bus_alloc_resource(dev, SYS_RES_MEMORY,
299 &rid, 0, ~0, 1, RF_ACTIVE);
301 device_printf(dev, "could not map memory\n");
305 sc->w.st = rman_get_bustag(sc->w.mem);
306 sc->w.sh = rman_get_bushandle(sc->w.mem);
309 sc->w.irq = bus_alloc_resource(dev, SYS_RES_IRQ,
310 &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
311 if (sc->w.irq == NULL) {
312 device_printf(dev, "could not map interrupt\n");
316 error = bus_setup_intr(dev, sc->w.irq, INTR_TYPE_NET,
317 (void (*)(void *))wx_intr, sc, &sc->w.ih);
319 device_printf(dev, "could not setup irq\n");
322 (void) snprintf(sc->wx_name, sizeof (sc->wx_name) - 1, "wx%d",
323 device_get_unit(dev));
324 if (wx_attach_common(sc)) {
325 bus_teardown_intr(dev, sc->w.irq, sc->w.ih);
326 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->w.irq);
327 bus_release_resource(dev, SYS_RES_MEMORY, WX_MMBA, sc->w.mem);
331 device_printf(dev, "Ethernet address %6D\n", sc->w.arpcom.ac_enaddr, ":");
333 ifp = &sc->w.arpcom.ac_if;
334 if_initname(ifp, "wx", device_get_unit(dev));
335 ifp->if_mtu = ETHERMTU; /* we always start at ETHERMTU size */
336 ifp->if_output = ether_output;
337 ifp->if_baudrate = 1000000000;
338 ifp->if_init = (void (*)(void *))wx_init;
340 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
341 ifp->if_ioctl = wx_ioctl;
342 ifp->if_start = wx_start;
343 ifp->if_watchdog = wx_txwatchdog;
344 ifp->if_snd.ifq_maxlen = WX_MAX_TDESC - 1;
345 ether_ifattach(ifp, sc->w.arpcom.ac_enaddr);
352 wx_attach_phy(wx_softc_t *sc)
354 if (mii_phy_probe(sc->w.dev, &sc->w.miibus, wx_ifmedia_upd,
356 printf("%s: no PHY probed!\n", sc->wx_name);
364 wx_detach(device_t dev)
366 wx_softc_t *sc = device_get_softc(dev);
371 ether_ifdetach(&sc->w.arpcom.ac_if);
373 bus_generic_detach(dev);
374 device_delete_child(dev, sc->w.miibus);
376 ifmedia_removeall(&sc->wx_media);
378 bus_teardown_intr(dev, sc->w.irq, sc->w.ih);
379 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->w.irq);
380 bus_release_resource(dev, SYS_RES_MEMORY, WX_MMBA, sc->w.mem);
382 wx_dring_teardown(sc);
393 mtx_destroy(&sc->wx_mtx);
/*
 * Device shutdown hook: quiesce the chip (reset, stop DMA) before the
 * system goes down.  Delegates entirely to wx_hw_stop().
 * (Listing is elided; braces/return around these lines are not visible.)
 */
399 wx_shutdown(device_t dev)
401 	wx_hw_stop((wx_softc_t *) device_get_softc(dev));
/*
 * Save the current PCI command word and clear the Memory-Write-Invalidate
 * bit.  Per the comment at the top of this file, MWI cycles do not work
 * on early-revision cards; wx_mwi_unwhack() restores the saved word.
 */
406 wx_mwi_whackon(wx_softc_t *sc)
408 	sc->wx_cmdw = pci_read_config(sc->w.dev, PCIR_COMMAND, 2);
409 	pci_write_config(sc->w.dev, PCIR_COMMAND, sc->wx_cmdw & ~MWI, 2);
/*
 * Restore the PCI command word saved by wx_mwi_whackon(), but only if
 * MWI had actually been enabled there (otherwise nothing to undo).
 */
413 wx_mwi_unwhack(wx_softc_t *sc)
415 	if (sc->wx_cmdw & MWI) {
416 		pci_write_config(sc->w.dev, PCIR_COMMAND, sc->wx_cmdw, 2);
/*
 * Allocate the receive and transmit descriptor rings.  Both rings are
 * requested physically contiguous and 4KB aligned via contigmalloc();
 * on any failure (allocation or alignment) everything allocated so far
 * is freed and the softc pointers are reset to NULL.
 * NOTE(review): the listing is elided here; the error-return statements
 * (e.g. original lines 430-431, 436-437) are not visible in this view.
 */
421 wx_dring_setup(wx_softc_t *sc)
/* Receive ring: WX_MAX_RDESC descriptors. */
425 	len = sizeof (wxrd_t) * WX_MAX_RDESC;
426 	sc->rdescriptors = (wxrd_t *)
427 	    contigmalloc(len, M_DEVBUF, M_NOWAIT, 0, ~0, 4096, 0);
428 	if (sc->rdescriptors == NULL) {
429 		printf("%s: could not allocate rcv descriptors\n", sc->wx_name);
/* Paranoia: verify the 4KB alignment contigmalloc was asked for. */
432 	if (((intptr_t)sc->rdescriptors) & 0xfff) {
433 		contigfree(sc->rdescriptors, len, M_DEVBUF);
434 		sc->rdescriptors = NULL;
435 		printf("%s: rcv descriptors not 4KB aligned\n", sc->wx_name);
438 	bzero(sc->rdescriptors, len);
/* Transmit ring: WX_MAX_TDESC descriptors, same requirements. */
440 	len = sizeof (wxtd_t) * WX_MAX_TDESC;
441 	sc->tdescriptors = (wxtd_t *)
442 	    contigmalloc(len, M_DEVBUF, M_NOWAIT, 0, ~0, 4096, 0);
443 	if (sc->tdescriptors == NULL) {
/* Undo the receive-ring allocation before bailing. */
444 		contigfree(sc->rdescriptors,
445 		    sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
446 		sc->rdescriptors = NULL;
447 		printf("%s: could not allocate xmt descriptors\n", sc->wx_name);
450 	if (((intptr_t)sc->tdescriptors) & 0xfff) {
451 		contigfree(sc->rdescriptors,
452 		    sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
453 		contigfree(sc->tdescriptors, len, M_DEVBUF);
454 		sc->rdescriptors = NULL;
455 		sc->tdescriptors = NULL;
456 		printf("%s: xmt descriptors not 4KB aligned\n", sc->wx_name);
459 	bzero(sc->tdescriptors, len);
/*
 * Free the descriptor rings allocated by wx_dring_setup().  Safe to call
 * with either or both rings unallocated; pointers are NULLed after the
 * free so a repeated call is harmless.
 */
464 wx_dring_teardown(wx_softc_t *sc)
466 	if (sc->rdescriptors) {
467 		contigfree(sc->rdescriptors,
468 		    sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
469 		sc->rdescriptors = NULL;
471 	if (sc->tdescriptors) {
472 		contigfree(sc->tdescriptors,
473 		    sizeof (wxtd_t) * WX_MAX_TDESC, M_DEVBUF);
474 		sc->tdescriptors = NULL;
/*
 * newbus/miibus glue: device method table, driver declaration and
 * module registration.  (Listing is elided; array/struct terminators
 * are not visible in this view.)
 */
478 static device_method_t wx_methods[] = {
479 	/* Device interface */
480 	DEVMETHOD(device_probe, wx_probe),
481 	DEVMETHOD(device_attach, wx_attach),
482 	DEVMETHOD(device_detach, wx_detach),
483 	DEVMETHOD(device_shutdown, wx_shutdown),
/* Bus interface (generic pass-throughs for the miibus child). */
486 	DEVMETHOD(bus_print_child, bus_generic_print_child),
487 	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* MII interface: PHY register access and link-change callbacks. */
490 	DEVMETHOD(miibus_readreg, wx_miibus_readreg),
491 	DEVMETHOD(miibus_writereg, wx_miibus_writereg),
492 	DEVMETHOD(miibus_statchg, wx_miibus_statchg),
493 	DEVMETHOD(miibus_mediainit, wx_miibus_mediainit),
498 static driver_t wx_driver = {
499 	"wx", wx_methods, sizeof(wx_softc_t),
501 static devclass_t wx_devclass;
503 DECLARE_DUMMY_MODULE(if_wx);
504 MODULE_DEPEND(if_wx, miibus, 1, 1, 1);
505 DRIVER_MODULE(if_wx, pci, wx_driver, wx_devclass, 0, 0);
506 DRIVER_MODULE(miibus, wx, miibus_driver, miibus_devclass, 0, 0);
509 * Do generic parts of attach. Our registers have been mapped
510 * and our interrupt registered.
513 wx_attach_common(wx_softc_t *sc)
520 * First, check for revision support.
522 if (sc->wx_idnrev < WX_WISEMAN_2_0) {
523 printf("%s: cannot support ID 0x%x, revision %d chips\n",
524 sc->wx_name, sc->wx_idnrev >> 16, sc->wx_idnrev & 0xffff);
529 * Second, reset the chip.
534 * Third, validate our EEPROM.
540 * Fourth, read eeprom for our MAC address and other things.
542 wx_read_eeprom(sc, (u_int16_t *)sc->wx_enaddr, WX_EEPROM_MAC_OFF, 3);
545 * Fifth, establish some adapter parameters.
549 if (IS_LIVENGOOD_CU(sc)) {
551 /* settings to talk to PHY */
552 sc->wx_dcr |= WXDCR_FRCSPD | WXDCR_FRCDPX | WXDCR_SLU;
553 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
556 * Raise the PHY's reset line to make it operational.
558 tmp = READ_CSR(sc, WXREG_EXCT);
559 tmp |= WXPHY_RESET_DIR4;
560 WRITE_CSR(sc, WXREG_EXCT, tmp);
563 tmp = READ_CSR(sc, WXREG_EXCT);
564 tmp &= ~WXPHY_RESET4;
565 WRITE_CSR(sc, WXREG_EXCT, tmp);
568 tmp = READ_CSR(sc, WXREG_EXCT);
570 WRITE_CSR(sc, WXREG_EXCT, tmp);
573 if (wx_attach_phy(sc)) {
577 ifmedia_init(&sc->wx_media, IFM_IMASK,
578 wx_ifmedia_upd, wx_ifmedia_sts);
580 ifmedia_add(&sc->wx_media, IFM_ETHER|IFM_1000_SX, 0, NULL);
581 ifmedia_add(&sc->wx_media,
582 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
583 ifmedia_set(&sc->wx_media, IFM_ETHER|IFM_1000_SX|IFM_FDX);
585 sc->wx_media.ifm_media = sc->wx_media.ifm_cur->ifm_media;
589 * Sixth, establish a default device control register word.
592 if (sc->wx_cfg1 & WX_EEPROM_CTLR1_FD)
593 sc->wx_dcr |= WXDCR_FD;
594 if (sc->wx_cfg1 & WX_EEPROM_CTLR1_ILOS)
595 sc->wx_dcr |= WXDCR_ILOS;
597 tmp = (sc->wx_cfg1 >> WX_EEPROM_CTLR1_SWDPIO_SHIFT) & WXDCR_SWDPIO_MASK;
598 sc->wx_dcr |= (tmp << WXDCR_SWDPIO_SHIFT);
601 sc->wx_dcr &= ~WXDCR_ILOS;
603 sc->wx_dcr |= WXDCR_ILOS;
604 if (sc->wx_no_flow == 0)
605 sc->wx_dcr |= WXDCR_RFCE | WXDCR_TFCE;
608 * Seventh, allocate various sw structures...
610 len = sizeof (rxpkt_t) * WX_MAX_RDESC;
611 sc->rbase = (rxpkt_t *) WXMALLOC(len);
612 if (sc->rbase == NULL) {
615 bzero(sc->rbase, len);
618 len = sizeof (txpkt_t) * WX_MAX_TDESC;
619 sc->tbase = (txpkt_t *) WXMALLOC(len);
620 if (sc->tbase == NULL) {
623 bzero(sc->tbase, len);
627 * Eighth, allocate and dma map (platform dependent) descriptor rings.
628 * They have to be aligned on a 4KB boundary.
630 if (wx_dring_setup(sc) == 0) {
635 printf("%s: failed to do common attach (%d)\n", sc->wx_name, ll);
636 wx_dring_teardown(sc);
/*
 * Drive the EEPROM serial clock (SK) high while preserving the other
 * EECD register bits in regval.  (Elided lines presumably add a DELAY
 * for the clock hold time — not visible here.)
 */
653 wx_eeprom_raise_clk(wx_softc_t *sc, u_int32_t regval)
655 	WRITE_CSR(sc, WXREG_EECDR, regval | WXEECD_SK);
/*
 * Drive the EEPROM serial clock (SK) low, preserving the other EECD
 * register bits in regval.  Counterpart of wx_eeprom_raise_clk().
 */
660 wx_eeprom_lower_clk(wx_softc_t *sc, u_int32_t regval)
662 	WRITE_CSR(sc, WXREG_EECDR, regval & ~WXEECD_SK);
667 wx_eeprom_sobits(wx_softc_t *sc, u_int16_t data, u_int16_t count)
669 u_int32_t regval, mask;
671 mask = 1 << (count - 1);
672 regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_DO);
678 regval &= ~WXEECD_DI;
679 WRITE_CSR(sc, WXREG_EECDR, regval); DELAY(50);
680 wx_eeprom_raise_clk(sc, regval);
681 wx_eeprom_lower_clk(sc, regval);
684 WRITE_CSR(sc, WXREG_EECDR, regval & ~WXEECD_DI);
/*
 * Shift 16 bits in from the EEPROM DO line, one bit per SK clock pulse.
 * The accumulator shift and return are elided from this listing
 * (original lines ~696, 700-705 not visible).
 */
687 static INLINE u_int16_t
688 wx_eeprom_sibits(wx_softc_t *sc)
690 	unsigned int regval, i;
694 	regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_DO);
695 	for (i = 0; i != 16; i++) {
696 		wx_eeprom_raise_clk(sc, regval);
698 		regval = READ_CSR(sc, WXREG_EECDR) & ~WXEECD_DI;
/* Sample the data-out bit after the rising clock edge. */
699 		if (regval & WXEECD_DO) {
702 		wx_eeprom_lower_clk(sc, regval);
/*
 * Terminate an EEPROM transaction: drop chip-select (CS) and DI, then
 * issue one final clock pulse so the part returns to standby.
 */
708 wx_eeprom_cleanup(wx_softc_t *sc)
711 	regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_CS);
712 	WRITE_CSR(sc, WXREG_EECDR, regval); DELAY(50);
713 	wx_eeprom_raise_clk(sc, regval);
714 	wx_eeprom_lower_clk(sc, regval);
/*
 * Read one 16-bit word from the serial EEPROM at `offset`: assert CS,
 * shift out the READ opcode (3 bits) and the 6-bit address, then shift
 * in the 16 data bits and end the transaction.
 * NOTE(review): qualifier order here is `static u_int16_t INLINE`,
 * unlike the `static INLINE u_int16_t` used in the prototype earlier
 * in the file — equivalent to the compiler, but inconsistent style.
 */
717 static u_int16_t INLINE
718 wx_read_eeprom_word(wx_softc_t *sc, int offset)
721 	WRITE_CSR(sc, WXREG_EECDR, WXEECD_CS);
722 	wx_eeprom_sobits(sc, EEPROM_READ_OPCODE, 3);
723 	wx_eeprom_sobits(sc, offset, 6);
724 	data = wx_eeprom_sibits(sc);
725 	wx_eeprom_cleanup(sc);
/*
 * Read `words` consecutive 16-bit words starting at `offset` into
 * `data`.  Side effect: also refreshes the cached controller config
 * word sc->wx_cfg1 from WX_EEPROM_CTLR1_OFF after the loop.
 * (Elided lines may byte-swap the MAC words — not visible here.)
 */
730 wx_read_eeprom(wx_softc_t *sc, u_int16_t *data, int offset, int words)
733 	for (i = 0; i < words; i++) {
734 		*data++ = wx_read_eeprom_word(sc, offset++);
736 	sc->wx_cfg1 = wx_read_eeprom_word(sc, WX_EEPROM_CTLR1_OFF);
740 * Start packet transmission on the interface.
744 wx_start(struct ifnet *ifp)
746 wx_softc_t *sc = SOFTC_IFP(ifp);
747 u_int16_t widx = WX_MAX_TDESC, cidx, nactv;
750 DPRINTF(sc, ("%s: wx_start\n", sc->wx_name));
752 while (nactv < WX_MAX_TDESC - 1) {
755 struct mbuf *m, *mb_head;
757 IF_DEQUEUE(&ifp->if_snd, mb_head);
758 if (mb_head == NULL) {
764 * If we have a packet less than ethermin, pad it out.
766 if (mb_head->m_pkthdr.len < WX_MIN_RPKT_SIZE) {
767 if (mb_head->m_next == NULL) {
768 mb_head->m_len = WX_MIN_RPKT_SIZE;
770 MGETHDR(m, M_DONTWAIT, MT_DATA);
775 m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
777 m->m_pkthdr.len = m->m_len = WX_MIN_RPKT_SIZE;
778 bzero(mtod(m, char *) + mb_head->m_pkthdr.len,
779 WX_MIN_RPKT_SIZE - mb_head->m_pkthdr.len);
791 * Go through each of the mbufs in the chain and initialize
792 * the transmit buffer descriptors with the physical address
793 * and size of that mbuf. If we have a length less than our
794 * minimum transmit size, we bail (to do a pullup). If we run
795 * out of descriptors, we also bail and try and do a pullup.
797 for (plen = ndesc = 0, m = mb_head; m != NULL; m = m->m_next) {
802 * If this mbuf has no data, skip it.
809 			 * This appears to be a bogus check for the PRO1000T.
810 * I think they meant that the minimum packet size
811 * is in fact WX_MIN_XPKT_SIZE (all data loaded)
815 * If this mbuf is too small for the chip's minimum,
816 * break out to cluster it.
818 if (m->m_len < WX_MIN_XPKT_SIZE) {
825 * Do we have a descriptor available for this mbuf?
827 if (++nactv == WX_MAX_TDESC) {
828 if (gctried++ == 0) {
835 sc->tbase[cidx].dptr = m;
836 td = &sc->tdescriptors[cidx];
837 td->length = m->m_len;
840 vptr = mtod(m, vm_offset_t);
841 td->address.highpart = 0;
842 td->address.lowpart = vtophys(vptr);
851 printf("%s: XMIT[%d] %p vptr %lx (length %d "
852 "DMA addr %x) idx %d\n", sc->wx_name,
853 ndesc, m, (long) vptr, td->length,
854 td->address.lowpart, cidx);
857 cidx = T_NXT_IDX(cidx);
861 * If we get here and m is NULL, we can send
862 			 * the packet chain described by mb_head.
866 * Mark the last descriptor with EOP and tell the
867 * chip to insert a final checksum.
869 wxtd_t *td = &sc->tdescriptors[T_PREV_IDX(cidx)];
870 td->cmd = TXCMD_EOP|TXCMD_IFCS;
872 * Set up a delayed interrupt when this packet
873 * is sent and the descriptor written back.
874 * Additional packets completing will cause
875 * interrupt to be delayed further. Therefore,
876 * after the *last* packet is sent, after the delay
877 * period in TIDV, an interrupt will be generated
878 * which will cause us to garbage collect.
880 td->cmd |= TXCMD_IDE|TXCMD_RPS;
883 * Don't xmit odd length packets.
884 * We're okay with bumping things
885 * up as long as our mbuf allocation
886 * is always larger than our MTU
887 * by a comfortable amount.
889 * Yes, it's a hole to run past the end
897 sc->tbase[sc->tnxtfree].sidx = sc->tnxtfree;
898 sc->tbase[sc->tnxtfree].eidx = cidx;
899 sc->tbase[sc->tnxtfree].next = NULL;
901 sc->tbsyl->next = &sc->tbase[sc->tnxtfree];
903 sc->tbsyf = &sc->tbase[sc->tnxtfree];
905 sc->tbsyl = &sc->tbase[sc->tnxtfree];
910 bpf_mtap(WX_BPFTAP_ARG(ifp), mb_head);
911 /* defer xmit until we've got them all */
917 * Otherwise, we couldn't send this packet for some reason.
919 * If don't have a descriptor available, and this is a
920 * single mbuf packet, freeze output so that later we
921 * can restart when we have more room. Otherwise, we'll
922 * try and cluster the request. We've already tried to
923 * garbage collect completed descriptors.
925 if (nactv == WX_MAX_TDESC && mb_head->m_next == NULL) {
926 sc->wx_xmitputback++;
927 ifp->if_flags |= IFF_OACTIVE;
928 IF_PREPEND(&ifp->if_snd, mb_head);
933 * Otherwise, it's either a fragment length somewhere in the
934 * chain that isn't at least WX_MIN_XPKT_SIZE in length or
935 * the number of fragments exceeds the number of descriptors
938 * We could try a variety of strategies here- if this is
939 * a length problem for single mbuf packet or a length problem
940 * for the last mbuf in a chain (we could just try and adjust
941 * it), but it's just simpler to try and cluster it.
943 MGETHDR(m, M_DONTWAIT, MT_DATA);
948 MCLGET(m, M_DONTWAIT);
949 if ((m->m_flags & M_EXT) == 0) {
954 m_copydata(mb_head, 0, mb_head->m_pkthdr.len, mtod(m, caddr_t));
955 m->m_pkthdr.len = m->m_len = mb_head->m_pkthdr.len;
958 sc->wx_xmitcluster++;
962 if (widx < WX_MAX_TDESC) {
963 if (IS_WISEMAN(sc)) {
964 WRITE_CSR(sc, WXREG_TDT, widx);
966 WRITE_CSR(sc, WXREG_TDT_LIVENGOOD, widx);
970 if (sc->tactive == WX_MAX_TDESC - 1) {
973 if (sc->tactive >= WX_MAX_TDESC - 1) {
974 sc->wx_xmitblocked++;
975 ifp->if_flags |= IFF_OACTIVE;
979 /* used SW LED to indicate transmission active */
980 if (sc->tactive > 0 && sc->wx_mii) {
981 WRITE_CSR(sc, WXREG_DCR,
982 READ_CSR(sc, WXREG_DCR) | (WXDCR_SWDPIO0|WXDCR_SWDPIN0));
988 * Process interface interrupts.
993 wx_softc_t *sc = arg;
998 * Read interrupt cause register. Reading it clears bits.
1000 sc->wx_icr = READ_CSR(sc, WXREG_ICR);
1005 if (sc->wx_icr & (WXISR_LSC|WXISR_RXSEQ|WXISR_GPI_EN1)) {
1007 wx_handle_link_intr(sc);
1009 wx_handle_rxint(sc);
1010 if (sc->wx_icr & WXISR_TXDW) {
1015 if (sc->wx_icr & WXISR_TXQE) {
1020 if (sc->wx_if.if_snd.ifq_head != NULL) {
1021 wx_start(&sc->wx_if);
1030 wx_handle_link_intr(wx_softc_t *sc)
1032 u_int32_t txcw, rxcw, dcr, dsr;
1035 dcr = READ_CSR(sc, WXREG_DCR);
1036 DPRINTF(sc, ("%s: handle_link_intr: icr=%#x dcr=%#x\n",
1037 sc->wx_name, sc->wx_icr, dcr));
1039 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
1041 if (mii->mii_media_status & IFM_ACTIVE) {
1042 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
1043 IPRINTF(sc, (ldn, sc->wx_name));
1046 IPRINTF(sc, (lup, sc->wx_name));
1049 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1050 } else if (sc->wx_icr & WXISR_RXSEQ) {
1051 DPRINTF(sc, (sqe, sc->wx_name));
1056 txcw = READ_CSR(sc, WXREG_XMIT_CFGW);
1057 rxcw = READ_CSR(sc, WXREG_RECV_CFGW);
1058 dsr = READ_CSR(sc, WXREG_DSR);
1061 * If we have LOS or are now receiving Ordered Sets and are not
1062 * doing auto-negotiation, restore autonegotiation.
1065 if (((dcr & WXDCR_SWDPIN1) || (rxcw & WXRXCW_C)) &&
1066 ((txcw & WXTXCW_ANE) == 0)) {
1067 DPRINTF(sc, (ane, sc->wx_name));
1068 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
1069 sc->wx_dcr &= ~WXDCR_SLU;
1070 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1074 if (sc->wx_icr & WXISR_LSC) {
1075 if (READ_CSR(sc, WXREG_DSR) & WXDSR_LU) {
1076 IPRINTF(sc, (lup, sc->wx_name));
1078 sc->wx_dcr |= (WXDCR_SWDPIO0|WXDCR_SWDPIN0);
1080 IPRINTF(sc, (ldn, sc->wx_name));
1082 sc->wx_dcr &= ~(WXDCR_SWDPIO0|WXDCR_SWDPIN0);
1084 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1086 DPRINTF(sc, (sqe, sc->wx_name));
1091 wx_check_link(wx_softc_t *sc)
1093 u_int32_t rxcw, dcr, dsr;
1096 mii_pollstat(WX_MII_FROM_SOFTC(sc));
1100 rxcw = READ_CSR(sc, WXREG_RECV_CFGW);
1101 dcr = READ_CSR(sc, WXREG_DCR);
1102 dsr = READ_CSR(sc, WXREG_DSR);
1104 if ((dsr & WXDSR_LU) == 0 && (dcr & WXDCR_SWDPIN1) == 0 &&
1105 (rxcw & WXRXCW_C) == 0) {
1106 if (sc->ane_failed == 0) {
1110 DPRINTF(sc, (inane, sc->wx_name));
1111 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT & ~WXTXCW_ANE);
1112 if (sc->wx_idnrev < WX_WISEMAN_2_1)
1113 sc->wx_dcr &= ~WXDCR_TFCE;
1114 sc->wx_dcr |= WXDCR_SLU;
1115 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1116 } else if ((rxcw & WXRXCW_C) != 0 && (dcr & WXDCR_SLU) != 0) {
1117 DPRINTF(sc, (ane, sc->wx_name));
1118 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
1119 sc->wx_dcr &= ~WXDCR_SLU;
1120 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1125 wx_handle_rxint(wx_softc_t *sc)
1127 struct ether_header *eh;
1128 struct mbuf *m0, *mb, *pending[WX_MAX_RDESC];
1129 struct ifnet *ifp = &sc->wx_if;
1130 int npkts, ndesc, lidx, idx, tlen;
1132 DPRINTF(sc, ("%s: wx_handle_rxint\n", sc->wx_name));
1134 for (m0 = sc->rpending, tlen = ndesc = npkts = 0, idx = sc->rnxt,
1135 lidx = R_PREV_IDX(idx); ndesc < WX_MAX_RDESC;
1136 ndesc++, lidx = idx, idx = R_NXT_IDX(idx)) {
1139 int length, offset, lastframe;
1141 rd = &sc->rdescriptors[idx];
1143 * XXX: DMA Flush descriptor
1145 if ((rd->status & RDSTAT_DD) == 0) {
1147 if (sc->rpending == NULL) {
1148 m0->m_pkthdr.len = tlen;
1155 DPRINTF(sc, ("%s: WXRX: ndesc %d idx %d lidx %d\n",
1156 sc->wx_name, ndesc, idx, lidx));
1160 if (rd->errors != 0) {
1161 printf("%s: packet with errors (%x)\n",
1162 sc->wx_name, rd->errors);
1169 m_freem(sc->rpending);
1170 sc->rpending = NULL;
1177 rxpkt = &sc->rbase[idx];
1180 printf("%s: receive descriptor with no mbuf\n",
1182 (void) wx_get_rbuf(sc, rxpkt);
1189 m_freem(sc->rpending);
1190 sc->rpending = NULL;
1196 /* XXX: Flush DMA for rxpkt */
1198 if (wx_get_rbuf(sc, rxpkt)) {
1200 wx_rxdma_map(sc, rxpkt, mb);
1207 m_freem(sc->rpending);
1208 sc->rpending = NULL;
1215 * Save the completing packet's offset value and length
1216 * and install the new one into the descriptor.
1218 lastframe = (rd->status & RDSTAT_EOP) != 0;
1219 length = rd->length;
1220 offset = rd->address.lowpart & 0xff;
1221 bzero (rd, sizeof (*rd));
1222 rd->address.lowpart = rxpkt->dma_addr + WX_RX_OFFSET_VALUE;
1225 mb->m_data += offset;
1230 } else if (m0 == sc->rpending) {
1232 * Pick up where we left off before. If
1233 * we have an offset (we're assuming the
1234 * first frame has an offset), then we've
1235 * lost sync somewhere along the line.
1238 printf("%s: lost sync with partial packet\n",
1240 m_freem(sc->rpending);
1241 sc->rpending = NULL;
1245 sc->rpending = NULL;
1246 tlen = m0->m_pkthdr.len;
1252 DPRINTF(sc, ("%s: RDESC[%d] len %d off %d lastframe %d\n",
1253 sc->wx_name, idx, mb->m_len, offset, lastframe));
1256 if (lastframe == 0) {
1259 m0->m_pkthdr.rcvif = ifp;
1260 m0->m_pkthdr.len = tlen - WX_CRC_LENGTH;
1261 mb->m_len -= WX_CRC_LENGTH;
1263 eh = mtod(m0, struct ether_header *);
1265 			 * No need to check for promiscuous mode since
1266 * the decision to keep or drop the packet is
1267 * handled by ether_input()
1269 pending[npkts++] = m0;
1275 if (IS_WISEMAN(sc)) {
1276 WRITE_CSR(sc, WXREG_RDT0, lidx);
1278 WRITE_CSR(sc, WXREG_RDT0_LIVENGOOD, lidx);
1287 for (idx = 0; idx < npkts; idx++) {
1290 bpf_mtap(WX_BPFTAP_ARG(ifp), mb);
1293 DPRINTF(sc, ("%s: RECV packet length %d\n",
1294 sc->wx_name, mb->m_pkthdr.len));
1295 eh = mtod(mb, struct ether_header *);
1296 m_adj(mb, sizeof (struct ether_header));
1297 ether_input(ifp, eh, mb);
1302 wx_gc(wx_softc_t *sc)
1304 struct ifnet *ifp = &sc->wx_if;
1310 if (IS_WISEMAN(sc)) {
1311 tdh = READ_CSR(sc, WXREG_TDH);
1313 tdh = READ_CSR(sc, WXREG_TDH_LIVENGOOD);
1315 while (txpkt != NULL) {
1316 u_int32_t end = txpkt->eidx, cidx = tdh;
1319 * Normalize start..end indices to 2 *
1320 * WX_MAX_TDESC range to eliminate wrap.
1322 if (txpkt->eidx < txpkt->sidx) {
1323 end += WX_MAX_TDESC;
1327 * Normalize current chip index to 2 *
1328 * WX_MAX_TDESC range to eliminate wrap.
1330 if (cidx < txpkt->sidx) {
1331 cidx += WX_MAX_TDESC;
1335 * If the current chip index is between low and
1336 * high indices for this packet, it's not finished
1337 * transmitting yet. Because transmits are done FIFO,
1338 * this means we're done garbage collecting too.
1341 if (txpkt->sidx <= cidx && cidx < txpkt->eidx) {
1342 DPRINTF(sc, ("%s: TXGC %d..%d TDH %d\n", sc->wx_name,
1343 txpkt->sidx, txpkt->eidx, tdh));
1349 (void) m_freem(txpkt->dptr);
1351 printf("%s: null mbuf in gc\n", sc->wx_name);
1354 for (cidx = txpkt->sidx; cidx != txpkt->eidx;
1355 cidx = T_NXT_IDX(cidx)) {
1359 td = &sc->tdescriptors[cidx];
1360 if (td->status & TXSTS_EC) {
1361 IPRINTF(sc, ("%s: excess collisions\n",
1363 ifp->if_collisions++;
1366 if (td->status & TXSTS_LC) {
1368 ("%s: lost carrier\n", sc->wx_name));
1371 tmp = &sc->tbase[cidx];
1372 DPRINTF(sc, ("%s: TXGC[%d] %p %d..%d done nact %d "
1373 "TDH %d\n", sc->wx_name, cidx, tmp->dptr,
1374 txpkt->sidx, txpkt->eidx, sc->tactive, tdh));
1376 if (sc->tactive == 0) {
1377 printf("%s: nactive < 0?\n", sc->wx_name);
1381 bzero(td, sizeof (*td));
1383 sc->tbsyf = txpkt->next;
1386 if (sc->tactive < WX_MAX_TDESC - 1) {
1388 ifp->if_flags &= ~IFF_OACTIVE;
1391 /* used SW LED to indicate transmission not active */
1392 if (sc->tactive == 0 && sc->wx_mii) {
1393 WRITE_CSR(sc, WXREG_DCR,
1394 READ_CSR(sc, WXREG_DCR) & ~(WXDCR_SWDPIO0|WXDCR_SWDPIN0));
1400 * Periodic timer to update packet in/out/collision statistics,
1401 * and, more importantly, garbage collect completed transmissions
1402 * and to handle link status changes.
1404 #define WX_PRT_STATS(sc, y) printf("\t" # y " = %u\n", ((sc)->y))
1405 #define WX_CLR_STATS(sc, y) ((sc)->y = 0)
1408 wx_watchdog(void *arg)
1410 wx_softc_t *sc = arg;
1413 if (sc->wx_needreinit) {
1415 if (wx_init(sc) == 0) {
1417 sc->wx_needreinit = 0;
1425 if (wx_dump_stats == device_get_unit(sc->w.dev)) {
1426 printf("%s: current statistics\n", sc->wx_name);
1427 WX_PRT_STATS(sc, wx_intr);
1428 WX_PRT_STATS(sc, wx_linkintr);
1429 WX_PRT_STATS(sc, wx_rxintr);
1430 WX_PRT_STATS(sc, wx_txqe);
1431 WX_PRT_STATS(sc, wx_xmitgc);
1432 WX_PRT_STATS(sc, wx_xmitpullup);
1433 WX_PRT_STATS(sc, wx_xmitcluster);
1434 WX_PRT_STATS(sc, wx_xmitputback);
1435 WX_PRT_STATS(sc, wx_xmitwanted);
1436 WX_PRT_STATS(sc, wx_xmitblocked);
1437 WX_PRT_STATS(sc, wx_xmitrunt);
1438 WX_PRT_STATS(sc, wx_rxnobuf);
1439 WX_PRT_STATS(sc, wx_oddpkt);
1442 if (wx_clr_stats == device_get_unit(sc->w.dev)) {
1443 printf("%s: statistics cleared\n", sc->wx_name);
1444 WX_CLR_STATS(sc, wx_intr);
1445 WX_CLR_STATS(sc, wx_linkintr);
1446 WX_CLR_STATS(sc, wx_rxintr);
1447 WX_CLR_STATS(sc, wx_txqe);
1448 WX_CLR_STATS(sc, wx_xmitgc);
1449 WX_CLR_STATS(sc, wx_xmitpullup);
1450 WX_CLR_STATS(sc, wx_xmitcluster);
1451 WX_CLR_STATS(sc, wx_xmitputback);
1452 WX_CLR_STATS(sc, wx_xmitwanted);
1453 WX_CLR_STATS(sc, wx_xmitblocked);
1454 WX_CLR_STATS(sc, wx_xmitrunt);
1455 WX_CLR_STATS(sc, wx_rxnobuf);
1456 WX_CLR_STATS(sc, wx_oddpkt);
1462 * Schedule another timeout one second from now.
1464 TIMEOUT(sc, wx_watchdog, sc, hz);
1468 * Stop and reinitialize the hardware
/*
 * Stop the hardware: reset the device control register (WXDCR_RST) and
 * read ICR to clear any latched interrupt causes.  The Wiseman < 2.1
 * branches appear to bracket the reset with extra work (likely the MWI
 * whack-on/unwhack pair) — bodies elided from this listing, so TODO
 * confirm against the full source.
 */
1471 wx_hw_stop(wx_softc_t *sc)
1474 	DPRINTF(sc, ("%s: wx_hw_stop\n", sc->wx_name));
1476 	if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1479 	WRITE_CSR(sc, WXREG_DCR, WXDCR_RST);
/* Reading ICR clears pending interrupt cause bits. */
1481 	icr = READ_CSR(sc, WXREG_ICR);
1482 	if (sc->wx_idnrev < WX_WISEMAN_2_1) {
/*
 * Program receive-address-array slot `idx` with the 6-byte MAC address
 * `mac`: low 4 bytes into RAL_LO, high 2 bytes into RAL_HI, both
 * little-endian.  NOTE(review): elided line 1494 is not visible here —
 * presumably it sets the address-valid bit in t1 before the writes;
 * verify against the full source.
 */
1488 wx_set_addr(wx_softc_t *sc, int idx, u_int8_t *mac)
1491 	DPRINTF(sc, ("%s: wx_set_addr\n", sc->wx_name));
1492 	t0 = (mac[0]) | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
1493 	t1 = (mac[4] << 0) | (mac[5] << 8);
1495 	WRITE_CSR(sc, WXREG_RAL_LO(idx), t0);
1496 	WRITE_CSR(sc, WXREG_RAL_HI(idx), t1);
/*
 * wx_hw_initialize: bring the chip from reset to a configured (but not
 * yet enabled) state: clear VLAN filter, load unicast/multicast receive
 * addresses, clear the multicast hash table, reset/configure the PHY or
 * serdes link, and program flow-control parameters.  Returns nonzero on
 * failure (per the caller in wx_init).
 * NOTE(review): interior lines are elided in this extract; comments
 * annotate only the visible statements.
 */
1500 wx_hw_initialize(wx_softc_t *sc)
1504 DPRINTF(sc, ("%s: wx_hw_initialize\n", sc->wx_name));
/* Disable VLAN ethertype matching and zero the VLAN filter table. */
1506 WRITE_CSR(sc, WXREG_VET, 0);
1507 for (i = 0; i < (WX_VLAN_TAB_SIZE << 2); i += 4) {
1508 WRITE_CSR(sc, (WXREG_VFTA + i), 0);
/* Pre-2.1 Wiseman parts require a receiver reset around RAL updates. */
1510 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1512 WRITE_CSR(sc, WXREG_RCTL, WXRCTL_RST);
1516 * Load the first receiver address with our MAC address,
1517 * and load as many multicast addresses as can fit into
1518 * the receive address array.
1520 wx_set_addr(sc, 0, sc->wx_enaddr);
1521 for (i = 1; i <= sc->wx_nmca; i++) {
1522 if (i >= WX_RAL_TAB_SIZE) {
1525 wx_set_addr(sc, i, sc->wx_mcaddr[i-1]);
/* Zero out any receive-address slots left over after the loop above. */
1529 while (i < WX_RAL_TAB_SIZE) {
1530 WRITE_CSR(sc, WXREG_RAL_LO(i), 0);
1531 WRITE_CSR(sc, WXREG_RAL_HI(i), 0);
1535 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1536 WRITE_CSR(sc, WXREG_RCTL, 0);
1542 * Clear out the hashed multicast table array.
1544 for (i = 0; i < WX_MC_TAB_SIZE; i++) {
/*
 * NOTE(review): the loop index 'i' is unused here -- the constant
 * offset (sizeof(u_int32_t) * 4) writes the same MTA word every
 * iteration instead of clearing each table entry.  This looks like it
 * should be (sizeof(u_int32_t) * i); confirm against the 8254x manual
 * and later revisions of this driver before changing.
 */
1545 WRITE_CSR(sc, WXREG_MTA + (sizeof (u_int32_t) * 4), 0);
/* Copper Livengood: pulse the PHY reset line to make the PHY operational. */
1548 if (IS_LIVENGOOD_CU(sc)) {
1550 * has a PHY - raise its reset line to make it operational
1552 u_int32_t tmp = READ_CSR(sc, WXREG_EXCT);
/* Make the reset pin an output... */
1553 tmp |= WXPHY_RESET_DIR4;
1554 WRITE_CSR(sc, WXREG_EXCT, tmp);
/* ...drive it low (assert reset)... */
1557 tmp = READ_CSR(sc, WXREG_EXCT);
1558 tmp &= ~WXPHY_RESET4;
1559 WRITE_CSR(sc, WXREG_EXCT, tmp);
/* ...then high again (deassert reset). */
1562 tmp = READ_CSR(sc, WXREG_EXCT);
1563 tmp |= WXPHY_RESET4;
1564 WRITE_CSR(sc, WXREG_EXCT, tmp);
/* Fiber Livengood: reset the link and program SWDPIO from the EEPROM. */
1566 } else if (IS_LIVENGOOD(sc)) {
1570 * Handle link control
1572 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr | WXDCR_LRST);
1575 wx_read_eeprom(sc, &tew, WX_EEPROM_CTLR2_OFF, 1);
1576 tew = (tew & WX_EEPROM_CTLR2_SWDPIO) << WX_EEPROM_EXT_SHIFT;
1577 WRITE_CSR(sc, WXREG_EXCT, (u_int32_t)tew);
/*
 * Program flow-control frame constants only when rx/tx flow control
 * is enabled in the cached device control register; zero otherwise.
 */
1580 if (sc->wx_dcr & (WXDCR_RFCE|WXDCR_TFCE)) {
1581 WRITE_CSR(sc, WXREG_FCAL, FC_FRM_CONST_LO);
1582 WRITE_CSR(sc, WXREG_FCAH, FC_FRM_CONST_HI);
1583 WRITE_CSR(sc, WXREG_FCT, FC_TYP_CONST);
1585 WRITE_CSR(sc, WXREG_FCAL, 0);
1586 WRITE_CSR(sc, WXREG_FCAH, 0);
1587 WRITE_CSR(sc, WXREG_FCT, 0);
1589 WRITE_CSR(sc, WXREG_FLOW_XTIMER, WX_XTIMER_DFLT);
/* Flow-control thresholds live at different offsets per chip family. */
1591 if (IS_WISEMAN(sc)) {
/* Pre-2.1 Wiseman: flow control is broken/unsupported; force it off. */
1592 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1593 WRITE_CSR(sc, WXREG_FLOW_RCV_HI, 0);
1594 WRITE_CSR(sc, WXREG_FLOW_RCV_LO, 0);
1595 sc->wx_dcr &= ~(WXDCR_RFCE|WXDCR_TFCE);
1597 WRITE_CSR(sc, WXREG_FLOW_RCV_HI, WX_RCV_FLOW_HI_DFLT);
1598 WRITE_CSR(sc, WXREG_FLOW_RCV_LO, WX_RCV_FLOW_LO_DFLT);
1601 WRITE_CSR(sc, WXREG_FLOW_RCV_HI_LIVENGOOD, WX_RCV_FLOW_HI_DFLT);
1602 WRITE_CSR(sc, WXREG_FLOW_RCV_LO_LIVENGOOD, WX_RCV_FLOW_LO_DFLT);
/* Fiber (non-copper) parts get the default transmit config word. */
1605 if (!IS_LIVENGOOD_CU(sc))
1606 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
1608 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
/* Fiber link-up dance: poll DSR for link, warn if SWDPIO1 stays set. */
1611 if (!IS_LIVENGOOD_CU(sc)) {
1613 * The pin stuff is all FM from the Linux driver.
1615 if ((READ_CSR(sc, WXREG_DCR) & WXDCR_SWDPIN1) == 0) {
1616 for (i = 0; i < (WX_LINK_UP_TIMEOUT/10); i++) {
1618 if (READ_CSR(sc, WXREG_DSR) & WXDSR_LU) {
1623 if (sc->linkup == 0) {
1629 printf("%s: SWDPIO1 did not clear- check for reversed "
1630 "or disconnected cable\n", sc->wx_name);
1631 /* but return okay anyway */
/* Arm the default interrupt-enable mask for wx_start/wx_intr to use. */
1635 sc->wx_ienable = WXIENABLE_DEFAULT;
1640 * Stop the interface. Cancels the statistics updater and resets the interface.
/*
 * wx_stop: quiesce the interface.  Cancels the periodic watchdog/stats
 * timeout, frees transmit and receive mbufs, drops any pending partial
 * receive chain, and clears RUNNING/OACTIVE so upper layers see the
 * interface as down.  (Interior lines elided in this extract.)
 */
1643 wx_stop(wx_softc_t *sc)
1647 struct ifnet *ifp = &sc->wx_if;
1649 DPRINTF(sc, ("%s: wx_stop\n", sc->wx_name));
1651 * Cancel stats updater.
1653 UNTIMEOUT(wx_watchdog, sc, sc);
1661 * Release any xmit buffers.
1663 for (txp = sc->tbase; txp && txp < &sc->tbase[WX_MAX_TDESC]; txp++) {
1671 * Free all the receive buffers.
1673 for (rxp = sc->rbase; rxp && rxp < &sc->rbase[WX_MAX_RDESC]; rxp++) {
/* Drop any partially-reassembled receive packet held across interrupts. */
1681 m_freem(sc->rpending);
1682 sc->rpending = NULL;
1686 * And we're outta here...
1689 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
/*
 * wx_txwatchdog: ifnet transmit-watchdog hook, called when a transmit
 * appears hung.  Attempts a reinit; on failure, sets wx_needreinit so a
 * later pass (presumably the periodic watchdog) retries -- TODO confirm
 * which path consumes wx_needreinit, it is not visible in this extract.
 */
1697 wx_txwatchdog(struct ifnet *ifp)
1699 wx_softc_t *sc = SOFTC_IFP(ifp);
1700 printf("%s: device timeout\n", sc->wx_name);
1703 printf("%s: could not re-init device\n", sc->wx_name);
1704 sc->wx_needreinit = 1;
/*
 * wx_init body (function header elided in this extract): full (re)init
 * of the interface.  Resets the hardware, rebuilds the receive ring,
 * programs the transmit and receive engines, kicks media selection,
 * marks the interface RUNNING, and restarts the watchdog timeout.
 * Returns nonzero on failure (callers assign its result to 'error').
 */
1711 struct ifmedia *ifm;
1712 wx_softc_t *sc = xsc;
1713 struct ifnet *ifp = &sc->wx_if;
1719 DPRINTF(sc, ("%s: wx_init\n", sc->wx_name));
1723 * Cancel any pending I/O by resetting things.
1724 * wx_stop will free any allocated mbufs.
1729 * Reset the hardware. All network addresses loaded here, but
1730 * neither the receiver nor the transmitter are enabled.
1733 if (wx_hw_initialize(sc)) {
1734 DPRINTF(sc, ("%s: wx_hw_initialize failed\n", sc->wx_name));
1740 * Set up the receive ring stuff.
1742 len = sizeof (wxrd_t) * WX_MAX_RDESC;
1743 bzero(sc->rdescriptors, len);
/*
 * Populate every RXINCR'th descriptor with a fresh cluster; per the
 * file header this chip is driven with only every other 16-byte
 * receive descriptor in use.
 */
1744 for (rxpkt = sc->rbase, i = 0; rxpkt != NULL && i < WX_MAX_RDESC;
1745 i += RXINCR, rxpkt++) {
1746 rd = &sc->rdescriptors[i];
1747 if (wx_get_rbuf(sc, rxpkt)) {
/* DMA address is offset so the chip lands payload at our alignment. */
1750 rd->address.lowpart = rxpkt->dma_addr + WX_RX_OFFSET_VALUE;
/* Loop exited early => an rbuf allocation failed. */
1752 if (i != WX_MAX_RDESC) {
1753 printf("%s: could not set up rbufs\n", sc->wx_name);
1760 * Set up transmit parameters and enable the transmitter.
1762 sc->tnxtfree = sc->tactive = 0;
1763 sc->tbsyf = sc->tbsyl = NULL;
/* Transmitter off while the ring registers are (re)programmed. */
1764 WRITE_CSR(sc, WXREG_TCTL, 0);
/* Transmit ring registers live at family-specific offsets. */
1766 if (IS_WISEMAN(sc)) {
1767 WRITE_CSR(sc, WXREG_TDBA_LO,
1768 vtophys((vm_offset_t)&sc->tdescriptors[0]));
1769 WRITE_CSR(sc, WXREG_TDBA_HI, 0);
1770 WRITE_CSR(sc, WXREG_TDLEN, WX_MAX_TDESC * sizeof (wxtd_t));
1771 WRITE_CSR(sc, WXREG_TDH, 0);
1772 WRITE_CSR(sc, WXREG_TDT, 0);
1773 WRITE_CSR(sc, WXREG_TQSA_HI, 0);
1774 WRITE_CSR(sc, WXREG_TQSA_LO, 0);
1775 WRITE_CSR(sc, WXREG_TIPG, WX_WISEMAN_TIPG_DFLT);
1776 WRITE_CSR(sc, WXREG_TIDV, wx_txint_delay);
1778 WRITE_CSR(sc, WXREG_TDBA_LO_LIVENGOOD,
1779 vtophys((vm_offset_t)&sc->tdescriptors[0]));
1780 WRITE_CSR(sc, WXREG_TDBA_HI_LIVENGOOD, 0);
1781 WRITE_CSR(sc, WXREG_TDLEN_LIVENGOOD,
1782 WX_MAX_TDESC * sizeof (wxtd_t));
1783 WRITE_CSR(sc, WXREG_TDH_LIVENGOOD, 0);
1784 WRITE_CSR(sc, WXREG_TDT_LIVENGOOD, 0);
1785 WRITE_CSR(sc, WXREG_TQSA_HI, 0);
1786 WRITE_CSR(sc, WXREG_TQSA_LO, 0);
1787 WRITE_CSR(sc, WXREG_TIPG, WX_LIVENGOOD_TIPG_DFLT);
1788 WRITE_CSR(sc, WXREG_TIDV_LIVENGOOD, wx_txint_delay);
/* Enable the transmitter with collision threshold/distance configured. */
1790 WRITE_CSR(sc, WXREG_TCTL, (WXTCTL_CT(WX_COLLISION_THRESHOLD) |
1791 WXTCTL_COLD(WX_FDX_COLLISION_DX) | WXTCTL_EN));
1793 * Set up receive parameters and enable the receiver.
1797 WRITE_CSR(sc, WXREG_RCTL, 0);
1799 if (IS_WISEMAN(sc)) {
1800 WRITE_CSR(sc, WXREG_RDTR0, WXRDTR_FPD);
1801 WRITE_CSR(sc, WXREG_RDBA0_LO,
1802 vtophys((vm_offset_t)&sc->rdescriptors[0]));
1803 WRITE_CSR(sc, WXREG_RDBA0_HI, 0);
1804 WRITE_CSR(sc, WXREG_RDLEN0, WX_MAX_RDESC * sizeof (wxrd_t));
1805 WRITE_CSR(sc, WXREG_RDH0, 0);
/* Tail left RXINCR short of head so the ring is never seen as empty. */
1806 WRITE_CSR(sc, WXREG_RDT0, (WX_MAX_RDESC - RXINCR));
1809 * The delay should yield ~10us receive interrupt delay
1811 WRITE_CSR(sc, WXREG_RDTR0_LIVENGOOD, WXRDTR_FPD | 0x40);
1812 WRITE_CSR(sc, WXREG_RDBA0_LO_LIVENGOOD,
1813 vtophys((vm_offset_t)&sc->rdescriptors[0]));
1814 WRITE_CSR(sc, WXREG_RDBA0_HI_LIVENGOOD, 0);
1815 WRITE_CSR(sc, WXREG_RDLEN0_LIVENGOOD,
1816 WX_MAX_RDESC * sizeof (wxrd_t));
1817 WRITE_CSR(sc, WXREG_RDH0_LIVENGOOD, 0);
1818 WRITE_CSR(sc, WXREG_RDT0_LIVENGOOD, (WX_MAX_RDESC - RXINCR));
/* The second receive ring is unused; zero its registers. */
1820 WRITE_CSR(sc, WXREG_RDTR1, 0);
1821 WRITE_CSR(sc, WXREG_RDBA1_LO, 0);
1822 WRITE_CSR(sc, WXREG_RDBA1_HI, 0);
1823 WRITE_CSR(sc, WXREG_RDLEN1, 0);
1824 WRITE_CSR(sc, WXREG_RDH1, 0);
1825 WRITE_CSR(sc, WXREG_RDT1, 0);
/* Long-packet enable for jumbo MTUs; 2KB buffers either way. */
1827 if (ifp->if_mtu > ETHERMTU) {
1828 bflags = WXRCTL_EN | WXRCTL_LPE | WXRCTL_2KRBUF;
1830 bflags = WXRCTL_EN | WXRCTL_2KRBUF;
/* Enable receiver, honoring broadcast/promiscuous/all-multicast flags. */
1833 WRITE_CSR(sc, WXREG_RCTL, bflags |
1834 ((ifp->if_flags & IFF_BROADCAST) ? WXRCTL_BAM : 0) |
1835 ((ifp->if_flags & IFF_PROMISC) ? WXRCTL_UPE : 0) |
1836 ((sc->all_mcasts) ? WXRCTL_MPE : 0));
/* Kick media: MII-managed PHYs vs. driver-managed ifmedia. */
1844 mii_mediachg(WX_MII_FROM_SOFTC(sc));
1846 ifm = &sc->wx_media;
1848 ifm->ifm_media = ifm->ifm_cur->ifm_media;
1849 wx_ifmedia_upd(ifp);
1854 * Mark that we're up and running...
1856 ifp->if_flags |= IFF_RUNNING;
1857 ifp->if_flags &= ~IFF_OACTIVE;
1861 * Start stats updater.
1863 TIMEOUT(sc, wx_watchdog, sc, hz);
1867 * And we're outta here...
1873 * Get a receive buffer for our use (and dma map the data area).
1875 * The Wiseman chip can have buffers be 256, 512, 1024 or 2048 bytes in size.
1876 * The LIVENGOOD chip can go higher (up to 16K), but what's the point as
1877 * we aren't doing non-MCLGET memory management.
1879 * It wants them aligned on 256 byte boundaries, but can actually cope
1880 * with an offset in the first 255 bytes of the head of a receive frame.
1882 * We'll allocate a MCLBYTE sized cluster but *not* adjust the data pointer
1883 * by any alignment value. Instead, we'll tell the chip to offset by any
1884 * alignment and we'll catch the alignment on the backend at interrupt time.
/*
 * wx_rxdma_map: record the physical (DMA) address of mbuf 'mb''s data
 * area in the receive-packet bookkeeping structure.  Uses vtophys, so
 * this assumes the cluster is physically contiguous (true for MCLGET
 * clusters on this platform).
 */
1887 wx_rxdma_map(wx_softc_t *sc, rxpkt_t *rxpkt, struct mbuf *mb)
1890 rxpkt->dma_addr = vtophys(mtod(mb, vm_offset_t));
/*
 * wx_get_rbuf: allocate a receive mbuf with an attached cluster and map
 * it for DMA.  Per the comment block above, the data pointer is NOT
 * alignment-adjusted here; the chip's receive offset handles that.
 * Returns nonzero on allocation failure (error paths elided here).
 */
1894 wx_get_rbuf(wx_softc_t *sc, rxpkt_t *rxpkt)
1897 MGETHDR(mb, M_DONTWAIT, MT_DATA);
1902 MCLGET(mb, M_DONTWAIT);
/* MCLGET failed to attach a cluster: M_EXT not set. */
1903 if ((mb->m_flags & M_EXT) == 0) {
1908 wx_rxdma_map(sc, rxpkt, mb);
/*
 * wx_ioctl: interface ioctl handler.  Visible cases in this extract:
 * default ether handling, MTU changes (bounds-checked against WX_MAXMTU
 * and ETHERMIN, full reinit on change), IFF_* flag changes (init/stop/
 * reinit), multicast list updates, and SIOC[GS]IFMEDIA forwarding to
 * either the MII layer or the driver's own ifmedia.
 */
1913 wx_ioctl(struct ifnet *ifp, IOCTL_CMD_TYPE command, caddr_t data,
1916 wx_softc_t *sc = SOFTC_IFP(ifp);
1917 struct ifreq *ifr = (struct ifreq *) data;
/* Fall back to generic Ethernet ioctl handling. */
1924 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: validate, then reinit only if the MTU actually changed. */
1927 if (ifr->ifr_mtu > WX_MAXMTU || ifr->ifr_mtu < ETHERMIN) {
1929 } else if (ifp->if_mtu != ifr->ifr_mtu) {
1930 ifp->if_mtu = ifr->ifr_mtu;
1931 error = wx_init(sc);
/* SIOCSIFFLAGS: cache ALLMULTI, then start/stop/reinit as needed. */
1935 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1938 * If interface is marked up and not running, then start it.
1939 * If it is marked down and running, stop it.
1940 * If it's up then re-initialize it. This is so flags
1941 * such as IFF_PROMISC are handled.
1943 if (ifp->if_flags & IFF_UP) {
1944 if ((ifp->if_flags & IFF_RUNNING) == 0) {
1945 error = wx_init(sc);
1948 if (ifp->if_flags & IFF_RUNNING) {
/* SIOCADDMULTI/SIOCDELMULTI: reprogram the multicast filter. */
1956 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1957 error = wx_mc_setup(sc);
1961 DPRINTF(sc, ("%s: ioctl SIOC[GS]IFMEDIA: command=%#lx\n",
1962 sc->wx_name, command));
/* Media ioctls go to the MII layer when a PHY is attached... */
1964 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
1965 error = ifmedia_ioctl(ifp, ifr,
1966 &mii->mii_media, command);
/* ...otherwise to the driver's own ifmedia instance. */
1968 error = ifmedia_ioctl(ifp, ifr, &sc->wx_media, command);
/*
 * wx_ifmedia_upd: ifmedia "set media" callback.  With an attached PHY,
 * delegate to mii_mediachg; otherwise validate that the requested media
 * is Ethernet (remaining handling elided in this extract).
 */
1981 wx_ifmedia_upd(struct ifnet *ifp)
1983 struct wx_softc *sc = SOFTC_IFP(ifp);
1984 struct ifmedia *ifm;
1986 DPRINTF(sc, ("%s: ifmedia_upd\n", sc->wx_name));
1989 mii_mediachg(WX_MII_FROM_SOFTC(sc));
1993 ifm = &sc->wx_media;
/* Reject non-Ethernet media requests. */
1995 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
/*
 * wx_ifmedia_sts: ifmedia status callback.  With a PHY, report the MII
 * layer's cached active/status; otherwise derive link, speed, and
 * duplex from the device status register (DSR).
 */
2003 wx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2006 struct wx_softc *sc = SOFTC_IFP(ifp);
2008 DPRINTF(sc, ("%s: ifmedia_sts: ", sc->wx_name));
2011 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
2013 ifmr->ifm_active = mii->mii_media_active;
2014 ifmr->ifm_status = mii->mii_media_status;
2015 DPRINTF(sc, ("active=%#x status=%#x\n",
2016 ifmr->ifm_active, ifmr->ifm_status));
2020 DPRINTF(sc, ("\n"));
2021 ifmr->ifm_status = IFM_AVALID;
2022 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive status. */
2024 if (sc->linkup == 0)
2027 ifmr->ifm_status |= IFM_ACTIVE;
2028 dsr = READ_CSR(sc, WXREG_DSR);
2029 if (IS_LIVENGOOD(sc)) {
2030 if (dsr & WXDSR_1000BT) {
2031 if (IS_LIVENGOOD_CU(sc)) {
/*
 * NOTE(review): media subtypes (IFM_1000_TX/IFM_1000_SX/IFM_100_FX/
 * IFM_10_T) are OR'd into ifm_status below; ifmedia convention puts
 * subtypes in ifm_active and only IFM_AVALID/IFM_ACTIVE-style bits in
 * ifm_status.  Looks like these should target ifm_active -- confirm
 * against ifmedia(4) before changing.
 */
2032 ifmr->ifm_status |= IFM_1000_TX;
2035 ifmr->ifm_status |= IFM_1000_SX;
2037 } else if (dsr & WXDSR_100BT) {
2038 ifmr->ifm_status |= IFM_100_FX; /* ?? */
2040 ifmr->ifm_status |= IFM_10_T; /* ?? */
/* Wiseman is fiber-only: anything linked is 1000BASE-SX. */
2043 ifmr->ifm_status |= IFM_1000_SX;
2045 if (dsr & WXDSR_FD) {
2046 ifmr->ifm_active |= IFM_FDX;
/*
 * Bit-banged MII management clock helpers: drive the WXPHY_MDC bit in
 * the device control register high/low, waiting 2us after each edge.
 * Comma-expression form so each use expands to a single statement.
 */
2051 #define RAISE_CLOCK(sc, dcr) \
2052 WRITE_CSR(sc, WXREG_DCR, (dcr) | WXPHY_MDC), DELAY(2)
/* Counterpart of RAISE_CLOCK: drive MDC low. */
2054 #define LOWER_CLOCK(sc, dcr) \
2055 WRITE_CSR(sc, WXREG_DCR, (dcr) & ~WXPHY_MDC), DELAY(2)
/*
 * wx_mii_shift_in: bit-bang a 16-bit value in from the PHY over MDIO.
 * Tristates MDIO (input direction), clocks one turnaround bit, then
 * samples MDIO on each of 16 rising clock edges, MSB first, and ends
 * with one extra idle clock cycle.
 */
2058 wx_mii_shift_in(wx_softc_t *sc)
2063 dcr = READ_CSR(sc, WXREG_DCR);
/* MDIO as input (clear direction and data bits). */
2064 dcr &= ~(WXPHY_MDIO_DIR | WXPHY_MDIO);
2065 WRITE_CSR(sc, WXREG_DCR, dcr);
/* Turnaround clock cycle before data bits arrive. */
2066 RAISE_CLOCK(sc, dcr);
2067 LOWER_CLOCK(sc, dcr);
2069 for (i = 0; i < 16; i++) {
2071 RAISE_CLOCK(sc, dcr);
2072 dcr = READ_CSR(sc, WXREG_DCR);
/* Sample the data line while the clock is high. */
2074 if (dcr & WXPHY_MDIO)
2077 LOWER_CLOCK(sc, dcr);
/* Trailing idle clock cycle. */
2080 RAISE_CLOCK(sc, dcr);
2081 LOWER_CLOCK(sc, dcr);
/*
 * wx_mii_shift_out: bit-bang the low 'count' bits of 'data' out to the
 * PHY over MDIO, MSB first, clocking each bit with one MDC cycle.
 */
2086 wx_mii_shift_out(wx_softc_t *sc, u_int32_t data, u_int32_t count)
2088 u_int32_t dcr, mask;
2090 dcr = READ_CSR(sc, WXREG_DCR);
/* Both MDIO and MDC driven as outputs. */
2091 dcr |= WXPHY_MDIO_DIR | WXPHY_MDC_DIR;
/* Walk a single-bit mask from bit (count-1) down to bit 0. */
2093 for (mask = (1 << (count - 1)); mask; mask >>= 1) {
2099 WRITE_CSR(sc, WXREG_DCR, dcr);
2101 RAISE_CLOCK(sc, dcr);
2102 LOWER_CLOCK(sc, dcr);
/*
 * wx_miibus_readreg: miibus register-read hook.  Sends the MII
 * management preamble and a read frame (start-of-frame, read opcode,
 * phy address, register number), then shifts in and masks the 16-bit
 * result.  Non-copper parts have no PHY (early-return path elided).
 */
2107 wx_miibus_readreg(void *arg, int phy, int reg)
2109 wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
2110 unsigned int data = 0;
2112 if (!IS_LIVENGOOD_CU(sc)) {
2115 wx_mii_shift_out(sc, WXPHYC_PREAMBLE, WXPHYC_PREAMBLE_LEN);
/* 14-bit command frame: SOF | read-op | phy addr | reg number. */
2116 wx_mii_shift_out(sc, reg | (phy << 5) | (WXPHYC_READ << 10) |
2117 (WXPHYC_SOF << 12), 14);
2118 data = wx_mii_shift_in(sc);
2119 return (data & WXMDIC_DATA_MASK);
/*
 * wx_miibus_writereg: miibus register-write hook.  Sends the preamble
 * followed by a full 32-bit write frame (start-of-frame, write opcode,
 * phy address, register number, turnaround, 16 data bits).
 */
2123 wx_miibus_writereg(void *arg, int phy, int reg, int data)
2125 wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
/* No PHY on non-copper parts (early-return path elided here). */
2126 if (!IS_LIVENGOOD_CU(sc)) {
2129 wx_mii_shift_out(sc, WXPHYC_PREAMBLE, WXPHYC_PREAMBLE_LEN);
2130 wx_mii_shift_out(sc, (u_int32_t)data | (WXPHYC_TURNAROUND << 16) |
2131 (reg << 18) | (phy << 23) | (WXPHYC_WRITE << 28) |
2132 (WXPHYC_SOF << 30), 32);
/*
 * wx_miibus_statchg: miibus link-status-change hook.  Mirrors the PHY's
 * negotiated speed, duplex, and flow-control settings into the MAC's
 * device control (DCR) and transmit control (TCTL) registers.
 */
2137 wx_miibus_statchg(void *arg)
2139 wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
2140 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
2141 u_int32_t dcr, tctl;
2147 tctl = READ_CSR(sc, WXREG_TCTL);
2148 DPRINTF(sc, ("%s: statchg dcr=%#x tctl=%#x", sc->wx_name, dcr, tctl));
/* Force speed/duplex from software; disable auto speed detection. */
2150 dcr |= WXDCR_FRCSPD | WXDCR_FRCDPX | WXDCR_SLU;
2151 dcr &= ~(WXDCR_SPEED_MASK | WXDCR_ASDE /* | WXDCR_ILOS */);
2153 if (mii->mii_media_status & IFM_ACTIVE) {
/* IFM_NONE with ACTIVE set means the link went down. */
2154 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
2155 DPRINTF(sc, (" link-down\n"));
/* Translate negotiated subtype into the MAC speed bits. */
2163 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX) {
2164 DPRINTF(sc, (" 1000TX"));
2165 dcr |= WXDCR_1000BT;
2166 } else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2167 DPRINTF(sc, (" 100TX"));
2169 } else /* assume IFM_10_TX */ {
2170 DPRINTF(sc, (" 10TX"));
/* Collision distance differs between full and half duplex. */
2174 if (mii->mii_media_active & IFM_FDX) {
2175 DPRINTF(sc, ("-FD"));
2176 tctl = WXTCTL_CT(WX_COLLISION_THRESHOLD) |
2177 WXTCTL_COLD(WX_FDX_COLLISION_DX) | WXTCTL_EN;
2180 DPRINTF(sc, ("-HD"));
2181 tctl = WXTCTL_CT(WX_COLLISION_THRESHOLD) |
2182 WXTCTL_COLD(WX_HDX_COLLISION_DX) | WXTCTL_EN;
2186 /* FLAG0==rx-flow-control FLAG1==tx-flow-control */
2187 if (mii->mii_media_active & IFM_FLAG0) {
2193 if (mii->mii_media_active & IFM_FLAG1) {
/* Program flow-control frame constants iff flow control is enabled. */
2199 if (dcr & (WXDCR_RFCE|WXDCR_TFCE)) {
2200 WRITE_CSR(sc, WXREG_FCAL, FC_FRM_CONST_LO);
2201 WRITE_CSR(sc, WXREG_FCAH, FC_FRM_CONST_HI);
2202 WRITE_CSR(sc, WXREG_FCT, FC_TYP_CONST);
2204 WRITE_CSR(sc, WXREG_FCAL, 0);
2205 WRITE_CSR(sc, WXREG_FCAH, 0);
2206 WRITE_CSR(sc, WXREG_FCT, 0);
2209 DPRINTF(sc, (" dcr=%#x tctl=%#x\n", dcr, tctl));
2210 WRITE_CSR(sc, WXREG_TCTL, tctl);
2212 WRITE_CSR(sc, WXREG_DCR, dcr);
2216 wx_miibus_mediainit(void *arg)