1 /* $FreeBSD: src/sys/pci/if_wx.c,v 1.5.2.12 2003/03/05 18:42:34 njl Exp $ */
2 /* $DragonFly: src/sys/dev/netif/wx/Attic/if_wx.c,v 1.3 2003/08/07 21:17:06 dillon Exp $ */
4 * Principal Author: Matthew Jacob <mjacob@feral.com>
5 * Copyright (c) 1999, 2001 by Traakan Software
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Additional Copyright (c) 2001 by Parag Patel
31 * under same licence for MII PHY code.
35 * Intel Gigabit Ethernet (82452/82453) Driver.
36 * Inspired by fxp driver by David Greenman for FreeBSD, and by
37 * Bill Paul's work in other FreeBSD network drivers.
41 * Many bug fixes gratefully acknowledged from:
43 * The folks at Sitara Networks
51 * Use only every other 16 byte receive descriptor, leaving the ones
52 * in between empty. This card is most efficient at reading/writing
53 * 32 byte cache lines, so avoid all the (not working for early rev
54 * cards) MWI and/or READ/MODIFY/WRITE cycles updating one descriptor
57 * This isn't debugged yet.
59 /* #define PADDED_CELL 1 */
62 * Since the includes are a mess, they'll all be in if_wxvar.h
69 #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va))
70 #endif /* __alpha__ */
73 * Function Prototypes, yadda yadda...
76 static int wx_intr(void *);
77 static void wx_handle_link_intr(wx_softc_t *);
78 static void wx_check_link(wx_softc_t *);
79 static void wx_handle_rxint(wx_softc_t *);
80 static void wx_gc(wx_softc_t *);
81 static void wx_start(struct ifnet *);
82 static int wx_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t);
83 static int wx_ifmedia_upd(struct ifnet *);
84 static void wx_ifmedia_sts(struct ifnet *, struct ifmediareq *);
85 static int wx_init(void *);
86 static void wx_hw_stop(wx_softc_t *);
87 static void wx_set_addr(wx_softc_t *, int, u_int8_t *);
88 static int wx_hw_initialize(wx_softc_t *);
89 static void wx_stop(wx_softc_t *);
90 static void wx_txwatchdog(struct ifnet *);
91 static int wx_get_rbuf(wx_softc_t *, rxpkt_t *);
92 static void wx_rxdma_map(wx_softc_t *, rxpkt_t *, struct mbuf *);
94 static INLINE void wx_eeprom_raise_clk(wx_softc_t *, u_int32_t);
95 static INLINE void wx_eeprom_lower_clk(wx_softc_t *, u_int32_t);
96 static INLINE void wx_eeprom_sobits(wx_softc_t *, u_int16_t, u_int16_t);
97 static INLINE u_int16_t wx_eeprom_sibits(wx_softc_t *);
98 static INLINE void wx_eeprom_cleanup(wx_softc_t *);
99 static INLINE u_int16_t wx_read_eeprom_word(wx_softc_t *, int);
100 static void wx_read_eeprom(wx_softc_t *, u_int16_t *, int, int);
102 static int wx_attach_common(wx_softc_t *);
103 static void wx_watchdog(void *);
105 static INLINE void wx_mwi_whackon(wx_softc_t *);
106 static INLINE void wx_mwi_unwhack(wx_softc_t *);
107 static int wx_dring_setup(wx_softc_t *);
108 static void wx_dring_teardown(wx_softc_t *);
110 static int wx_attach_phy(wx_softc_t *);
111 static int wx_miibus_readreg(void *, int, int);
112 static int wx_miibus_writereg(void *, int, int, int);
113 static void wx_miibus_statchg(void *);
114 static void wx_miibus_mediainit(void *);
116 static u_int32_t wx_mii_shift_in(wx_softc_t *);
117 static void wx_mii_shift_out(wx_softc_t *, u_int32_t, u_int32_t);
119 #define WX_DISABLE_INT(sc) WRITE_CSR(sc, WXREG_IMCLR, WXDISABLE)
120 #define WX_ENABLE_INT(sc) WRITE_CSR(sc, WXREG_IMASK, sc->wx_ienable)
123 * Until we do a bit more work, we can get no bigger than MCLBYTES
126 #define WX_MAXMTU (WX_MAX_PKT_SIZE_JUMBO - sizeof (struct ether_header))
128 #define WX_MAXMTU (MCLBYTES - sizeof (struct ether_header))
131 #define DPRINTF(sc, x) if (sc->wx_debug) printf x
132 #define IPRINTF(sc, x) if (sc->wx_verbose) printf x
134 static const char ldn[] = "%s: link down\n";
135 static const char lup[] = "%s: link up\n";
136 static const char sqe[] = "%s: receive sequence error\n";
137 static const char ane[] = "%s: /C/ ordered sets seen- enabling ANE\n";
138 static const char inane[] = "%s: no /C/ ordered sets seen- disabling ANE\n";
140 static int wx_txint_delay = 5000; /* ~5ms */
141 TUNABLE_INT("hw.wx.txint_delay", &wx_txint_delay);
143 SYSCTL_NODE(_hw, OID_AUTO, wx, CTLFLAG_RD, 0, "WX driver parameters");
144 SYSCTL_INT(_hw_wx, OID_AUTO, txint_delay, CTLFLAG_RW,
145 &wx_txint_delay, 0, "");
146 static int wx_dump_stats = -1;
147 SYSCTL_INT(_hw_wx, OID_AUTO, dump_stats, CTLFLAG_RW,
148 &wx_dump_stats, 0, "");
149 static int wx_clr_stats = -1;
150 SYSCTL_INT(_hw_wx, OID_AUTO, clear_stats, CTLFLAG_RW,
151 &wx_clr_stats, 0, "");
155 * Program multicast addresses.
157 * This function must be called at splimp, but it may sleep.
160 wx_mc_setup(wx_softc_t *sc)
162 struct ifnet *ifp = &sc->wx_if;
163 struct ifmultiaddr *ifma;
166 * XXX: drain TX queue
174 if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
176 return (wx_init(sc));
180 for (ifma = ifp->if_multiaddrs.lh_first, sc->wx_nmca = 0;
181 ifma != NULL; ifma = ifma->ifma_link.le_next) {
183 if (ifma->ifma_addr->sa_family != AF_LINK) {
186 if (sc->wx_nmca >= WX_RAL_TAB_SIZE-1) {
191 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
192 (void *) &sc->wx_mcaddr[sc->wx_nmca++][0], 6);
194 return (wx_init(sc));
198 * Return identification string if this device is ours.
201 wx_probe(device_t dev)
203 if (pci_get_vendor(dev) != WX_VENDOR_INTEL) {
206 switch (pci_get_device(dev)) {
207 case WX_PRODUCT_82452:
208 device_set_desc(dev, "Intel PRO/1000 Gigabit (WISEMAN)");
210 case WX_PRODUCT_LIVENGOOD:
211 device_set_desc(dev, "Intel PRO/1000 (LIVENGOOD)");
213 case WX_PRODUCT_82452_SC:
214 device_set_desc(dev, "Intel PRO/1000 F Gigabit Ethernet");
216 case WX_PRODUCT_82543:
217 device_set_desc(dev, "Intel PRO/1000 T Gigabit Ethernet");
226 wx_attach(device_t dev)
229 wx_softc_t *sc = device_get_softc(dev);
234 bzero(sc, sizeof (wx_softc_t));
236 callout_handle_init(&sc->w.sch);
242 if (getenv_int ("wx_debug", &rid)) {
243 if (rid & (1 << device_get_unit(dev))) {
248 if (getenv_int("wx_no_ilos", &rid)) {
249 if (rid & (1 << device_get_unit(dev))) {
254 if (getenv_int("wx_ilos", &rid)) {
255 if (rid & (1 << device_get_unit(dev))) {
260 if (getenv_int("wx_no_flow", &rid)) {
261 if (rid & (1 << device_get_unit(dev))) {
267 mtx_init(&sc->wx_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
271 * get revision && id...
273 sc->wx_idnrev = (pci_get_device(dev) << 16) | (pci_get_revid(dev));
276 * Enable bus mastering, make sure that the cache line size is right.
278 pci_enable_busmaster(dev);
279 pci_enable_io(dev, SYS_RES_MEMORY);
280 val = pci_read_config(dev, PCIR_COMMAND, 4);
281 if ((val & PCIM_CMD_MEMEN) == 0) {
282 device_printf(dev, "failed to enable memory mapping\n");
288 * Let the BIOS do it's job- but check for sanity.
290 val = pci_read_config(dev, PCIR_CACHELNSZ, 1);
291 if (val < 4 || val > 32) {
292 pci_write_config(dev, PCIR_CACHELNSZ, 8, 1);
296 * Map control/status registers.
299 sc->w.mem = bus_alloc_resource(dev, SYS_RES_MEMORY,
300 &rid, 0, ~0, 1, RF_ACTIVE);
302 device_printf(dev, "could not map memory\n");
306 sc->w.st = rman_get_bustag(sc->w.mem);
307 sc->w.sh = rman_get_bushandle(sc->w.mem);
310 sc->w.irq = bus_alloc_resource(dev, SYS_RES_IRQ,
311 &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
312 if (sc->w.irq == NULL) {
313 device_printf(dev, "could not map interrupt\n");
317 error = bus_setup_intr(dev, sc->w.irq, INTR_TYPE_NET,
318 (void (*)(void *))wx_intr, sc, &sc->w.ih);
320 device_printf(dev, "could not setup irq\n");
323 (void) snprintf(sc->wx_name, sizeof (sc->wx_name) - 1, "wx%d",
324 device_get_unit(dev));
325 if (wx_attach_common(sc)) {
326 bus_teardown_intr(dev, sc->w.irq, sc->w.ih);
327 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->w.irq);
328 bus_release_resource(dev, SYS_RES_MEMORY, WX_MMBA, sc->w.mem);
332 device_printf(dev, "Ethernet address %02x:%02x:%02x:%02x:%02x:%02x\n",
333 sc->w.arpcom.ac_enaddr[0], sc->w.arpcom.ac_enaddr[1],
334 sc->w.arpcom.ac_enaddr[2], sc->w.arpcom.ac_enaddr[3],
335 sc->w.arpcom.ac_enaddr[4], sc->w.arpcom.ac_enaddr[5]);
337 ifp = &sc->w.arpcom.ac_if;
338 ifp->if_unit = device_get_unit(dev);
340 ifp->if_mtu = ETHERMTU; /* we always start at ETHERMTU size */
341 ifp->if_output = ether_output;
342 ifp->if_baudrate = 1000000000;
343 ifp->if_init = (void (*)(void *))wx_init;
345 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
346 ifp->if_ioctl = wx_ioctl;
347 ifp->if_start = wx_start;
348 ifp->if_watchdog = wx_txwatchdog;
349 ifp->if_snd.ifq_maxlen = WX_MAX_TDESC - 1;
350 ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
357 wx_attach_phy(wx_softc_t *sc)
359 if (mii_phy_probe(sc->w.dev, &sc->w.miibus, wx_ifmedia_upd,
361 printf("%s: no PHY probed!\n", sc->wx_name);
369 wx_detach(device_t dev)
371 wx_softc_t *sc = device_get_softc(dev);
376 ether_ifdetach(&sc->w.arpcom.ac_if, ETHER_BPF_SUPPORTED);
378 bus_generic_detach(dev);
379 device_delete_child(dev, sc->w.miibus);
381 ifmedia_removeall(&sc->wx_media);
383 bus_teardown_intr(dev, sc->w.irq, sc->w.ih);
384 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->w.irq);
385 bus_release_resource(dev, SYS_RES_MEMORY, WX_MMBA, sc->w.mem);
387 wx_dring_teardown(sc);
398 mtx_destroy(&sc->wx_mtx);
404 wx_shutdown(device_t dev)
406 wx_hw_stop((wx_softc_t *) device_get_softc(dev));
411 wx_mwi_whackon(wx_softc_t *sc)
413 sc->wx_cmdw = pci_read_config(sc->w.dev, PCIR_COMMAND, 2);
414 pci_write_config(sc->w.dev, PCIR_COMMAND, sc->wx_cmdw & ~MWI, 2);
418 wx_mwi_unwhack(wx_softc_t *sc)
420 if (sc->wx_cmdw & MWI) {
421 pci_write_config(sc->w.dev, PCIR_COMMAND, sc->wx_cmdw, 2);
426 wx_dring_setup(wx_softc_t *sc)
430 len = sizeof (wxrd_t) * WX_MAX_RDESC;
431 sc->rdescriptors = (wxrd_t *)
432 contigmalloc(len, M_DEVBUF, M_NOWAIT, 0, ~0, 4096, 0);
433 if (sc->rdescriptors == NULL) {
434 printf("%s: could not allocate rcv descriptors\n", sc->wx_name);
437 if (((intptr_t)sc->rdescriptors) & 0xfff) {
438 contigfree(sc->rdescriptors, len, M_DEVBUF);
439 sc->rdescriptors = NULL;
440 printf("%s: rcv descriptors not 4KB aligned\n", sc->wx_name);
443 bzero(sc->rdescriptors, len);
445 len = sizeof (wxtd_t) * WX_MAX_TDESC;
446 sc->tdescriptors = (wxtd_t *)
447 contigmalloc(len, M_DEVBUF, M_NOWAIT, 0, ~0, 4096, 0);
448 if (sc->tdescriptors == NULL) {
449 contigfree(sc->rdescriptors,
450 sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
451 sc->rdescriptors = NULL;
452 printf("%s: could not allocate xmt descriptors\n", sc->wx_name);
455 if (((intptr_t)sc->tdescriptors) & 0xfff) {
456 contigfree(sc->rdescriptors,
457 sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
458 contigfree(sc->tdescriptors, len, M_DEVBUF);
459 sc->rdescriptors = NULL;
460 sc->tdescriptors = NULL;
461 printf("%s: xmt descriptors not 4KB aligned\n", sc->wx_name);
464 bzero(sc->tdescriptors, len);
469 wx_dring_teardown(wx_softc_t *sc)
471 if (sc->rdescriptors) {
472 contigfree(sc->rdescriptors,
473 sizeof (wxrd_t) * WX_MAX_RDESC, M_DEVBUF);
474 sc->rdescriptors = NULL;
476 if (sc->tdescriptors) {
477 contigfree(sc->tdescriptors,
478 sizeof (wxtd_t) * WX_MAX_TDESC, M_DEVBUF);
479 sc->tdescriptors = NULL;
483 static device_method_t wx_methods[] = {
484 /* Device interface */
485 DEVMETHOD(device_probe, wx_probe),
486 DEVMETHOD(device_attach, wx_attach),
487 DEVMETHOD(device_detach, wx_detach),
488 DEVMETHOD(device_shutdown, wx_shutdown),
491 DEVMETHOD(bus_print_child, bus_generic_print_child),
492 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
495 DEVMETHOD(miibus_readreg, wx_miibus_readreg),
496 DEVMETHOD(miibus_writereg, wx_miibus_writereg),
497 DEVMETHOD(miibus_statchg, wx_miibus_statchg),
498 DEVMETHOD(miibus_mediainit, wx_miibus_mediainit),
503 static driver_t wx_driver = {
504 "wx", wx_methods, sizeof(wx_softc_t),
506 static devclass_t wx_devclass;
507 DRIVER_MODULE(if_wx, pci, wx_driver, wx_devclass, 0, 0);
508 DRIVER_MODULE(miibus, wx, miibus_driver, miibus_devclass, 0, 0);
511 * Do generic parts of attach. Our registers have been mapped
512 * and our interrupt registered.
515 wx_attach_common(wx_softc_t *sc)
522 * First, check for revision support.
524 if (sc->wx_idnrev < WX_WISEMAN_2_0) {
525 printf("%s: cannot support ID 0x%x, revision %d chips\n",
526 sc->wx_name, sc->wx_idnrev >> 16, sc->wx_idnrev & 0xffff);
531 * Second, reset the chip.
536 * Third, validate our EEPROM.
542 * Fourth, read eeprom for our MAC address and other things.
544 wx_read_eeprom(sc, (u_int16_t *)sc->wx_enaddr, WX_EEPROM_MAC_OFF, 3);
547 * Fifth, establish some adapter parameters.
551 if (IS_LIVENGOOD_CU(sc)) {
553 /* settings to talk to PHY */
554 sc->wx_dcr |= WXDCR_FRCSPD | WXDCR_FRCDPX | WXDCR_SLU;
555 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
558 * Raise the PHY's reset line to make it operational.
560 tmp = READ_CSR(sc, WXREG_EXCT);
561 tmp |= WXPHY_RESET_DIR4;
562 WRITE_CSR(sc, WXREG_EXCT, tmp);
565 tmp = READ_CSR(sc, WXREG_EXCT);
566 tmp &= ~WXPHY_RESET4;
567 WRITE_CSR(sc, WXREG_EXCT, tmp);
570 tmp = READ_CSR(sc, WXREG_EXCT);
572 WRITE_CSR(sc, WXREG_EXCT, tmp);
575 if (wx_attach_phy(sc)) {
579 ifmedia_init(&sc->wx_media, IFM_IMASK,
580 wx_ifmedia_upd, wx_ifmedia_sts);
582 ifmedia_add(&sc->wx_media, IFM_ETHER|IFM_1000_SX, 0, NULL);
583 ifmedia_add(&sc->wx_media,
584 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
585 ifmedia_set(&sc->wx_media, IFM_ETHER|IFM_1000_SX|IFM_FDX);
587 sc->wx_media.ifm_media = sc->wx_media.ifm_cur->ifm_media;
591 * Sixth, establish a default device control register word.
594 if (sc->wx_cfg1 & WX_EEPROM_CTLR1_FD)
595 sc->wx_dcr |= WXDCR_FD;
596 if (sc->wx_cfg1 & WX_EEPROM_CTLR1_ILOS)
597 sc->wx_dcr |= WXDCR_ILOS;
599 tmp = (sc->wx_cfg1 >> WX_EEPROM_CTLR1_SWDPIO_SHIFT) & WXDCR_SWDPIO_MASK;
600 sc->wx_dcr |= (tmp << WXDCR_SWDPIO_SHIFT);
603 sc->wx_dcr &= ~WXDCR_ILOS;
605 sc->wx_dcr |= WXDCR_ILOS;
606 if (sc->wx_no_flow == 0)
607 sc->wx_dcr |= WXDCR_RFCE | WXDCR_TFCE;
610 * Seventh, allocate various sw structures...
612 len = sizeof (rxpkt_t) * WX_MAX_RDESC;
613 sc->rbase = (rxpkt_t *) WXMALLOC(len);
614 if (sc->rbase == NULL) {
617 bzero(sc->rbase, len);
620 len = sizeof (txpkt_t) * WX_MAX_TDESC;
621 sc->tbase = (txpkt_t *) WXMALLOC(len);
622 if (sc->tbase == NULL) {
625 bzero(sc->tbase, len);
629 * Eighth, allocate and dma map (platform dependent) descriptor rings.
630 * They have to be aligned on a 4KB boundary.
632 if (wx_dring_setup(sc) == 0) {
637 printf("%s: failed to do common attach (%d)\n", sc->wx_name, ll);
638 wx_dring_teardown(sc);
655 wx_eeprom_raise_clk(wx_softc_t *sc, u_int32_t regval)
657 WRITE_CSR(sc, WXREG_EECDR, regval | WXEECD_SK);
662 wx_eeprom_lower_clk(wx_softc_t *sc, u_int32_t regval)
664 WRITE_CSR(sc, WXREG_EECDR, regval & ~WXEECD_SK);
669 wx_eeprom_sobits(wx_softc_t *sc, u_int16_t data, u_int16_t count)
671 u_int32_t regval, mask;
673 mask = 1 << (count - 1);
674 regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_DO);
680 regval &= ~WXEECD_DI;
681 WRITE_CSR(sc, WXREG_EECDR, regval); DELAY(50);
682 wx_eeprom_raise_clk(sc, regval);
683 wx_eeprom_lower_clk(sc, regval);
686 WRITE_CSR(sc, WXREG_EECDR, regval & ~WXEECD_DI);
689 static INLINE u_int16_t
690 wx_eeprom_sibits(wx_softc_t *sc)
692 unsigned int regval, i;
696 regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_DO);
697 for (i = 0; i != 16; i++) {
699 wx_eeprom_raise_clk(sc, regval);
700 regval = READ_CSR(sc, WXREG_EECDR) & ~WXEECD_DI;
701 if (regval & WXEECD_DO) {
704 wx_eeprom_lower_clk(sc, regval);
710 wx_eeprom_cleanup(wx_softc_t *sc)
713 regval = READ_CSR(sc, WXREG_EECDR) & ~(WXEECD_DI|WXEECD_CS);
714 WRITE_CSR(sc, WXREG_EECDR, regval); DELAY(50);
715 wx_eeprom_raise_clk(sc, regval);
716 wx_eeprom_lower_clk(sc, regval);
719 static u_int16_t INLINE
720 wx_read_eeprom_word(wx_softc_t *sc, int offset)
723 WRITE_CSR(sc, WXREG_EECDR, WXEECD_CS);
724 wx_eeprom_sobits(sc, EEPROM_READ_OPCODE, 3);
725 wx_eeprom_sobits(sc, offset, 6);
726 data = wx_eeprom_sibits(sc);
727 wx_eeprom_cleanup(sc);
732 wx_read_eeprom(wx_softc_t *sc, u_int16_t *data, int offset, int words)
735 for (i = 0; i < words; i++) {
736 *data++ = wx_read_eeprom_word(sc, offset++);
738 sc->wx_cfg1 = wx_read_eeprom_word(sc, WX_EEPROM_CTLR1_OFF);
742 * Start packet transmission on the interface.
746 wx_start(struct ifnet *ifp)
748 wx_softc_t *sc = SOFTC_IFP(ifp);
749 u_int16_t widx = WX_MAX_TDESC, cidx, nactv;
752 DPRINTF(sc, ("%s: wx_start\n", sc->wx_name));
754 while (nactv < WX_MAX_TDESC - 1) {
757 struct mbuf *m, *mb_head;
759 IF_DEQUEUE(&ifp->if_snd, mb_head);
760 if (mb_head == NULL) {
766 * If we have a packet less than ethermin, pad it out.
768 if (mb_head->m_pkthdr.len < WX_MIN_RPKT_SIZE) {
769 if (mb_head->m_next == NULL) {
770 mb_head->m_len = WX_MIN_RPKT_SIZE;
772 MGETHDR(m, M_DONTWAIT, MT_DATA);
777 m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
779 m->m_pkthdr.len = m->m_len = WX_MIN_RPKT_SIZE;
780 bzero(mtod(m, char *) + mb_head->m_pkthdr.len,
781 WX_MIN_RPKT_SIZE - mb_head->m_pkthdr.len);
793 * Go through each of the mbufs in the chain and initialize
794 * the transmit buffer descriptors with the physical address
795 * and size of that mbuf. If we have a length less than our
796 * minimum transmit size, we bail (to do a pullup). If we run
797 * out of descriptors, we also bail and try and do a pullup.
799 for (plen = ndesc = 0, m = mb_head; m != NULL; m = m->m_next) {
804 * If this mbuf has no data, skip it.
811 * This appears to be a bogus check the PRO1000T.
812 * I think they meant that the minimum packet size
813 * is in fact WX_MIN_XPKT_SIZE (all data loaded)
817 * If this mbuf is too small for the chip's minimum,
818 * break out to cluster it.
820 if (m->m_len < WX_MIN_XPKT_SIZE) {
827 * Do we have a descriptor available for this mbuf?
829 if (++nactv == WX_MAX_TDESC) {
830 if (gctried++ == 0) {
837 sc->tbase[cidx].dptr = m;
838 td = &sc->tdescriptors[cidx];
839 td->length = m->m_len;
842 vptr = mtod(m, vm_offset_t);
843 td->address.highpart = 0;
844 td->address.lowpart = vtophys(vptr);
853 printf("%s: XMIT[%d] %p vptr %lx (length %d "
854 "DMA addr %x) idx %d\n", sc->wx_name,
855 ndesc, m, (long) vptr, td->length,
856 td->address.lowpart, cidx);
859 cidx = T_NXT_IDX(cidx);
863 * If we get here and m is NULL, we can send
864 * the the packet chain described by mb_head.
868 * Mark the last descriptor with EOP and tell the
869 * chip to insert a final checksum.
871 wxtd_t *td = &sc->tdescriptors[T_PREV_IDX(cidx)];
872 td->cmd = TXCMD_EOP|TXCMD_IFCS;
874 * Set up a delayed interrupt when this packet
875 * is sent and the descriptor written back.
876 * Additional packets completing will cause
877 * interrupt to be delayed further. Therefore,
878 * after the *last* packet is sent, after the delay
879 * period in TIDV, an interrupt will be generated
880 * which will cause us to garbage collect.
882 td->cmd |= TXCMD_IDE|TXCMD_RPS;
885 * Don't xmit odd length packets.
886 * We're okay with bumping things
887 * up as long as our mbuf allocation
888 * is always larger than our MTU
889 * by a comfortable amount.
891 * Yes, it's a hole to run past the end
899 sc->tbase[sc->tnxtfree].sidx = sc->tnxtfree;
900 sc->tbase[sc->tnxtfree].eidx = cidx;
901 sc->tbase[sc->tnxtfree].next = NULL;
903 sc->tbsyl->next = &sc->tbase[sc->tnxtfree];
905 sc->tbsyf = &sc->tbase[sc->tnxtfree];
907 sc->tbsyl = &sc->tbase[sc->tnxtfree];
912 bpf_mtap(WX_BPFTAP_ARG(ifp), mb_head);
913 /* defer xmit until we've got them all */
919 * Otherwise, we couldn't send this packet for some reason.
921 * If don't have a descriptor available, and this is a
922 * single mbuf packet, freeze output so that later we
923 * can restart when we have more room. Otherwise, we'll
924 * try and cluster the request. We've already tried to
925 * garbage collect completed descriptors.
927 if (nactv == WX_MAX_TDESC && mb_head->m_next == NULL) {
928 sc->wx_xmitputback++;
929 ifp->if_flags |= IFF_OACTIVE;
930 IF_PREPEND(&ifp->if_snd, mb_head);
935 * Otherwise, it's either a fragment length somewhere in the
936 * chain that isn't at least WX_MIN_XPKT_SIZE in length or
937 * the number of fragments exceeds the number of descriptors
940 * We could try a variety of strategies here- if this is
941 * a length problem for single mbuf packet or a length problem
942 * for the last mbuf in a chain (we could just try and adjust
943 * it), but it's just simpler to try and cluster it.
945 MGETHDR(m, M_DONTWAIT, MT_DATA);
950 MCLGET(m, M_DONTWAIT);
951 if ((m->m_flags & M_EXT) == 0) {
956 m_copydata(mb_head, 0, mb_head->m_pkthdr.len, mtod(m, caddr_t));
957 m->m_pkthdr.len = m->m_len = mb_head->m_pkthdr.len;
960 sc->wx_xmitcluster++;
964 if (widx < WX_MAX_TDESC) {
965 if (IS_WISEMAN(sc)) {
966 WRITE_CSR(sc, WXREG_TDT, widx);
968 WRITE_CSR(sc, WXREG_TDT_LIVENGOOD, widx);
972 if (sc->tactive == WX_MAX_TDESC - 1) {
975 if (sc->tactive >= WX_MAX_TDESC - 1) {
976 sc->wx_xmitblocked++;
977 ifp->if_flags |= IFF_OACTIVE;
981 /* used SW LED to indicate transmission active */
982 if (sc->tactive > 0 && sc->wx_mii) {
983 WRITE_CSR(sc, WXREG_DCR,
984 READ_CSR(sc, WXREG_DCR) | (WXDCR_SWDPIO0|WXDCR_SWDPIN0));
990 * Process interface interrupts.
995 wx_softc_t *sc = arg;
1000 * Read interrupt cause register. Reading it clears bits.
1002 sc->wx_icr = READ_CSR(sc, WXREG_ICR);
1007 if (sc->wx_icr & (WXISR_LSC|WXISR_RXSEQ|WXISR_GPI_EN1)) {
1009 wx_handle_link_intr(sc);
1011 wx_handle_rxint(sc);
1012 if (sc->wx_icr & WXISR_TXDW) {
1017 if (sc->wx_icr & WXISR_TXQE) {
1022 if (sc->wx_if.if_snd.ifq_head != NULL) {
1023 wx_start(&sc->wx_if);
1032 wx_handle_link_intr(wx_softc_t *sc)
1034 u_int32_t txcw, rxcw, dcr, dsr;
1037 dcr = READ_CSR(sc, WXREG_DCR);
1038 DPRINTF(sc, ("%s: handle_link_intr: icr=%#x dcr=%#x\n",
1039 sc->wx_name, sc->wx_icr, dcr));
1041 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
1043 if (mii->mii_media_status & IFM_ACTIVE) {
1044 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
1045 IPRINTF(sc, (ldn, sc->wx_name));
1048 IPRINTF(sc, (lup, sc->wx_name));
1051 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1052 } else if (sc->wx_icr & WXISR_RXSEQ) {
1053 DPRINTF(sc, (sqe, sc->wx_name));
1058 txcw = READ_CSR(sc, WXREG_XMIT_CFGW);
1059 rxcw = READ_CSR(sc, WXREG_RECV_CFGW);
1060 dsr = READ_CSR(sc, WXREG_DSR);
1063 * If we have LOS or are now receiving Ordered Sets and are not
1064 * doing auto-negotiation, restore autonegotiation.
1067 if (((dcr & WXDCR_SWDPIN1) || (rxcw & WXRXCW_C)) &&
1068 ((txcw & WXTXCW_ANE) == 0)) {
1069 DPRINTF(sc, (ane, sc->wx_name));
1070 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
1071 sc->wx_dcr &= ~WXDCR_SLU;
1072 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1076 if (sc->wx_icr & WXISR_LSC) {
1077 if (READ_CSR(sc, WXREG_DSR) & WXDSR_LU) {
1078 IPRINTF(sc, (lup, sc->wx_name));
1080 sc->wx_dcr |= (WXDCR_SWDPIO0|WXDCR_SWDPIN0);
1082 IPRINTF(sc, (ldn, sc->wx_name));
1084 sc->wx_dcr &= ~(WXDCR_SWDPIO0|WXDCR_SWDPIN0);
1086 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1088 DPRINTF(sc, (sqe, sc->wx_name));
1093 wx_check_link(wx_softc_t *sc)
1095 u_int32_t rxcw, dcr, dsr;
1098 mii_pollstat(WX_MII_FROM_SOFTC(sc));
1102 rxcw = READ_CSR(sc, WXREG_RECV_CFGW);
1103 dcr = READ_CSR(sc, WXREG_DCR);
1104 dsr = READ_CSR(sc, WXREG_DSR);
1106 if ((dsr & WXDSR_LU) == 0 && (dcr & WXDCR_SWDPIN1) == 0 &&
1107 (rxcw & WXRXCW_C) == 0) {
1108 if (sc->ane_failed == 0) {
1112 DPRINTF(sc, (inane, sc->wx_name));
1113 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT & ~WXTXCW_ANE);
1114 if (sc->wx_idnrev < WX_WISEMAN_2_1)
1115 sc->wx_dcr &= ~WXDCR_TFCE;
1116 sc->wx_dcr |= WXDCR_SLU;
1117 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1118 } else if ((rxcw & WXRXCW_C) != 0 && (dcr & WXDCR_SLU) != 0) {
1119 DPRINTF(sc, (ane, sc->wx_name));
1120 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
1121 sc->wx_dcr &= ~WXDCR_SLU;
1122 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1127 wx_handle_rxint(wx_softc_t *sc)
1129 struct ether_header *eh;
1130 struct mbuf *m0, *mb, *pending[WX_MAX_RDESC];
1131 struct ifnet *ifp = &sc->wx_if;
1132 int npkts, ndesc, lidx, idx, tlen;
1134 DPRINTF(sc, ("%s: wx_handle_rxint\n", sc->wx_name));
1136 for (m0 = sc->rpending, tlen = ndesc = npkts = 0, idx = sc->rnxt,
1137 lidx = R_PREV_IDX(idx); ndesc < WX_MAX_RDESC;
1138 ndesc++, lidx = idx, idx = R_NXT_IDX(idx)) {
1141 int length, offset, lastframe;
1143 rd = &sc->rdescriptors[idx];
1145 * XXX: DMA Flush descriptor
1147 if ((rd->status & RDSTAT_DD) == 0) {
1149 if (sc->rpending == NULL) {
1150 m0->m_pkthdr.len = tlen;
1157 DPRINTF(sc, ("%s: WXRX: ndesc %d idx %d lidx %d\n",
1158 sc->wx_name, ndesc, idx, lidx));
1162 if (rd->errors != 0) {
1163 printf("%s: packet with errors (%x)\n",
1164 sc->wx_name, rd->errors);
1171 m_freem(sc->rpending);
1172 sc->rpending = NULL;
1179 rxpkt = &sc->rbase[idx];
1182 printf("%s: receive descriptor with no mbuf\n",
1184 (void) wx_get_rbuf(sc, rxpkt);
1191 m_freem(sc->rpending);
1192 sc->rpending = NULL;
1198 /* XXX: Flush DMA for rxpkt */
1200 if (wx_get_rbuf(sc, rxpkt)) {
1202 wx_rxdma_map(sc, rxpkt, mb);
1209 m_freem(sc->rpending);
1210 sc->rpending = NULL;
1217 * Save the completing packet's offset value and length
1218 * and install the new one into the descriptor.
1220 lastframe = (rd->status & RDSTAT_EOP) != 0;
1221 length = rd->length;
1222 offset = rd->address.lowpart & 0xff;
1223 bzero (rd, sizeof (*rd));
1224 rd->address.lowpart = rxpkt->dma_addr + WX_RX_OFFSET_VALUE;
1227 mb->m_data += offset;
1232 } else if (m0 == sc->rpending) {
1234 * Pick up where we left off before. If
1235 * we have an offset (we're assuming the
1236 * first frame has an offset), then we've
1237 * lost sync somewhere along the line.
1240 printf("%s: lost sync with partial packet\n",
1242 m_freem(sc->rpending);
1243 sc->rpending = NULL;
1247 sc->rpending = NULL;
1248 tlen = m0->m_pkthdr.len;
1254 DPRINTF(sc, ("%s: RDESC[%d] len %d off %d lastframe %d\n",
1255 sc->wx_name, idx, mb->m_len, offset, lastframe));
1258 if (lastframe == 0) {
1261 m0->m_pkthdr.rcvif = ifp;
1262 m0->m_pkthdr.len = tlen - WX_CRC_LENGTH;
1263 mb->m_len -= WX_CRC_LENGTH;
1265 eh = mtod(m0, struct ether_header *);
1267 * No need to check for promiscous mode since
1268 * the decision to keep or drop the packet is
1269 * handled by ether_input()
1271 pending[npkts++] = m0;
1277 if (IS_WISEMAN(sc)) {
1278 WRITE_CSR(sc, WXREG_RDT0, lidx);
1280 WRITE_CSR(sc, WXREG_RDT0_LIVENGOOD, lidx);
1289 for (idx = 0; idx < npkts; idx++) {
1292 bpf_mtap(WX_BPFTAP_ARG(ifp), mb);
1295 DPRINTF(sc, ("%s: RECV packet length %d\n",
1296 sc->wx_name, mb->m_pkthdr.len));
1297 eh = mtod(mb, struct ether_header *);
1298 m_adj(mb, sizeof (struct ether_header));
1299 ether_input(ifp, eh, mb);
1304 wx_gc(wx_softc_t *sc)
1306 struct ifnet *ifp = &sc->wx_if;
1312 if (IS_WISEMAN(sc)) {
1313 tdh = READ_CSR(sc, WXREG_TDH);
1315 tdh = READ_CSR(sc, WXREG_TDH_LIVENGOOD);
1317 while (txpkt != NULL) {
1318 u_int32_t end = txpkt->eidx, cidx = tdh;
1321 * Normalize start..end indices to 2 *
1322 * WX_MAX_TDESC range to eliminate wrap.
1324 if (txpkt->eidx < txpkt->sidx) {
1325 end += WX_MAX_TDESC;
1329 * Normalize current chip index to 2 *
1330 * WX_MAX_TDESC range to eliminate wrap.
1332 if (cidx < txpkt->sidx) {
1333 cidx += WX_MAX_TDESC;
1337 * If the current chip index is between low and
1338 * high indices for this packet, it's not finished
1339 * transmitting yet. Because transmits are done FIFO,
1340 * this means we're done garbage collecting too.
1343 if (txpkt->sidx <= cidx && cidx < txpkt->eidx) {
1344 DPRINTF(sc, ("%s: TXGC %d..%d TDH %d\n", sc->wx_name,
1345 txpkt->sidx, txpkt->eidx, tdh));
1351 (void) m_freem(txpkt->dptr);
1353 printf("%s: null mbuf in gc\n", sc->wx_name);
1356 for (cidx = txpkt->sidx; cidx != txpkt->eidx;
1357 cidx = T_NXT_IDX(cidx)) {
1361 td = &sc->tdescriptors[cidx];
1362 if (td->status & TXSTS_EC) {
1363 IPRINTF(sc, ("%s: excess collisions\n",
1365 ifp->if_collisions++;
1368 if (td->status & TXSTS_LC) {
1370 ("%s: lost carrier\n", sc->wx_name));
1373 tmp = &sc->tbase[cidx];
1374 DPRINTF(sc, ("%s: TXGC[%d] %p %d..%d done nact %d "
1375 "TDH %d\n", sc->wx_name, cidx, tmp->dptr,
1376 txpkt->sidx, txpkt->eidx, sc->tactive, tdh));
1378 if (sc->tactive == 0) {
1379 printf("%s: nactive < 0?\n", sc->wx_name);
1383 bzero(td, sizeof (*td));
1385 sc->tbsyf = txpkt->next;
1388 if (sc->tactive < WX_MAX_TDESC - 1) {
1390 ifp->if_flags &= ~IFF_OACTIVE;
1393 /* used SW LED to indicate transmission not active */
1394 if (sc->tactive == 0 && sc->wx_mii) {
1395 WRITE_CSR(sc, WXREG_DCR,
1396 READ_CSR(sc, WXREG_DCR) & ~(WXDCR_SWDPIO0|WXDCR_SWDPIN0));
1402 * Periodic timer to update packet in/out/collision statistics,
1403 * and, more importantly, garbage collect completed transmissions
1404 * and to handle link status changes.
1406 #define WX_PRT_STATS(sc, y) printf("\t" # y " = %u\n", (sc)-> ## y )
1407 #define WX_CLR_STATS(sc, y) (sc)-> ## y = 0
1410 wx_watchdog(void *arg)
1412 wx_softc_t *sc = arg;
1415 if (sc->wx_needreinit) {
1417 if (wx_init(sc) == 0) {
1419 sc->wx_needreinit = 0;
1427 if (wx_dump_stats == device_get_unit(sc->w.dev)) {
1428 printf("%s: current statistics\n", sc->wx_name);
1429 WX_PRT_STATS(sc, wx_intr);
1430 WX_PRT_STATS(sc, wx_linkintr);
1431 WX_PRT_STATS(sc, wx_rxintr);
1432 WX_PRT_STATS(sc, wx_txqe);
1433 WX_PRT_STATS(sc, wx_xmitgc);
1434 WX_PRT_STATS(sc, wx_xmitpullup);
1435 WX_PRT_STATS(sc, wx_xmitcluster);
1436 WX_PRT_STATS(sc, wx_xmitputback);
1437 WX_PRT_STATS(sc, wx_xmitwanted);
1438 WX_PRT_STATS(sc, wx_xmitblocked);
1439 WX_PRT_STATS(sc, wx_xmitrunt);
1440 WX_PRT_STATS(sc, wx_rxnobuf);
1441 WX_PRT_STATS(sc, wx_oddpkt);
1444 if (wx_clr_stats == device_get_unit(sc->w.dev)) {
1445 printf("%s: statistics cleared\n", sc->wx_name);
1446 WX_CLR_STATS(sc, wx_intr);
1447 WX_CLR_STATS(sc, wx_linkintr);
1448 WX_CLR_STATS(sc, wx_rxintr);
1449 WX_CLR_STATS(sc, wx_txqe);
1450 WX_CLR_STATS(sc, wx_xmitgc);
1451 WX_CLR_STATS(sc, wx_xmitpullup);
1452 WX_CLR_STATS(sc, wx_xmitcluster);
1453 WX_CLR_STATS(sc, wx_xmitputback);
1454 WX_CLR_STATS(sc, wx_xmitwanted);
1455 WX_CLR_STATS(sc, wx_xmitblocked);
1456 WX_CLR_STATS(sc, wx_xmitrunt);
1457 WX_CLR_STATS(sc, wx_rxnobuf);
1458 WX_CLR_STATS(sc, wx_oddpkt);
1464 * Schedule another timeout one second from now.
1466 TIMEOUT(sc, wx_watchdog, sc, hz);
1470 * Stop and reinitialize the hardware
1473 wx_hw_stop(wx_softc_t *sc)
1476 DPRINTF(sc, ("%s: wx_hw_stop\n", sc->wx_name));
1478 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1481 WRITE_CSR(sc, WXREG_DCR, WXDCR_RST);
1483 icr = READ_CSR(sc, WXREG_ICR);
1484 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1490 wx_set_addr(wx_softc_t *sc, int idx, u_int8_t *mac)
1493 DPRINTF(sc, ("%s: wx_set_addr\n", sc->wx_name));
1494 t0 = (mac[0]) | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
1495 t1 = (mac[4] << 0) | (mac[5] << 8);
1497 WRITE_CSR(sc, WXREG_RAL_LO(idx), t0);
1498 WRITE_CSR(sc, WXREG_RAL_HI(idx), t1);
1502 wx_hw_initialize(wx_softc_t *sc)
1506 DPRINTF(sc, ("%s: wx_hw_initialize\n", sc->wx_name));
1508 WRITE_CSR(sc, WXREG_VET, 0);
1509 for (i = 0; i < (WX_VLAN_TAB_SIZE << 2); i += 4) {
1510 WRITE_CSR(sc, (WXREG_VFTA + i), 0);
1512 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1514 WRITE_CSR(sc, WXREG_RCTL, WXRCTL_RST);
1518 * Load the first receiver address with our MAC address,
1519 * and load as many multicast addresses as can fit into
1520 * the receive address array.
1522 wx_set_addr(sc, 0, sc->wx_enaddr);
1523 for (i = 1; i <= sc->wx_nmca; i++) {
1524 if (i >= WX_RAL_TAB_SIZE) {
1527 wx_set_addr(sc, i, sc->wx_mcaddr[i-1]);
1531 while (i < WX_RAL_TAB_SIZE) {
1532 WRITE_CSR(sc, WXREG_RAL_LO(i), 0);
1533 WRITE_CSR(sc, WXREG_RAL_HI(i), 0);
1537 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1538 WRITE_CSR(sc, WXREG_RCTL, 0);
1544 * Clear out the hashed multicast table array.
1546 for (i = 0; i < WX_MC_TAB_SIZE; i++) {
1547 WRITE_CSR(sc, WXREG_MTA + (sizeof (u_int32_t) * 4), 0);
1550 if (IS_LIVENGOOD_CU(sc)) {
1552 * has a PHY - raise its reset line to make it operational
1554 u_int32_t tmp = READ_CSR(sc, WXREG_EXCT);
1555 tmp |= WXPHY_RESET_DIR4;
1556 WRITE_CSR(sc, WXREG_EXCT, tmp);
1559 tmp = READ_CSR(sc, WXREG_EXCT);
1560 tmp &= ~WXPHY_RESET4;
1561 WRITE_CSR(sc, WXREG_EXCT, tmp);
1564 tmp = READ_CSR(sc, WXREG_EXCT);
1565 tmp |= WXPHY_RESET4;
1566 WRITE_CSR(sc, WXREG_EXCT, tmp);
1568 } else if (IS_LIVENGOOD(sc)) {
1572 * Handle link control
1574 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr | WXDCR_LRST);
1577 wx_read_eeprom(sc, &tew, WX_EEPROM_CTLR2_OFF, 1);
1578 tew = (tew & WX_EEPROM_CTLR2_SWDPIO) << WX_EEPROM_EXT_SHIFT;
1579 WRITE_CSR(sc, WXREG_EXCT, (u_int32_t)tew);
1582 if (sc->wx_dcr & (WXDCR_RFCE|WXDCR_TFCE)) {
1583 WRITE_CSR(sc, WXREG_FCAL, FC_FRM_CONST_LO);
1584 WRITE_CSR(sc, WXREG_FCAH, FC_FRM_CONST_HI);
1585 WRITE_CSR(sc, WXREG_FCT, FC_TYP_CONST);
1587 WRITE_CSR(sc, WXREG_FCAL, 0);
1588 WRITE_CSR(sc, WXREG_FCAH, 0);
1589 WRITE_CSR(sc, WXREG_FCT, 0);
1591 WRITE_CSR(sc, WXREG_FLOW_XTIMER, WX_XTIMER_DFLT);
1593 if (IS_WISEMAN(sc)) {
1594 if (sc->wx_idnrev < WX_WISEMAN_2_1) {
1595 WRITE_CSR(sc, WXREG_FLOW_RCV_HI, 0);
1596 WRITE_CSR(sc, WXREG_FLOW_RCV_LO, 0);
1597 sc->wx_dcr &= ~(WXDCR_RFCE|WXDCR_TFCE);
1599 WRITE_CSR(sc, WXREG_FLOW_RCV_HI, WX_RCV_FLOW_HI_DFLT);
1600 WRITE_CSR(sc, WXREG_FLOW_RCV_LO, WX_RCV_FLOW_LO_DFLT);
1603 WRITE_CSR(sc, WXREG_FLOW_RCV_HI_LIVENGOOD, WX_RCV_FLOW_HI_DFLT);
1604 WRITE_CSR(sc, WXREG_FLOW_RCV_LO_LIVENGOOD, WX_RCV_FLOW_LO_DFLT);
1607 if (!IS_LIVENGOOD_CU(sc))
1608 WRITE_CSR(sc, WXREG_XMIT_CFGW, WXTXCW_DEFAULT);
1610 WRITE_CSR(sc, WXREG_DCR, sc->wx_dcr);
1613 if (!IS_LIVENGOOD_CU(sc)) {
1615 * The pin stuff is all FM from the Linux driver.
1617 if ((READ_CSR(sc, WXREG_DCR) & WXDCR_SWDPIN1) == 0) {
1618 for (i = 0; i < (WX_LINK_UP_TIMEOUT/10); i++) {
1620 if (READ_CSR(sc, WXREG_DSR) & WXDSR_LU) {
1625 if (sc->linkup == 0) {
1631 printf("%s: SWDPIO1 did not clear- check for reversed "
1632 "or disconnected cable\n", sc->wx_name);
1633 /* but return okay anyway */
1637 sc->wx_ienable = WXIENABLE_DEFAULT;
1642 * Stop the interface. Cancels the statistics updater and resets the interface.
/*
 * wx_stop: stop the interface — cancel the statistics/watchdog timeout,
 * release transmit and receive mbufs, and clear RUNNING|OACTIVE.
 * NOTE(review): the loop bodies are elided in this listing.
 */
1645 wx_stop(wx_softc_t *sc)
1649 struct ifnet *ifp = &sc->wx_if;
1651 DPRINTF(sc, ("%s: wx_stop\n", sc->wx_name));
1653 * Cancel stats updater.
1655 UNTIMEOUT(wx_watchdog, sc, sc);
1663 * Release any xmit buffers.
/* Walk the whole transmit descriptor shadow array. */
1665 for (txp = sc->tbase; txp && txp < &sc->tbase[WX_MAX_TDESC]; txp++) {
1673 * Free all the receive buffers.
1675 for (rxp = sc->rbase; rxp && rxp < &sc->rbase[WX_MAX_RDESC]; rxp++) {
/* Also drop any partially-reassembled pending receive mbuf chain. */
1683 m_freem(sc->rpending);
1684 sc->rpending = NULL;
1688 * And we're outta here...
1691 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
/*
 * wx_txwatchdog: if_watchdog hook — called when a transmit times out.
 * Reports the timeout and (in elided code) attempts a re-init; if that
 * fails, wx_needreinit defers the re-init to a later attempt.
 */
1699 wx_txwatchdog(struct ifnet *ifp)
1701 wx_softc_t *sc = SOFTC_IFP(ifp);
1702 printf("%s: device timeout\n", sc->wx_name);
/* Reached when the (elided) re-init attempt failed. */
1705 printf("%s: could not re-init device\n", sc->wx_name);
1706 sc->wx_needreinit = 1;
/*
 * wx_init: (re)initialize the interface — reset hardware, build the
 * receive descriptor ring, program transmit/receive ring registers
 * (Wiseman vs. Livengood register layouts differ), set media, mark the
 * interface RUNNING and restart the watchdog.
 * NOTE(review): many lines are elided in this listing; comments describe
 * only visible statements.
 */
1713 struct ifmedia *ifm;
1714 wx_softc_t *sc = xsc;
1715 struct ifnet *ifp = &sc->wx_if;
1721 DPRINTF(sc, ("%s: wx_init\n", sc->wx_name));
1725 * Cancel any pending I/O by resetting things.
1726 * wx_stop will free any allocated mbufs.
1731 * Reset the hardware. All network addresses loaded here, but
1732 * neither the receiver nor the transmitter are enabled.
1735 if (wx_hw_initialize(sc)) {
1736 DPRINTF(sc, ("%s: wx_hw_initialize failed\n", sc->wx_name));
1742 * Set up the receive ring stuff.
1744 len = sizeof (wxrd_t) * WX_MAX_RDESC;
1745 bzero(sc->rdescriptors, len);
/* Only every RXINCR-th descriptor gets a buffer — see the file-head note
 * about using every other 16-byte receive descriptor for efficiency. */
1746 for (rxpkt = sc->rbase, i = 0; rxpkt != NULL && i < WX_MAX_RDESC;
1747 i += RXINCR, rxpkt++) {
1748 rd = &sc->rdescriptors[i];
1749 if (wx_get_rbuf(sc, rxpkt)) {
/* Chip DMA target: cluster physical address plus the RX offset; the
 * alignment is recovered at interrupt time (see comment above wx_get_rbuf). */
1752 rd->address.lowpart = rxpkt->dma_addr + WX_RX_OFFSET_VALUE;
/* If the loop exited early, an rbuf allocation failed. */
1754 if (i != WX_MAX_RDESC) {
1755 printf("%s: could not set up rbufs\n", sc->wx_name);
1762 * Set up transmit parameters and enable the transmitter.
1764 sc->tnxtfree = sc->tactive = 0;
1765 sc->tbsyf = sc->tbsyl = NULL;
1766 WRITE_CSR(sc, WXREG_TCTL, 0);
/* Wiseman and Livengood use different TX ring register addresses. */
1768 if (IS_WISEMAN(sc)) {
1769 WRITE_CSR(sc, WXREG_TDBA_LO,
1770 vtophys((vm_offset_t)&sc->tdescriptors[0]));
1771 WRITE_CSR(sc, WXREG_TDBA_HI, 0);
1772 WRITE_CSR(sc, WXREG_TDLEN, WX_MAX_TDESC * sizeof (wxtd_t));
1773 WRITE_CSR(sc, WXREG_TDH, 0);
1774 WRITE_CSR(sc, WXREG_TDT, 0);
1775 WRITE_CSR(sc, WXREG_TQSA_HI, 0);
1776 WRITE_CSR(sc, WXREG_TQSA_LO, 0);
1777 WRITE_CSR(sc, WXREG_TIPG, WX_WISEMAN_TIPG_DFLT);
1778 WRITE_CSR(sc, WXREG_TIDV, wx_txint_delay);
1780 WRITE_CSR(sc, WXREG_TDBA_LO_LIVENGOOD,
1781 vtophys((vm_offset_t)&sc->tdescriptors[0]));
1782 WRITE_CSR(sc, WXREG_TDBA_HI_LIVENGOOD, 0);
1783 WRITE_CSR(sc, WXREG_TDLEN_LIVENGOOD,
1784 WX_MAX_TDESC * sizeof (wxtd_t));
1785 WRITE_CSR(sc, WXREG_TDH_LIVENGOOD, 0);
1786 WRITE_CSR(sc, WXREG_TDT_LIVENGOOD, 0);
1787 WRITE_CSR(sc, WXREG_TQSA_HI, 0);
1788 WRITE_CSR(sc, WXREG_TQSA_LO, 0);
1789 WRITE_CSR(sc, WXREG_TIPG, WX_LIVENGOOD_TIPG_DFLT);
1790 WRITE_CSR(sc, WXREG_TIDV_LIVENGOOD, wx_txint_delay);
/* Enable transmitter with collision threshold/distance for full duplex. */
1792 WRITE_CSR(sc, WXREG_TCTL, (WXTCTL_CT(WX_COLLISION_THRESHOLD) |
1793 WXTCTL_COLD(WX_FDX_COLLISION_DX) | WXTCTL_EN));
1795 * Set up receive parameters and enable the receiver.
1799 WRITE_CSR(sc, WXREG_RCTL, 0);
1801 if (IS_WISEMAN(sc)) {
1802 WRITE_CSR(sc, WXREG_RDTR0, WXRDTR_FPD);
1803 WRITE_CSR(sc, WXREG_RDBA0_LO,
1804 vtophys((vm_offset_t)&sc->rdescriptors[0]));
1805 WRITE_CSR(sc, WXREG_RDBA0_HI, 0);
1806 WRITE_CSR(sc, WXREG_RDLEN0, WX_MAX_RDESC * sizeof (wxrd_t));
1807 WRITE_CSR(sc, WXREG_RDH0, 0);
/* Tail is left one RXINCR stride short of head so the ring never fills. */
1808 WRITE_CSR(sc, WXREG_RDT0, (WX_MAX_RDESC - RXINCR));
1811 * The delay should yield ~10us receive interrupt delay
1813 WRITE_CSR(sc, WXREG_RDTR0_LIVENGOOD, WXRDTR_FPD | 0x40);
1814 WRITE_CSR(sc, WXREG_RDBA0_LO_LIVENGOOD,
1815 vtophys((vm_offset_t)&sc->rdescriptors[0]));
1816 WRITE_CSR(sc, WXREG_RDBA0_HI_LIVENGOOD, 0);
1817 WRITE_CSR(sc, WXREG_RDLEN0_LIVENGOOD,
1818 WX_MAX_RDESC * sizeof (wxrd_t));
1819 WRITE_CSR(sc, WXREG_RDH0_LIVENGOOD, 0);
1820 WRITE_CSR(sc, WXREG_RDT0_LIVENGOOD, (WX_MAX_RDESC - RXINCR));
/* Second receive ring unused: zero its registers. */
1822 WRITE_CSR(sc, WXREG_RDTR1, 0);
1823 WRITE_CSR(sc, WXREG_RDBA1_LO, 0);
1824 WRITE_CSR(sc, WXREG_RDBA1_HI, 0);
1825 WRITE_CSR(sc, WXREG_RDLEN1, 0);
1826 WRITE_CSR(sc, WXREG_RDH1, 0);
1827 WRITE_CSR(sc, WXREG_RDT1, 0);
/* Long Packet Enable only when MTU exceeds standard Ethernet. */
1829 if (ifp->if_mtu > ETHERMTU) {
1830 bflags = WXRCTL_EN | WXRCTL_LPE | WXRCTL_2KRBUF;
1832 bflags = WXRCTL_EN | WXRCTL_2KRBUF;
/* Enable receiver; mirror interface flags into broadcast/promisc/multi. */
1835 WRITE_CSR(sc, WXREG_RCTL, bflags |
1836 ((ifp->if_flags & IFF_BROADCAST) ? WXRCTL_BAM : 0) |
1837 ((ifp->if_flags & IFF_PROMISC) ? WXRCTL_UPE : 0) |
1838 ((sc->all_mcasts) ? WXRCTL_MPE : 0));
/* Copper parts defer media selection to the MII layer. */
1846 mii_mediachg(WX_MII_FROM_SOFTC(sc));
1848 ifm = &sc->wx_media;
1850 ifm->ifm_media = ifm->ifm_cur->ifm_media;
1851 wx_ifmedia_upd(ifp);
1856 * Mark that we're up and running...
1858 ifp->if_flags |= IFF_RUNNING;
1859 ifp->if_flags &= ~IFF_OACTIVE;
1863 * Start stats updater.
1865 TIMEOUT(sc, wx_watchdog, sc, hz);
1869 * And we're outta here...
1875 * Get a receive buffer for our use (and dma map the data area).
1877 * The Wiseman chip can have buffers be 256, 512, 1024 or 2048 bytes in size.
1878 * The LIVENGOOD chip can go higher (up to 16K), but what's the point as
1879 * we aren't doing non-MCLGET memory management.
1881 * It wants them aligned on 256 byte boundaries, but can actually cope
1882 * with an offset in the first 255 bytes of the head of a receive frame.
1884 * We'll allocate a MCLBYTE sized cluster but *not* adjust the data pointer
1885 * by any alignment value. Instead, we'll tell the chip to offset by any
1886 * alignment and we'll catch the alignment on the backend at interrupt time.
/*
 * wx_rxdma_map: record the physical (DMA) address of the mbuf's data area
 * in the receive packet bookkeeping structure.
 */
1889 wx_rxdma_map(wx_softc_t *sc, rxpkt_t *rxpkt, struct mbuf *mb)
1892 rxpkt->dma_addr = vtophys(mtod(mb, vm_offset_t));
/*
 * wx_get_rbuf: allocate a receive mbuf with a cluster attached and DMA-map
 * it for the chip.  Per the comment above, the data pointer is NOT adjusted
 * for alignment here; the chip offsets the frame and the interrupt handler
 * compensates.  Returns nonzero on allocation failure (elided paths).
 */
1896 wx_get_rbuf(wx_softc_t *sc, rxpkt_t *rxpkt)
1899 MGETHDR(mb, M_DONTWAIT, MT_DATA);
1904 MCLGET(mb, M_DONTWAIT);
/* M_EXT not set means the cluster attach failed. */
1905 if ((mb->m_flags & M_EXT) == 0) {
1910 wx_rxdma_map(sc, rxpkt, mb);
/*
 * wx_ioctl: interface ioctl handler (MTU, flags, multicast list, media).
 * NOTE(review): the switch/case labels are elided in this listing; the
 * visible statements correspond to the usual SIOCSIFMTU / SIOCSIFFLAGS /
 * SIOC{ADD,DEL}MULTI / SIOC[GS]IFMEDIA cases.
 */
1915 wx_ioctl(struct ifnet *ifp, IOCTL_CMD_TYPE command, caddr_t data)
1917 wx_softc_t *sc = SOFTC_IFP(ifp);
1918 struct ifreq *ifr = (struct ifreq *) data;
1925 error = ether_ioctl(ifp, command, data);
/* MTU change: validate range, re-init only if the value actually changed. */
1928 if (ifr->ifr_mtu > WX_MAXMTU || ifr->ifr_mtu < ETHERMIN) {
1930 } else if (ifp->if_mtu != ifr->ifr_mtu) {
1931 ifp->if_mtu = ifr->ifr_mtu;
1932 error = wx_init(sc);
1936 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1939 * If interface is marked up and not running, then start it.
1940 * If it is marked down and running, stop it.
1941 * If it's up then re-initialize it. This is so flags
1942 * such as IFF_PROMISC are handled.
1944 if (ifp->if_flags & IFF_UP) {
1945 if ((ifp->if_flags & IFF_RUNNING) == 0) {
1946 error = wx_init(sc);
1949 if (ifp->if_flags & IFF_RUNNING) {
/* Multicast list changed: reload the hardware filter. */
1957 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1958 error = wx_mc_setup(sc);
1962 DPRINTF(sc, ("%s: ioctl SIOC[GS]IFMEDIA: command=%#lx\n",
1963 sc->wx_name, command));
/* Copper parts route media ioctls through the MII layer. */
1965 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
1966 error = ifmedia_ioctl(ifp, ifr,
1967 &mii->mii_media, command);
1969 error = ifmedia_ioctl(ifp, ifr, &sc->wx_media, command);
/*
 * wx_ifmedia_upd: ifmedia change callback.  Copper parts hand off to the
 * MII layer; fiber parts validate that the requested media is Ethernet
 * (further handling elided in this listing).
 */
1982 wx_ifmedia_upd(struct ifnet *ifp)
1984 struct wx_softc *sc = SOFTC_IFP(ifp);
1985 struct ifmedia *ifm;
1987 DPRINTF(sc, ("%s: ifmedia_upd\n", sc->wx_name));
1990 mii_mediachg(WX_MII_FROM_SOFTC(sc));
1994 ifm = &sc->wx_media;
/* Reject non-Ethernet media words. */
1996 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
2004 wx_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2007 struct wx_softc *sc = SOFTC_IFP(ifp);
2009 DPRINTF(sc, ("%s: ifmedia_sts: ", sc->wx_name));
2012 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
2014 ifmr->ifm_active = mii->mii_media_active;
2015 ifmr->ifm_status = mii->mii_media_status;
2016 DPRINTF(sc, ("active=%#x status=%#x\n",
2017 ifmr->ifm_active, ifmr->ifm_status));
2021 DPRINTF(sc, ("\n"));
2022 ifmr->ifm_status = IFM_AVALID;
2023 ifmr->ifm_active = IFM_ETHER;
2025 if (sc->linkup == 0)
2028 ifmr->ifm_status |= IFM_ACTIVE;
2029 dsr = READ_CSR(sc, WXREG_DSR);
2030 if (IS_LIVENGOOD(sc)) {
2031 if (dsr & WXDSR_1000BT) {
2032 if (IS_LIVENGOOD_CU(sc)) {
2033 ifmr->ifm_status |= IFM_1000_TX;
2036 ifmr->ifm_status |= IFM_1000_SX;
2038 } else if (dsr & WXDSR_100BT) {
2039 ifmr->ifm_status |= IFM_100_FX; /* ?? */
2041 ifmr->ifm_status |= IFM_10_T; /* ?? */
2044 ifmr->ifm_status |= IFM_1000_SX;
2046 if (dsr & WXDSR_FD) {
2047 ifmr->ifm_active |= IFM_FDX;
/*
 * Bit-bang MDIO clock helpers: toggle the MDC bit in the Device Control
 * Register, then settle for 2us.  Wrapped in do/while(0) so each macro
 * behaves as a single statement in any context (if/else without braces),
 * instead of relying on the comma operator.
 */
#define RAISE_CLOCK(sc, dcr) \
	do { WRITE_CSR(sc, WXREG_DCR, (dcr) | WXPHY_MDC); DELAY(2); } while (0)

#define LOWER_CLOCK(sc, dcr) \
	do { WRITE_CSR(sc, WXREG_DCR, (dcr) & ~WXPHY_MDC); DELAY(2); } while (0)
/*
 * wx_mii_shift_in: bit-bang a 16-bit value in from the PHY over the MDIO
 * pins (MSB first), clocking with RAISE_CLOCK/LOWER_CLOCK, then issue one
 * trailing idle clock.  Returns the assembled word (return elided here).
 */
2059 wx_mii_shift_in(wx_softc_t *sc)
/* Tristate MDIO (input direction, data bit low) before sampling. */
2064 dcr = READ_CSR(sc, WXREG_DCR);
2065 dcr &= ~(WXPHY_MDIO_DIR | WXPHY_MDIO);
2066 WRITE_CSR(sc, WXREG_DCR, dcr);
/* One turnaround clock before the data bits. */
2067 RAISE_CLOCK(sc, dcr);
2068 LOWER_CLOCK(sc, dcr);
2070 for (i = 0; i < 16; i++) {
2072 RAISE_CLOCK(sc, dcr);
/* Sample MDIO while the clock is high. */
2073 dcr = READ_CSR(sc, WXREG_DCR);
2075 if (dcr & WXPHY_MDIO)
2078 LOWER_CLOCK(sc, dcr);
/* Trailing idle clock cycle. */
2081 RAISE_CLOCK(sc, dcr);
2082 LOWER_CLOCK(sc, dcr);
/*
 * wx_mii_shift_out: bit-bang the low 'count' bits of 'data' out to the PHY
 * over the MDIO pins, most-significant bit first, clocking each bit with
 * RAISE_CLOCK/LOWER_CLOCK.
 */
2087 wx_mii_shift_out(wx_softc_t *sc, u_int32_t data, u_int32_t count)
2089 u_int32_t dcr, mask;
/* Drive both MDIO and MDC as outputs. */
2091 dcr = READ_CSR(sc, WXREG_DCR);
2092 dcr |= WXPHY_MDIO_DIR | WXPHY_MDC_DIR;
/* Walk a one-bit mask from the MSB of the field down to bit 0. */
2094 for (mask = (1 << (count - 1)); mask; mask >>= 1) {
2100 WRITE_CSR(sc, WXREG_DCR, dcr);
2102 RAISE_CLOCK(sc, dcr);
2103 LOWER_CLOCK(sc, dcr);
/*
 * wx_miibus_readreg: MII bus read callback — bit-bang a Clause-22 read
 * frame (preamble, SOF, read op, phy, reg) and shift the 16-bit result in.
 * Non-copper parts have no PHY (early-return path elided).
 */
2108 wx_miibus_readreg(void *arg, int phy, int reg)
2110 wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
2111 unsigned int data = 0;
/* No PHY on non-copper (fiber) parts. */
2113 if (!IS_LIVENGOOD_CU(sc)) {
2116 wx_mii_shift_out(sc, WXPHYC_PREAMBLE, WXPHYC_PREAMBLE_LEN);
/* 14-bit header: reg[4:0], phy<<5, read opcode<<10, start-of-frame<<12. */
2117 wx_mii_shift_out(sc, reg | (phy << 5) | (WXPHYC_READ << 10) |
2118 (WXPHYC_SOF << 12), 14);
2119 data = wx_mii_shift_in(sc);
2120 return (data & WXMDIC_DATA_MASK);
/*
 * wx_miibus_writereg: MII bus write callback — bit-bang a full 32-bit
 * Clause-22 write frame (SOF, write op, phy, reg, turnaround, data).
 * Non-copper parts have no PHY (early-return path elided).
 */
2124 wx_miibus_writereg(void *arg, int phy, int reg, int data)
2126 wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
2127 if (!IS_LIVENGOOD_CU(sc)) {
2130 wx_mii_shift_out(sc, WXPHYC_PREAMBLE, WXPHYC_PREAMBLE_LEN);
/* Frame layout: data[15:0], TA<<16, reg<<18, phy<<23, op<<28, SOF<<30. */
2131 wx_mii_shift_out(sc, (u_int32_t)data | (WXPHYC_TURNAROUND << 16) |
2132 (reg << 18) | (phy << 23) | (WXPHYC_WRITE << 28) |
2133 (WXPHYC_SOF << 30), 32);
/*
 * wx_miibus_statchg: MII status-change callback.  Mirrors the PHY's
 * negotiated speed/duplex/flow-control into the MAC's DCR and TCTL
 * registers (the MAC must be forced to match the PHY on copper parts).
 * NOTE(review): several lines are elided in this listing.
 */
2138 wx_miibus_statchg(void *arg)
2140 wx_softc_t *sc = WX_SOFTC_FROM_MII_ARG(arg);
2141 mii_data_t *mii = WX_MII_FROM_SOFTC(sc);
2142 u_int32_t dcr, tctl;
2148 tctl = READ_CSR(sc, WXREG_TCTL);
2149 DPRINTF(sc, ("%s: statchg dcr=%#x tctl=%#x", sc->wx_name, dcr, tctl));
/* Force speed/duplex from software; disable auto-speed detection. */
2151 dcr |= WXDCR_FRCSPD | WXDCR_FRCDPX | WXDCR_SLU;
2152 dcr &= ~(WXDCR_SPEED_MASK | WXDCR_ASDE /* | WXDCR_ILOS */);
2154 if (mii->mii_media_status & IFM_ACTIVE) {
2155 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE) {
2156 DPRINTF(sc, (" link-down\n"));
/* Translate the active media subtype into DCR speed bits. */
2164 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX) {
2165 DPRINTF(sc, (" 1000TX"));
2166 dcr |= WXDCR_1000BT;
2167 } else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2168 DPRINTF(sc, (" 100TX"));
2170 } else /* assume IFM_10_TX */ {
2171 DPRINTF(sc, (" 10TX"));
/* Collision distance differs between full and half duplex. */
2175 if (mii->mii_media_active & IFM_FDX) {
2176 DPRINTF(sc, ("-FD"));
2177 tctl = WXTCTL_CT(WX_COLLISION_THRESHOLD) |
2178 WXTCTL_COLD(WX_FDX_COLLISION_DX) | WXTCTL_EN;
2181 DPRINTF(sc, ("-HD"));
2182 tctl = WXTCTL_CT(WX_COLLISION_THRESHOLD) |
2183 WXTCTL_COLD(WX_HDX_COLLISION_DX) | WXTCTL_EN;
2187 /* FLAG0==rx-flow-control FLAG1==tx-flow-control */
2188 if (mii->mii_media_active & IFM_FLAG0) {
2194 if (mii->mii_media_active & IFM_FLAG1) {
/* Program pause-frame constants only when flow control is enabled. */
2200 if (dcr & (WXDCR_RFCE|WXDCR_TFCE)) {
2201 WRITE_CSR(sc, WXREG_FCAL, FC_FRM_CONST_LO);
2202 WRITE_CSR(sc, WXREG_FCAH, FC_FRM_CONST_HI);
2203 WRITE_CSR(sc, WXREG_FCT, FC_TYP_CONST);
2205 WRITE_CSR(sc, WXREG_FCAL, 0);
2206 WRITE_CSR(sc, WXREG_FCAH, 0);
2207 WRITE_CSR(sc, WXREG_FCT, 0);
2210 DPRINTF(sc, (" dcr=%#x tctl=%#x\n", dcr, tctl));
2211 WRITE_CSR(sc, WXREG_TCTL, tctl);
2213 WRITE_CSR(sc, WXREG_DCR, dcr);
2217 wx_miibus_mediainit(void *arg)