/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/et/if_etreg.h>
#include <dev/netif/et/if_etvar.h>

#include "miibus_if.h"

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *, int);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static int	et_jumbo_mem_alloc(device_t);
static void	et_jumbo_mem_free(device_t);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static struct et_jslot *
		et_jalloc(struct et_jumbo_data *);
static void	et_jfree(void *);
static void	et_jref(void *);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
#if 0
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),
#endif

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	DEVMETHOD_END
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DECLARE_DUMMY_MODULE(if_et);
MODULE_DEPEND(if_et, miibus, 1, 1, 1);
DRIVER_MODULE(if_et, pci, et_driver, et_devclass, NULL, NULL);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, NULL, NULL);

static int	et_rx_intr_npkts = 129;
static int	et_rx_intr_delay = 25;		/* x4 usec */
static int	et_tx_intr_nsegs = 256;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);

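/*
 * The four knobs above are boot-time tunables; for example, putting
 * hw.et.rx_intr_npkts="64" into loader.conf makes the chip post an RX
 * interrupt after fewer packets, trading CPU load for latency.  (64 is
 * only an illustrative value, not a recommendation from this driver.)
 */
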
static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,	.jumbo = 0,
	  .newbuf = et_newbuf_cluster },
};

static const struct et_bsize	et_bufsize_jumbo[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_16384,	.jumbo = 1,
	  .newbuf = et_newbuf_jumbo },
};

static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto back;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto back;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree),
	    OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, et_sysctl_rx_intr_npkts, "I",
	    "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree),
	    OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
	    "timer", CTLFLAG_RW, &sc->sc_timer, 0,
	    "TX timer");

	error = et_bus_config(dev);
	if (error)
		goto back;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
	    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto back;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC);
	ifq_set_ready(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
	    et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto back;
	}

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc,
	    &sc->sc_irq_handle, ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto back;
	}

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));

	return 0;
back:
	et_detach(dev);
	return error;
}

static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
		    sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
		    sc->sc_mem_res);
	}

	et_dma_free(dev);

	return 0;
}

static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	et_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
		    "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
		    "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}

static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
		    ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return 0;
}

static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}

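/*
 * Byte-order illustration (follows directly from the shifts above):
 * with ET_PCIR_MAC_ADDR0 == 0xddccbbaa and ET_PCIR_MAC_ADDR1 == 0xffee,
 * the station address comes out as aa:bb:cc:dd:ee:ff, i.e. both config
 * words are unpacked least-significant byte first.
 */
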
static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

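/*
 * ET_INTR_MASK is active-low: a 0 bit unmasks the corresponding source.
 * So et_disable_intrs() writes all ones, while, for example,
 * et_enable_intrs(sc, ET_INTR_RXEOF | ET_INTR_TXEOF) writes the
 * complement, leaving only those two sources enabled.
 */
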
static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXSIZE_32BIT,
	    0,
	    BUS_SPACE_MAXSIZE_32BIT,
	    0, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	tx_ring->tr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
	    ET_ALIGN, ET_TX_RING_SIZE,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &tx_ring->tr_dtag, &tx_ring->tr_dmap,
	    &tx_ring->tr_paddr);
	if (tx_ring->tr_desc == NULL) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	txsd->txsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
	    ET_ALIGN, sizeof(uint32_t),
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &txsd->txsd_dtag, &txsd->txsd_dmap,
	    &txsd->txsd_paddr);
	if (txsd->txsd_status == NULL) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		rx_ring->rr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
		    ET_ALIGN, ET_RX_RING_SIZE,
		    BUS_DMA_WAITOK | BUS_DMA_ZERO,
		    &rx_ring->rr_dtag, &rx_ring->rr_dmap,
		    &rx_ring->rr_paddr);
		if (rx_ring->rr_desc == NULL) {
			device_printf(dev, "can't create DMA stuffs for "
			    "the %d RX ring\n", i);
			return ENOMEM;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	rxst_ring->rsr_stat = bus_dmamem_coherent_any(sc->sc_dtag,
	    ET_ALIGN, ET_RXSTAT_RING_SIZE,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &rxst_ring->rsr_dtag, &rxst_ring->rsr_dmap,
	    &rxst_ring->rsr_paddr);
	if (rxst_ring->rsr_stat == NULL) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	rxsd->rxsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
	    ET_ALIGN, sizeof(struct et_rxstatus),
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &rxsd->rxsd_dtag, &rxsd->rxsd_dmap,
	    &rxsd->rxsd_paddr);
	if (rxsd->rxsd_status == NULL) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return error;

	/*
	 * Create jumbo buffer DMA stuffs
	 * NOTE: Allow it to fail
	 */
	if (et_jumbo_mem_alloc(dev) == 0)
		sc->sc_flags |= ET_FLAG_JUMBO;

	return 0;
}

static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
	    tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
	    txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
		    rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
	    rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
	    rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy jumbo buffer DMA stuffs
	 */
	if (sc->sc_flags & ET_FLAG_JUMBO)
		et_jumbo_mem_free(dev);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}

static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create RX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    MCLBYTES, 1, MCLBYTES,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
	    &sc->sc_rxbuf_dtag);
	if (error) {
		device_printf(dev, "can't create RX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_rxbuf_dtag, BUS_DMA_WAITOK,
	    &sc->sc_rxbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_rxbuf_dtag,
			    BUS_DMA_WAITOK,
			    &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
				    "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create TX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    ET_JUMBO_FRAMELEN, ET_NSEG_MAX, MCLBYTES,
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
	    BUS_DMA_ONEBPAGE,
	    &sc->sc_txbuf_dtag);
	if (error) {
		device_printf(dev, "can't create TX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_txbuf_dtag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
			    "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}

static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA tag and maps for RX mbufs
	 */
	if (sc->sc_rxbuf_dtag) {
		for (i = 0; i < ET_RX_NRING; ++i) {
			struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
			int j;

			for (j = 0; j < rx_done[i]; ++j) {
				struct et_rxbuf *rb = &rbd->rbd_buf[j];

				KASSERT(rb->rb_mbuf == NULL,
				    ("RX mbuf in %d RX ring is "
				     "not freed yet", i));
				bus_dmamap_destroy(sc->sc_rxbuf_dtag,
				    rb->rb_dmap);
			}
		}
		bus_dmamap_destroy(sc->sc_rxbuf_dtag, sc->sc_rxbuf_tmp_dmap);
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
	}

	/*
	 * Destroy DMA tag and maps for TX mbufs
	 */
	if (sc->sc_txbuf_dtag) {
		for (i = 0; i < tx_done; ++i) {
			struct et_txbuf *tb = &tbd->tbd_buf[i];

			KASSERT(tb->tb_mbuf == NULL,
			    ("TX mbuf is not freed yet"));
			bus_dmamap_destroy(sc->sc_txbuf_dtag, tb->tb_dmap);
		}
		bus_dma_tag_destroy(sc->sc_txbuf_dtag);
		sc->sc_txbuf_dtag = NULL;
	}
}

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t intrs;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc, 1);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
}

static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const struct et_bsize *arr;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	et_stop(sc);

	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
	      et_bufsize_std : et_bufsize_jumbo;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
back:
	if (error)
		et_stop(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	trans = 0;
	for (;;) {
		struct mbuf *m;
		int error;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			/* No free TX descriptors left */
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd, NULL);
		if (m == NULL)
			break;

		error = et_encap(sc, &m);
		if (error) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			KKASSERT(m == NULL);

			if (error == EFBIG) {
				/*
				 * Excessively fragmented packets
				 */
				ifq_set_oactive(&ifp->if_snd);
				break;
			}
			continue;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		ifp->if_timer = ET_TX_TIMEOUT;
}

static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
	if_devstart(ifp);
}

static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
	    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
	    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}
	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				if (!rbd->rbd_jumbo) {
					bus_dmamap_unload(sc->sc_rxbuf_dtag,
					    rb->rb_dmap);
				}
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}
		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
	}
}

static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

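/*
 * Hash illustration (follows from the code above): bits 29-23 of the
 * big-endian CRC32 give a bucket h in [0, 127].  For example h == 70
 * falls in the 64-95 range, so bit (70 - 64) == 6 of hash[2] is set
 * and the value lands in the third ET_MULTI_HASH register.
 */
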
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

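/*
 * Memory-split sketch for the rules above: with a standard 1500 byte
 * MTU, ET_FRAMELEN() stays under 2048, so RX keeps ET_MEM_RXSIZE_DEFAULT
 * and TX gets the remainder of the 16KB queue memory.  Only frames
 * beyond ET_RXMAC_CUT_THRU_FRMLEN shrink the RX queue by
 * roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT).
 */
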
static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));

	return 0;
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(&sc->arpcom.ac_if, "%d ring %d buf, "
				    "newbuf failed: %d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);

	return 0;
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
	      __SHIFTIN(80, ET_IPG_MINIFG) |
	      __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
	    ET_RXMAC_MGT_PASS_ECRC |
	    ET_RXMAC_MGT_PASS_ELEN |
	    ET_RXMAC_MGT_PASS_ETRUNC |
	    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generation)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
	    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
	    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
	    ET_RXDMA_CTRL_RING0_SIZE) |
	    ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
	    ET_RXDMA_CTRL_RING1_SIZE) |
	    ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);
	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
	    ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd(ifp);
	else
		et_setmedia(sc);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return 0;
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}

static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
		    ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				IFNET_STAT_INC(ifp, ierrors, 1);
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				IFNET_STAT_INC(ifp, ipackets, 1);
				ifp->if_input(ifp, m);
			}
		} else {
			IFNET_STAT_INC(ifp, ierrors, 1);
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
			    "buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, nsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
	    ("not enough spare TX desc (%d)", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_defrag(sc->sc_txbuf_dtag, map, m0,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(sc->sc_txbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = *m0;

	tbd->tbd_used += nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
	    ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(*m0);
		*m0 = NULL;
	}
	return error;
}

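/*
 * TX interrupt moderation sketch: sc_tx above counts all segments ever
 * queued, so ET_TDCTRL2_INTR is requested only when that running total
 * crosses a multiple of sc_tx_intr_nsegs -- with the default of 256,
 * roughly one TX completion interrupt per 256 segments.
 */
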
static void
et_txeof(struct et_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		    sizeof(struct et_txdesc));

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			IFNET_STAT_INC(ifp, opackets, 1);
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		ifp->if_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifq_clr_oactive(&ifp->if_snd);

	if (start)
		if_devstart(ifp);
}

static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			if_devstart(ifp);
	}
	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len, nseg;

	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if,
			    "m_getl failed, size %d\n", len0);
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	error = bus_dmamap_load_mbuf_segment(sc->sc_rxbuf_dtag,
	    sc->sc_rxbuf_tmp_dmap, m, &seg, 1, &nseg,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_rxbuf_dtag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rxbuf_tmp_dmap;
	sc->sc_rxbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
	    ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
	    __SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int i;

	jd->jd_buf = bus_dmamem_coherent_any(sc->sc_dtag,
	    ET_JUMBO_ALIGN, ET_JUMBO_MEM_SIZE, BUS_DMA_WAITOK,
	    &jd->jd_dtag, &jd->jd_dmap, &paddr);
	if (jd->jd_buf == NULL) {
		device_printf(dev, "can't create jumbo DMA stuffs\n");
		return ENOMEM;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}

static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}

static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);
	return jslot;
}

static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
			    jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}

static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed", __func__);

	atomic_add_int(&jslot->jslot_inuse, 1);
}

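/*
 * The jumbo path hands these wired slots to the mbuf system as external
 * storage: et_newbuf_jumbo() below points m_ext at jslot_buf and
 * registers et_jref()/et_jfree(), so jslot_inuse is bumped for every
 * additional mbuf reference and the slot only returns to jd_free_slots
 * once the count drops back to zero.
 */
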
static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;
		}
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
			    "jslot allocation failed\n");
			return error;
		} else {
			goto back;
		}
	}

	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
}