/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/et/if_etreg.h>
#include <dev/netif/et/if_etvar.h>

#include "miibus_if.h"

static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	et_start(struct ifnet *, struct ifaltq_subque *);
static void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *, int);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static int	et_jumbo_mem_alloc(device_t);
static void	et_jumbo_mem_free(device_t);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static struct et_jslot *
		et_jalloc(struct et_jumbo_data *);
static void	et_jfree(void *);
static void	et_jref(void *);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);

static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};

static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
#if 0
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),
#endif

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	DEVMETHOD_END
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;

DECLARE_DUMMY_MODULE(if_et);
MODULE_DEPEND(if_et, miibus, 1, 1, 1);
DRIVER_MODULE(if_et, pci, et_driver, et_devclass, NULL, NULL);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, NULL, NULL);

static int	et_rx_intr_npkts = 129;
static int	et_rx_intr_delay = 25;		/* x4 usec */
static int	et_tx_intr_nsegs = 256;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

static int	et_msi_enable = 1;

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
TUNABLE_INT("hw.et.msi.enable", &et_msi_enable);

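/*
 * Interrupt moderation defaults: the RX side raises one interrupt per
 * et_rx_intr_npkts received packets, or once the RX delay timer expires,
 * whichever comes first (the delay unit is chip-defined; the comments in
 * this file variously describe it as 4 or 10 usec per tick).  The TX side
 * requests a completion interrupt roughly every et_tx_intr_nsegs
 * descriptors; see et_encap().  All of the above may be overridden from
 * loader.conf through the hw.et.* tunables.
 */
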
static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,	.jumbo = 0,
	  .newbuf = et_newbuf_cluster },
};

static const struct et_bsize	et_bufsize_jumbo[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_16384,	.jumbo = 1,
	  .newbuf = et_newbuf_jumbo },
};

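/*
 * NOTE on the tables above: each RX descriptor ring is backed by its own
 * buffer size -- ring 0 by small 128 byte header mbufs, ring 1 by 2KB
 * clusters, or by 16KB jumbo slots once the MTU no longer fits a cluster.
 * These sizes are programmed into ET_RXDMA_CTRL by et_start_rxdma().
 */
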
static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error, irq_flags;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_type = pci_alloc_1intr(dev, et_msi_enable,
	    &sc->sc_irq_rid, &irq_flags);
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, irq_flags);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
			OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_npkts, "I",
			"RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
			OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_delay, "I",
			"RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
		       "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"timer", CTLFLAG_RW, &sc->sc_timer, 0,
			"TX timer");

	error = et_bus_config(dev);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC);
	ifq_set_ready(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
			      et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr, NULL);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->sc_irq_res));

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc,
			       &sc->sc_irq_handle, ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	return 0;
fail:
	et_detach(dev);
	return error;
}

static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}
	if (sc->sc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	et_dma_free(dev);

	return 0;
}

static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	et_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
	return 0;
}

static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}

static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}

static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}

static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return 0;
}

static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}

static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

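/*
 * NOTE: ET_INTR_MASK is a disable mask -- a 1 bit blocks the corresponding
 * interrupt source.  Hence all-ones above disables everything, and the
 * complement of `intrs' below enables exactly the requested set.
 */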
static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}

static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	tx_ring->tr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, ET_TX_RING_SIZE,
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&tx_ring->tr_dtag, &tx_ring->tr_dmap,
				&tx_ring->tr_paddr);
	if (tx_ring->tr_desc == NULL) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	txsd->txsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, sizeof(uint32_t),
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&txsd->txsd_dtag, &txsd->txsd_dmap,
				&txsd->txsd_paddr);
	if (txsd->txsd_status == NULL) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		rx_ring->rr_desc = bus_dmamem_coherent_any(sc->sc_dtag,
					ET_ALIGN, ET_RX_RING_SIZE,
					BUS_DMA_WAITOK | BUS_DMA_ZERO,
					&rx_ring->rr_dtag, &rx_ring->rr_dmap,
					&rx_ring->rr_paddr);
		if (rx_ring->rr_desc == NULL) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return ENOMEM;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	rxst_ring->rsr_stat = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, ET_RXSTAT_RING_SIZE,
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&rxst_ring->rsr_dtag, &rxst_ring->rsr_dmap,
				&rxst_ring->rsr_paddr);
	if (rxst_ring->rsr_stat == NULL) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	rxsd->rxsd_status = bus_dmamem_coherent_any(sc->sc_dtag,
				ET_ALIGN, sizeof(struct et_rxstatus),
				BUS_DMA_WAITOK | BUS_DMA_ZERO,
				&rxsd->rxsd_dtag, &rxsd->rxsd_dmap,
				&rxsd->rxsd_paddr);
	if (rxsd->rxsd_status == NULL) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return ENOMEM;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return error;

	/*
	 * Create jumbo buffer DMA stuffs
	 * NOTE: Allow it to fail
	 */
	if (et_jumbo_mem_alloc(dev) == 0)
		sc->sc_flags |= ET_FLAG_JUMBO;

	return 0;
}

static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy jumbo buffer DMA stuffs
	 */
	if (sc->sc_flags & ET_FLAG_JUMBO)
		et_jumbo_mem_free(dev);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}

static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create RX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &sc->sc_rxbuf_dtag);
	if (error) {
		device_printf(dev, "can't create RX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_rxbuf_dtag, BUS_DMA_WAITOK,
				  &sc->sc_rxbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_rxbuf_dtag,
						  BUS_DMA_WAITOK,
						  &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create TX mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
				   BUS_DMA_ONEBPAGE,
				   &sc->sc_txbuf_dtag);
	if (error) {
		device_printf(dev, "can't create TX mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_txbuf_dtag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}

static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	/*
	 * Destroy DMA tag and maps for RX mbufs
	 */
	if (sc->sc_rxbuf_dtag) {
		for (i = 0; i < ET_RX_NRING; ++i) {
			struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
			int j;

			for (j = 0; j < rx_done[i]; ++j) {
				struct et_rxbuf *rb = &rbd->rbd_buf[j];

				KASSERT(rb->rb_mbuf == NULL,
					("RX mbuf in %d RX ring is "
					 "not freed yet", i));
				bus_dmamap_destroy(sc->sc_rxbuf_dtag,
						   rb->rb_dmap);
			}
		}
		bus_dmamap_destroy(sc->sc_rxbuf_dtag, sc->sc_rxbuf_tmp_dmap);
		bus_dma_tag_destroy(sc->sc_rxbuf_dtag);
		sc->sc_rxbuf_dtag = NULL;
	}

	/*
	 * Destroy DMA tag and maps for TX mbufs
	 */
	if (sc->sc_txbuf_dtag) {
		for (i = 0; i < tx_done; ++i) {
			struct et_txbuf *tb = &tbd->tbd_buf[i];

			KASSERT(tb->tb_mbuf == NULL,
				("TX mbuf is not freed yet"));
			bus_dmamap_destroy(sc->sc_txbuf_dtag, tb->tb_dmap);
		}
		bus_dma_tag_destroy(sc->sc_txbuf_dtag);
		sc->sc_txbuf_dtag = NULL;
	}
}

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t intrs;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc, 1);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
}

static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const struct et_bsize *arr;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	et_stop(sc);

	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
	      et_bufsize_std : et_bufsize_jumbo;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
back:
	if (error)
		et_stop(sc);
}

static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static void
et_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	trans = 0;
	for (;;) {
		struct mbuf *m;
		int error;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		error = et_encap(sc, &m);
		if (error) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			KKASSERT(m == NULL);

			if (error == EFBIG) {
				/*
				 * Excessive fragmented packets
				 */
				ifq_set_oactive(&ifp->if_snd);
				break;
			}
			continue;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		ifp->if_timer = ET_TX_TIMEOUT;
}

static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
	if_devstart(ifp);
}

static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}
	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
}

static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				if (!rbd->rbd_jumbo) {
					bus_dmamap_unload(sc->sc_rxbuf_dtag,
							  rb->rb_dmap);
				}
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}
		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
	}
}

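/*
 * Multicast filtering: the chip exposes a 128-bit hash table as four
 * 32-bit registers at ET_MULTI_HASH.  et_setmulti() takes bits 29:23 of
 * the big-endian CRC of each multicast address (the `(h & 0x3f800000)
 * >> 23' step below) as a 7-bit index and sets the matching bit in the
 * appropriate 32-bit word.
 */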
static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
				   ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}

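/*
 * The ET1310 carries 16KB of internal packet memory shared between the
 * RX and TX queues; et_chip_init() below splits it based on the maximum
 * frame size.  Frames under 2KB keep the default RX share; frames up to
 * the cut-through threshold split the memory evenly; anything larger
 * gives RX whatever remains after reserving (rounded-up) room for one
 * oversized TX frame.
 */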
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}

static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));

	return 0;
}

static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(&sc->arpcom.ac_if, "%d ring %d buf, "
					  "newbuf failed: %d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);

	return 0;
}

static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}

static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}

static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
	      __SHIFTIN(80, ET_IPG_MINIFG) |
	      __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}

static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}

static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}

static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
			 ET_RXDMA_CTRL_RING0_SIZE) |
	       ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
			 ET_RXDMA_CTRL_RING1_SIZE) |
	       ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}

static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
		 ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd(ifp);
	else
		et_setmedia(sc);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;

		DELAY(10);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return ETIMEDOUT;
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}

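/*
 * RX completion.  The chip DMAs one et_rxstat entry per received frame
 * into the RX stat ring and keeps a summary word (rxs_stat_ring) in the
 * RX status block.  et_rxeof() walks the stat ring until its software
 * index/wrap pair catches up with the hardware's, replenishes each
 * consumed buffer through rbd_newbuf(), and advances both ET_RXSTAT_POS
 * and the per-ring position register so the hardware can reuse entries.
 */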
static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
				       ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			IFNET_STAT_INC(ifp, ierrors, 1);
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				IFNET_STAT_INC(ifp, ierrors, 1);
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				IFNET_STAT_INC(ifp, ipackets, 1);
				ifp->if_input(ifp, m, NULL, -1);
			}
		} else {
			IFNET_STAT_INC(ifp, ierrors, 1);
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
				  "buf_idx %d, rr_idx %d\n",
				  ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}

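/*
 * et_encap() loads the outgoing mbuf chain into at most `maxsegs' DMA
 * segments (defragmenting if needed), fills one TX descriptor per
 * segment, and hands the frame to the chip by bumping ET_TX_READY_POS.
 * Ring positions here and elsewhere carry a wrap bit alongside the
 * index, toggled on every pass around the ring, so the hardware can
 * distinguish a completely full ring from a completely empty one.
 */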
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, nsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_defrag(sc->sc_txbuf_dtag, map, m0,
					    segs, maxsegs, &nsegs,
					    BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(sc->sc_txbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = *m0;

	tbd->tbd_used += nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
				 ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(*m0);
		*m0 = NULL;
	}
	return error;
}

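/*
 * TX completion.  Rather than per-descriptor done bits, the chip reports
 * its progress through ET_TX_DONE_POS as an index-plus-wrap pair;
 * et_txeof() reclaims every descriptor between the software start index
 * and that position, unloading DMA maps and freeing the attached mbufs.
 */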
static void
et_txeof(struct et_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		      sizeof(struct et_txdesc));

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_txbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			IFNET_STAT_INC(ifp, opackets, 1);
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		ifp->if_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifq_clr_oactive(&ifp->if_snd);

	if (start)
		if_devstart(ifp);
}

static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			if_devstart(ifp);
	}
	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

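/*
 * RX buffer replenishment.  et_newbuf() loads the fresh mbuf through the
 * spare DMA map first; only when that load succeeds does it swap the
 * spare map with the descriptor's map.  A failed allocation therefore
 * leaves the previous (still mapped) buffer in place and merely recycles
 * the descriptor via et_setup_rxdesc().
 */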
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}

static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len, nseg;

	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "m_getl failed, size %d\n", len0);
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	error = bus_dmamap_load_mbuf_segment(sc->sc_rxbuf_dtag,
			sc->sc_rxbuf_tmp_dmap, m, &seg, 1, &nseg,
			BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_rxbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rxbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rxbuf_tmp_dmap;
	sc->sc_rxbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}

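/*
 * Jumbo buffers come from one large coherent DMA block carved into
 * ET_JSLOTS fixed-size slots.  Slots are handed out from a free list
 * under jd_serializer and attached to mbufs as external storage with
 * et_jref()/et_jfree() reference counting; a slot returns to the free
 * list only when its last reference is dropped.
 */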
static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int i;

	jd->jd_buf = bus_dmamem_coherent_any(sc->sc_dtag,
			ET_JUMBO_ALIGN, ET_JUMBO_MEM_SIZE, BUS_DMA_WAITOK,
			&jd->jd_dtag, &jd->jd_dmap, &paddr);
	if (jd->jd_buf == NULL) {
		device_printf(dev, "can't create jumbo DMA stuffs\n");
		return ENOMEM;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
			       M_WAITOK);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}

static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}

static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);
	return jslot;
}

static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
					  jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}

static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed", __func__);

	atomic_add_int(&jslot->jslot_inuse, 1);
}

static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;
		}
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "jslot allocation failed\n");
			return error;
		} else {
			goto back;
		}
	}

	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}

static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);
}