/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.7 2007/12/21 19:02:29 swildner Exp $
 */
#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/et/if_etreg.h>
#include <dev/netif/et/if_etvar.h>

#include "miibus_if.h"
static int	et_probe(device_t);
static int	et_attach(device_t);
static int	et_detach(device_t);
static int	et_shutdown(device_t);

static int	et_miibus_readreg(device_t, int, int);
static int	et_miibus_writereg(device_t, int, int, int);
static void	et_miibus_statchg(device_t);

static void	et_init(void *);
static int	et_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	et_start(struct ifnet *);
static void	et_watchdog(struct ifnet *);
static int	et_ifmedia_upd(struct ifnet *);
static void	et_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
static int	et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);

static void	et_intr(void *);
static void	et_enable_intrs(struct et_softc *, uint32_t);
static void	et_disable_intrs(struct et_softc *);
static void	et_rxeof(struct et_softc *);
static void	et_txeof(struct et_softc *);

static int	et_dma_alloc(device_t);
static void	et_dma_free(device_t);
static int	et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
				  void **, bus_addr_t *, bus_dmamap_t *);
static void	et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static int	et_dma_mbuf_create(device_t);
static void	et_dma_mbuf_destroy(device_t, int, const int[]);
static int	et_jumbo_mem_alloc(device_t);
static void	et_jumbo_mem_free(device_t);
static void	et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void	et_dma_buf_addr(void *, bus_dma_segment_t *, int,
				bus_size_t, int);
static int	et_init_tx_ring(struct et_softc *);
static int	et_init_rx_ring(struct et_softc *);
static void	et_free_tx_ring(struct et_softc *);
static void	et_free_rx_ring(struct et_softc *);
static int	et_encap(struct et_softc *, struct mbuf **);
static struct et_jslot *
		et_jalloc(struct et_jumbo_data *);
static void	et_jfree(void *);
static void	et_jref(void *);
static int	et_newbuf(struct et_rxbuf_data *, int, int, int);
static int	et_newbuf_cluster(struct et_rxbuf_data *, int, int);
static int	et_newbuf_hdr(struct et_rxbuf_data *, int, int);
static int	et_newbuf_jumbo(struct et_rxbuf_data *, int, int);

static void	et_stop(struct et_softc *);
static int	et_chip_init(struct et_softc *);
static void	et_chip_attach(struct et_softc *);
static void	et_init_mac(struct et_softc *);
static void	et_init_rxmac(struct et_softc *);
static void	et_init_txmac(struct et_softc *);
static int	et_init_rxdma(struct et_softc *);
static int	et_init_txdma(struct et_softc *);
static int	et_start_rxdma(struct et_softc *);
static int	et_start_txdma(struct et_softc *);
static int	et_stop_rxdma(struct et_softc *);
static int	et_stop_txdma(struct et_softc *);
static int	et_enable_txrx(struct et_softc *, int);
static void	et_reset(struct et_softc *);
static int	et_bus_config(device_t);
static void	et_get_eaddr(device_t, uint8_t[]);
static void	et_setmulti(struct et_softc *);
static void	et_tick(void *);
static void	et_setmedia(struct et_softc *);
static void	et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);
static const struct et_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }
};
static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
#ifdef notyet
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),
#endif

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	{ 0, 0 }
};

static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};

static devclass_t et_devclass;
DECLARE_DUMMY_MODULE(if_et);
MODULE_DEPEND(if_et, miibus, 1, 1, 1);
DRIVER_MODULE(if_et, pci, et_driver, et_devclass, 0, 0);
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);

static int	et_rx_intr_npkts = 32;
static int	et_rx_intr_delay = 20;		/* x10 usec */
static int	et_tx_intr_nsegs = 126;
static uint32_t	et_timer = 1000 * 1000 * 1000;	/* nanosec */

TUNABLE_INT("hw.et.timer", &et_timer);
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
TUNABLE_INT("hw.et.rx_intr_intvl", &et_rx_intr_delay);
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
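/*
 * NOTE (editor's sketch): the TUNABLE_INT()s above are read from the
 * loader environment at module load time, so e.g. putting
 *
 *	hw.et.rx_intr_npkts="64"
 *
 * into /boot/loader.conf changes the default before attach.  The
 * per-device sysctl nodes created in et_attach() expose the same knobs
 * at runtime.
 */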
static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,	.jumbo = 0,
	  .newbuf = et_newbuf_cluster },
};

static const struct et_bsize	et_bufsize_jumbo[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,	.jumbo = 0,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_16384,	.jumbo = 1,
	  .newbuf = et_newbuf_jumbo },
};
static int
et_probe(device_t dev)
{
	const struct et_dev *d;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = et_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}
static int
et_attach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	callout_init(&sc->sc_tick);

	/*
	 * Initialize tunables
	 */
	sc->sc_rx_intr_npkts = et_rx_intr_npkts;
	sc->sc_rx_intr_delay = et_rx_intr_delay;
	sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
	sc->sc_timer = et_timer;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, ET_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, ET_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->sc_mem_rid = ET_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "rx_intr_npkts", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_npkts, "I",
			"RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "rx_intr_delay", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, et_sysctl_rx_intr_delay, "I",
			"RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_intr_nsegs", CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
		       "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
			"timer", CTLFLAG_RW, &sc->sc_timer, 0,
			"TX timer");

	error = et_bus_config(dev);
	if (error)
		goto fail;

	et_get_eaddr(dev, eaddr);

	CSR_WRITE_4(sc, ET_PM,
		    ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);

	et_reset(sc);

	et_disable_intrs(sc);

	error = et_dma_alloc(dev);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = et_init;
	ifp->if_ioctl = et_ioctl;
	ifp->if_start = et_start;
	ifp->if_watchdog = et_watchdog;
	ifp->if_mtu = ETHERMTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, ET_TX_NDESC);
	ifq_set_ready(&ifp->if_snd);

	et_chip_attach(sc);

	error = mii_phy_probe(dev, &sc->sc_miibus,
			      et_ifmedia_upd, et_ifmedia_sts);
	if (error) {
		device_printf(dev, "can't probe any PHY\n");
		goto fail;
	}

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, et_intr, sc,
			       &sc->sc_irq_handle, ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "can't setup intr\n");
		goto fail;
	}

	return 0;
fail:
	et_detach(dev);
	return error;
}
static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	et_dma_free(dev);

	return 0;
}
static int
et_shutdown(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	et_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
	return 0;
}
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = __SHIFTOUT(val, ET_MII_STAT_VALUE);

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return ret;
}
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	val = __SHIFTIN(phy, ET_MII_ADDR_PHY) |
	      __SHIFTIN(reg, ET_MII_ADDR_REG);
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL, __SHIFTIN(val0, ET_MII_CTRL_VALUE));

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(&sc->arpcom.ac_if,
			  "write phy %d, reg %d timed out\n", phy, reg);
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return 0;
}
static void
et_miibus_statchg(device_t dev)
{
	et_setmedia(device_get_softc(dev));
}

static int
et_ifmedia_upd(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}
static void
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
static int
et_bus_config(device_t dev)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(dev, "EEPROM status error 0x%02x\n", val);
		return ENXIO;
	}

	/* TODO: LED */

	/*
	 * Configure ACK latency and replay timer according to
	 * max payload size
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CAPS, 4);
	max_plsz = val & ET_PCIM_DEVICE_CAPS_MAX_PLSZ;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		ack_latency = pci_read_config(dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(dev, ET_PCIR_REPLAY_TIMER, 2);
		device_printf(dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(dev, ET_PCIR_REPLAY_TIMER, replay_timer, 2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = ET_PCIV_L0S_LATENCY(2) | ET_PCIV_L1_LATENCY(2);
	pci_write_config(dev, ET_PCIR_L0S_L1_LATENCY, val, 1);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(dev, ET_PCIR_DEVICE_CTRL, 2);
	val &= ~ET_PCIM_DEVICE_CTRL_MAX_RRSZ;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(dev, ET_PCIR_DEVICE_CTRL, val, 2);

	return 0;
}
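/*
 * NOTE (editor's sketch): the factory MAC address is recovered below from
 * two PCI config space words.  ET_PCIR_MAC_ADDR0 holds bytes 0-3 in
 * little-endian order and ET_PCIR_MAC_ADDR1 holds bytes 4-5, so with a
 * hypothetical config dword val = 0x00038a00:
 *
 *	eaddr[0] = (val >>  0) & 0xff = 0x00
 *	eaddr[1] = (val >>  8) & 0xff = 0x8a
 *	eaddr[2] = (val >> 16) & 0xff = 0x03
 */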
static void
et_get_eaddr(device_t dev, uint8_t eaddr[])
{
	uint32_t val;
	int i;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
	for (i = 0; i < 4; ++i)
		eaddr[i] = (val >> (8 * i)) & 0xff;

	val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
	for (; i < ETHER_ADDR_LEN; ++i)
		eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
}
static void
et_reset(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	CSR_WRITE_4(sc, ET_SWRST,
		    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
		    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
		    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
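/*
 * NOTE: ET_INTR_MASK acts as a disable mask -- a set bit blocks the
 * corresponding interrupt source.  Writing all-ones therefore disables
 * everything, and enabling a set of sources is done by writing the
 * complement of that set, as the two helpers below do.
 */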
static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}
static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_SPACE_UNRESTRICTED,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
				  (void **)&tx_ring->tr_desc,
				  &tx_ring->tr_paddr, &tx_ring->tr_dmap);
	if (error) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create TX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
				  (void **)&txsd->txsd_status,
				  &txsd->txsd_paddr, &txsd->txsd_dmap);
	if (error) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
					  &rx_ring->rr_dtag,
					  (void **)&rx_ring->rr_desc,
					  &rx_ring->rr_paddr,
					  &rx_ring->rr_dmap);
		if (error) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return error;
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
				  &rxst_ring->rsr_dtag,
				  (void **)&rxst_ring->rsr_stat,
				  &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
	if (error) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return error;
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
				  &rxsd->rxsd_dtag,
				  (void **)&rxsd->rxsd_status,
				  &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
	if (error) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return error;
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return error;

	/*
	 * Create jumbo buffer DMA stuffs
	 * NOTE: Allow it to fail
	 */
	if (et_jumbo_mem_alloc(dev) == 0)
		sc->sc_flags |= ET_FLAG_JUMBO;

	return 0;
}
static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy jumbo buffer DMA stuffs
	 */
	if (sc->sc_flags & ET_FLAG_JUMBO)
		et_jumbo_mem_free(dev);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}
static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, &sc->sc_mbuf_dtag);
	if (error) {
		device_printf(dev, "can't create mbuf DMA tag\n");
		return error;
	}

	/*
	 * Create spare DMA map for RX mbufs
	 */
	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
		sc->sc_mbuf_dtag = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
						  &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return error;
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			et_dma_mbuf_destroy(dev, i, rx_done);
			return error;
		}
	}

	return 0;
}
static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	if (sc->sc_mbuf_dtag == NULL)
		return;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			KASSERT(rb->rb_mbuf == NULL,
				("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

	/*
	 * Destroy mbuf DMA tag
	 */
	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
	sc->sc_mbuf_dtag = NULL;
}
static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
		  void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct et_softc *sc = device_get_softc(dev);
	int error;

	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, BUS_SPACE_MAXSIZE_32BIT,
				   0, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}

	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
				et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return error;
	}
	return 0;
}

static void
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}
static void
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	KASSERT(nseg == 1, ("too many segments\n"));
	*((bus_addr_t *)arg) = seg->ds_addr;
}
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}
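/*
 * NOTE (editor's sketch): the interrupt handler below runs under the
 * ifnet serializer (registered via bus_setup_intr() in et_attach()).
 * It masks all sources, handles RX/TX completions, then unmasks
 * ET_INTRS on the way out; reading ET_INTR_STATUS is assumed to clear
 * the latched status bits.
 */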
static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t intrs;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
}
static void
et_init(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const struct et_bsize *arr;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	et_stop(sc);

	arr = ET_FRAMELEN(ifp->if_mtu) < MCLBYTES ?
	      et_bufsize_std : et_bufsize_jumbo;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
		sc->sc_rx_data[i].rbd_jumbo = arr[i].jumbo;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
back:
	if (error)
		et_stop(sc);
}
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, max_framelen;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				    (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else {
				et_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			et_setmulti(sc);
		break;

	case SIOCSIFMTU:
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		if (ifp->if_flags & IFF_RUNNING)
			et_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
static void
et_start(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int trans;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	trans = 0;
	for (;;) {
		struct mbuf *m;

		if ((tbd->tbd_used + ET_NSEG_SPARE) > ET_TX_NDESC) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m = ifq_dequeue(&ifp->if_snd, NULL);
		if (m == NULL)
			break;

		if (et_encap(sc, &m)) {
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		trans = 1;

		BPF_MTAP(ifp, m);
	}

	if (trans)
		ifp->if_timer = 5;
}
static void
et_watchdog(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timed out\n");

	ifp->if_init(ifp->if_softc);
	et_start(ifp);
}
static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(&sc->arpcom.ac_if, "can't stop RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}
static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);
}
static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				if (!rbd->rbd_jumbo) {
					bus_dmamap_unload(sc->sc_mbuf_dtag,
							  rb->rb_dmap);
				}
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
				BUS_DMASYNC_PREWRITE);
	}
}
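/*
 * NOTE (editor's sketch): the 128-bit multicast hash programmed below is
 * indexed by the top 7 bits of the big-endian CRC32 of the station
 * address: (crc & 0x3f800000) >> 23 yields a bucket number 0-127, which
 * is then split across the four 32-bit ET_MULTI_HASH words.  For
 * example, bucket 70 sets bit (70 - 64) = 6 in hash[2].
 */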
static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
				   ifma->ifma_addr), ETHER_ADDR_LEN);
		h = (h & 0x3f800000) >> 23;

		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		rxmem_size = ET_MEM_SIZE -
		    roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure */
	CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return error;

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return error;

	return 0;
}
static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
			BUS_DMASYNC_PREWRITE);
	return 0;
}
static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(&sc->arpcom.ac_if, "%d ring %d buf, "
					  "newbuf failed: %d\n", n, i, error);
				return error;
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
			BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
			BUS_DMASYNC_PREWRITE);

	return 0;
}
static void
et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
		bus_size_t mapsz __unused, int error)
{
	struct et_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
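/*
 * NOTE (editor's sketch): et_init_rxdma() below programs the free-buffer
 * rings, the stat/status rings, and finally the RX interrupt moderation
 * registers.  An RX interrupt is expected to fire once sc_rx_intr_npkts
 * packets have arrived or after sc_rx_intr_delay (in 10us units),
 * whichever comes first; the MINCNT writes appear to set a 15% low-water
 * mark on the rings.
 */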
static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init RX DMA engine\n");
		return error;
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return 0;
}
static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(&sc->arpcom.ac_if, "can't init TX DMA engine\n");
		return error;
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return 0;
}
static void
et_init_mac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup inter packet gap
	 */
	val = __SHIFTIN(56, ET_IPG_NONB2B_1) |
	      __SHIFTIN(88, ET_IPG_NONB2B_2) |
	      __SHIFTIN(80, ET_IPG_MINIFG) |
	      __SHIFTIN(96, ET_IPG_B2B);
	CSR_WRITE_4(sc, ET_IPG, val);

	/*
	 * Setup half duplex mode
	 */
	val = __SHIFTIN(10, ET_MAC_HDX_ALT_BEB_TRUNC) |
	      __SHIFTIN(15, ET_MAC_HDX_REXMIT_MAX) |
	      __SHIFTIN(55, ET_MAC_HDX_COLLWIN) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/*
	 * Set MAC address
	 */
	val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
	val = (eaddr[0] << 16) | (eaddr[1] << 24);
	CSR_WRITE_4(sc, ET_MAC_ADDR2, val);

	/* Set max frame length */
	CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
static void
et_init_rxmac(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	const uint8_t *eaddr = IF_LLADDR(ifp);
	uint32_t val;
	int i;

	/* Disable RX MAC and WOL */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);

	/*
	 * Clear all WOL related registers
	 */
	for (i = 0; i < 3; ++i)
		CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
	for (i = 0; i < 20; ++i)
		CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);

	/*
	 * Set WOL source address.  XXX is this necessary?
	 */
	val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
	CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
	val = (eaddr[0] << 8) | eaddr[1];
	CSR_WRITE_4(sc, ET_WOL_SA_HI, val);

	/* Clear packet filters */
	CSR_WRITE_4(sc, ET_PKTFILT, 0);

	/* No ucast filtering */
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
	CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);

	if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
		/*
		 * In order to transmit jumbo packets greater than
		 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
		 * RX MAC and RX DMA needs to be reduced in size to
		 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen).  In
		 * order to implement this, we must use "cut through"
		 * mode in the RX MAC, which chops packets down into
		 * segments.  In this case we selected 256 bytes,
		 * since this is the size of the PCI-Express TLP's
		 * that the ET1310 uses.
		 */
		val = __SHIFTIN(ET_RXMAC_SEGSZ(256), ET_RXMAC_MC_SEGSZ_MAX) |
		      ET_RXMAC_MC_SEGSZ_ENABLE;
	} else {
		val = 0;
	}
	CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);

	CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);

	/* Initialize RX MAC management register */
	CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);

	CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);

	CSR_WRITE_4(sc, ET_RXMAC_MGT,
		    ET_RXMAC_MGT_PASS_ECRC |
		    ET_RXMAC_MGT_PASS_ELEN |
		    ET_RXMAC_MGT_PASS_ETRUNC |
		    ET_RXMAC_MGT_CHECK_PKT);

	/*
	 * Configure runt filtering (may not work on certain chip generations)
	 */
	val = __SHIFTIN(ETHER_MIN_LEN, ET_PKTFILT_MINLEN) | ET_PKTFILT_FRAG;
	CSR_WRITE_4(sc, ET_PKTFILT, val);

	/* Enable RX MAC but leave WOL disabled */
	CSR_WRITE_4(sc, ET_RXMAC_CTRL,
		    ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);

	/*
	 * Setup multicast hash and allmulti/promisc mode
	 */
	et_setmulti(sc);
}
static void
et_init_txmac(struct et_softc *sc)
{
	/* Disable TX MAC and FC(?) */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);

	/* No flow control yet */
	CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);

	/* Enable TX MAC but leave FC(?) disabled */
	CSR_WRITE_4(sc, ET_TXMAC_CTRL,
		    ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
}
static int
et_start_rxdma(struct et_softc *sc)
{
	uint32_t val = 0;

	val |= __SHIFTIN(sc->sc_rx_data[0].rbd_bufsize,
			 ET_RXDMA_CTRL_RING0_SIZE) |
	       ET_RXDMA_CTRL_RING0_ENABLE;
	val |= __SHIFTIN(sc->sc_rx_data[1].rbd_bufsize,
			 ET_RXDMA_CTRL_RING1_SIZE) |
	       ET_RXDMA_CTRL_RING1_ENABLE;

	CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);

	DELAY(5);

	if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
		if_printf(&sc->arpcom.ac_if, "can't start RX DMA engine\n");
		return ETIMEDOUT;
	}
	return 0;
}

static int
et_start_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
	return 0;
}
static int
et_enable_txrx(struct et_softc *sc, int media_upd)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int i, error;

	val = CSR_READ_4(sc, ET_MAC_CFG1);
	val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
	val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
		 ET_MAC_CFG1_LOOPBACK);
	CSR_WRITE_4(sc, ET_MAC_CFG1, val);

	if (media_upd)
		et_ifmedia_upd(ifp);
	else
		et_setmedia(sc);

#define NRETRY	100

	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MAC_CFG1);
		if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
		    (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
			break;
		DELAY(10);
	}
	if (i == NRETRY) {
		if_printf(ifp, "can't enable RX/TX\n");
		return ETIMEDOUT;
	}
	sc->sc_flags |= ET_FLAG_TXRX_ENABLED;

#undef NRETRY

	/*
	 * Start TX/RX DMA engine
	 */
	error = et_start_rxdma(sc);
	if (error)
		return error;

	error = et_start_txdma(sc);
	if (error)
		return error;

	return 0;
}
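/*
 * NOTE (editor's sketch): RX completion below consumes entries from the
 * stat ring.  Ring positions are (index, wrap) pairs -- the wrap bit
 * flips every time an index rolls over, so producer == consumer only
 * when both index and wrap match.  This disambiguates a completely full
 * ring from a completely empty one without sacrificing an entry.
 */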
static void
et_rxeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	uint32_t rxs_stat_ring;
	int rxst_wrap, rxst_index;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
			BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
			BUS_DMASYNC_POSTREAD);

	rxs_stat_ring = rxsd->rxsd_status->rxs_stat_ring;
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = __SHIFTOUT(rxs_stat_ring, ET_RXS_STATRING_INDEX);

	while (rxst_index != rxst_ring->rsr_index ||
	       rxst_wrap != rxst_ring->rsr_wrap) {
		struct et_rxbuf_data *rbd;
		struct et_rxdesc_ring *rx_ring;
		struct et_rxstat *st;
		struct mbuf *m;
		int buflen, buf_idx, ring_idx;
		uint32_t rxstat_pos, rxring_pos;

		KKASSERT(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];

		buflen = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_LEN);
		buf_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_BUFIDX);
		ring_idx = __SHIFTOUT(st->rxst_info2, ET_RXST_INFO2_RINGIDX);

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = __SHIFTIN(rxst_ring->rsr_index,
				       ET_RXSTAT_POS_INDEX);
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;

		if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
			if (buflen < ETHER_CRC_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;

				m_adj(m, -ETHER_CRC_LEN);

				ifp->if_ipackets++;
				ifp->if_input(ifp, m);
			}
		} else {
			ifp->if_ierrors++;
		}
		m = NULL;	/* Catch invalid reference */

		rx_ring = &sc->sc_rx_ring[ring_idx];

		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp, "WARNING!! ring %d, "
				  "buf_idx %d, rr_idx %d\n",
				  ring_idx, buf_idx, rx_ring->rr_index);
		}

		KKASSERT(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = __SHIFTIN(rx_ring->rr_index, ET_RX_RING_POS_INDEX);
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}
}
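/*
 * NOTE (editor's sketch): observe the DMA map juggling in et_encap()
 * below.  The mbuf is loaded with the map belonging to the *first*
 * descriptor slot, but the mbuf pointer is stored at the *last* slot
 * (where et_txeof() frees it), so the two slots simply trade maps.
 * This keeps map ownership aligned with the slot that releases the mbuf.
 */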
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	struct et_dmamap_ctx ctx;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc *td;
	bus_dmamap_t map;
	int error, maxsegs, first_idx, last_idx, i;
	uint32_t tx_ready_pos, last_td_ctrl2;

	maxsegs = ET_TX_NDESC - tbd->tbd_used;
	if (maxsegs > ET_NSEG_MAX)
		maxsegs = ET_NSEG_MAX;
	KASSERT(maxsegs >= ET_NSEG_SPARE,
		("not enough spare TX desc (%d)\n", maxsegs));

	KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
				     et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->sc_mbuf_dtag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "can't load TX mbuf, error %d\n",
			  error);
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if, "can't defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			*m0 = m = m_new;
		}

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
					     et_dma_buf_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (ctx.nsegs == 0) {
				bus_dmamap_unload(sc->sc_mbuf_dtag, map);
				error = EFBIG;
			}
			if_printf(&sc->arpcom.ac_if,
				  "can't load defragged TX mbuf\n");
			goto back;
		}
	}

	bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += ctx.nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	last_idx = -1;
	for (i = 0; i < ctx.nsegs; ++i) {
		int idx;

		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = ET_ADDR_HI(segs[i].ds_addr);
		td->td_addr_lo = ET_ADDR_LO(segs[i].ds_addr);
		td->td_ctrl1 = __SHIFTIN(segs[i].ds_len, ET_TDCTRL1_LEN);

		if (i == ctx.nsegs - 1) {	/* Last frag */
			td->td_ctrl2 = last_td_ctrl2;
			last_idx = idx;
		}

		KKASSERT(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	td->td_ctrl2 |= ET_TDCTRL2_FIRST_FRAG;	/* First frag */

	KKASSERT(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += ctx.nsegs;
	KKASSERT(tbd->tbd_used <= ET_TX_NDESC);

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);

	tx_ready_pos = __SHIFTIN(tx_ring->tr_ready_index,
				 ET_TX_READY_POS_INDEX);
	if (tx_ring->tr_ready_wrap)
		tx_ready_pos |= ET_TX_READY_POS_WRAP;
	CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);

	error = 0;
back:
	if (error) {
		m_freem(m);
		*m0 = NULL;
	}
	return error;
}
static void
et_txeof(struct et_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	uint32_t tx_done;
	int end, wrap;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = __SHIFTOUT(tx_done, ET_TX_DONE_POS_INDEX);
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		struct et_txbuf *tb;

		KKASSERT(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];

		bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
		      sizeof(struct et_txdesc));
		bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
				BUS_DMASYNC_PREWRITE);

		if (tb->tb_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
			ifp->if_opackets++;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		KKASSERT(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		ifp->if_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
		ifp->if_flags &= ~IFF_OACTIVE;

	ifp->if_start(ifp);
}
static void
et_tick(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
	    (mii->mii_media_status & IFM_ACTIVE) &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if_printf(ifp, "Link up, enable TX/RX\n");
		if (et_enable_txrx(sc, 0) == 0)
			ifp->if_start(ifp);
	}
	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MCLBYTES);
}

static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	return et_newbuf(rbd, buf_idx, init, MHLEN);
}
static int
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_dmamap_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t dmap;
	int error, len;

	KASSERT(!rbd->rbd_jumbo, ("calling %s with jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	m = m_getl(len0, init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR, &len);
	if (m == NULL) {
		error = ENOBUFS;

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "m_getl failed, size %d\n", len0);
			return error;
		} else {
			goto back;
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	/*
	 * Try load RX mbuf into temporary DMA tag
	 */
	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
				     et_dma_buf_addr, &ctx,
				     init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(sc->sc_mbuf_dtag,
					  sc->sc_mbuf_tmp_dmap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
			return error;
		} else {
			goto back;
		}
	}

	if (!init) {
		bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
	}
	rb->rb_mbuf = m;
	rb->rb_paddr = seg.ds_addr;

	/*
	 * Swap RX buf's DMA map with the loaded temporary one
	 */
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
	sc->sc_mbuf_tmp_dmap = dmap;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_flags & IFF_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}
static void
et_setmedia(struct et_softc *sc)
{
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t cfg2, ctrl;

	cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
	cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
		  ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
	cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
		__SHIFTIN(7, ET_MAC_CFG2_PREAMBLE_LEN);

	ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
	ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		cfg2 |= ET_MAC_CFG2_MODE_GMII;
	} else {
		cfg2 |= ET_MAC_CFG2_MODE_MII;
		ctrl |= ET_MAC_CTRL_MODE_MII;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		cfg2 |= ET_MAC_CFG2_FDX;
	else
		ctrl |= ET_MAC_CTRL_GHDX;

	CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
	CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
}
static int
et_jumbo_mem_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;
	bus_addr_t paddr;
	uint8_t *buf;
	int i, error;

	error = et_dma_mem_create(dev, ET_JUMBO_MEM_SIZE, &jd->jd_dtag,
				  &jd->jd_buf, &paddr, &jd->jd_dmap);
	if (error) {
		device_printf(dev, "can't create jumbo DMA stuffs\n");
		return error;
	}

	jd->jd_slots = kmalloc(sizeof(*jd->jd_slots) * ET_JSLOTS, M_DEVBUF,
			       M_WAITOK | M_ZERO);
	lwkt_serialize_init(&jd->jd_serializer);
	SLIST_INIT(&jd->jd_free_slots);

	buf = jd->jd_buf;
	for (i = 0; i < ET_JSLOTS; ++i) {
		struct et_jslot *jslot = &jd->jd_slots[i];

		jslot->jslot_data = jd;
		jslot->jslot_buf = buf;
		jslot->jslot_paddr = paddr;
		jslot->jslot_inuse = 0;
		jslot->jslot_index = i;
		SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot, jslot_link);

		buf += ET_JLEN;
		paddr += ET_JLEN;
	}
	return 0;
}

static void
et_jumbo_mem_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_jumbo_data *jd = &sc->sc_jumbo_data;

	KKASSERT(sc->sc_flags & ET_FLAG_JUMBO);

	kfree(jd->jd_slots, M_DEVBUF);
	et_dma_mem_destroy(jd->jd_dtag, jd->jd_buf, jd->jd_dmap);
}
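/*
 * NOTE (editor's sketch): jumbo slot lifetime.  et_jref()/et_jfree()
 * below serve as the ext_ref/ext_free callbacks of the external mbuf
 * buffer.  The in-use count is bumped atomically, and et_jfree()
 * returns the slot to the free list under the jumbo serializer only
 * when the last reference drops, so a slot shared by several mbufs is
 * recycled exactly once.
 */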
static struct et_jslot *
et_jalloc(struct et_jumbo_data *jd)
{
	struct et_jslot *jslot;

	lwkt_serialize_enter(&jd->jd_serializer);

	jslot = SLIST_FIRST(&jd->jd_free_slots);
	if (jslot != NULL) {
		SLIST_REMOVE_HEAD(&jd->jd_free_slots, jslot_link);
		jslot->jslot_inuse = 1;
	}

	lwkt_serialize_exit(&jd->jd_serializer);

	return jslot;
}

static void
et_jfree(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot) {
		panic("%s wrong jslot!?\n", __func__);
	} else if (jslot->jslot_inuse == 0) {
		panic("%s jslot already freed\n", __func__);
	} else {
		lwkt_serialize_enter(&jd->jd_serializer);

		atomic_subtract_int(&jslot->jslot_inuse, 1);
		if (jslot->jslot_inuse == 0) {
			SLIST_INSERT_HEAD(&jd->jd_free_slots, jslot,
					  jslot_link);
		}

		lwkt_serialize_exit(&jd->jd_serializer);
	}
}

static void
et_jref(void *xjslot)
{
	struct et_jslot *jslot = xjslot;
	struct et_jumbo_data *jd = jslot->jslot_data;

	if (&jd->jd_slots[jslot->jslot_index] != jslot)
		panic("%s wrong jslot!?\n", __func__);
	else if (jslot->jslot_inuse == 0)
		panic("%s jslot already freed\n", __func__);

	atomic_add_int(&jslot->jslot_inuse, 1);
}
static int
et_newbuf_jumbo(struct et_rxbuf_data *rbd, int buf_idx, int init)
{
	struct et_softc *sc = rbd->rbd_softc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	struct et_jslot *jslot;
	int error;

	KASSERT(rbd->rbd_jumbo, ("calling %s with non-jumbo ring\n", __func__));

	KKASSERT(buf_idx < ET_RX_NDESC);
	rb = &rbd->rbd_buf[buf_idx];

	error = ENOBUFS;

	MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL) {
		if (init) {
			if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
			return error;
		} else {
			goto back;
		}
	}

	jslot = et_jalloc(&sc->sc_jumbo_data);
	if (jslot == NULL) {
		m_freem(m);

		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "jslot allocation failed\n");
			return error;
		} else {
			goto back;
		}
	}

	m->m_ext.ext_arg = jslot;
	m->m_ext.ext_buf = jslot->jslot_buf;
	m->m_ext.ext_free = et_jfree;
	m->m_ext.ext_ref = et_jref;
	m->m_ext.ext_size = ET_JUMBO_FRAMELEN;
	m->m_flags |= M_EXT;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	rb->rb_mbuf = m;
	rb->rb_paddr = jslot->jslot_paddr;

	error = 0;
back:
	et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
	return error;
}
static void
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
{
	struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
	struct et_rxdesc *desc;

	KKASSERT(buf_idx < ET_RX_NDESC);
	desc = &rx_ring->rr_desc[buf_idx];

	desc->rd_addr_hi = ET_ADDR_HI(paddr);
	desc->rd_addr_lo = ET_ADDR_LO(paddr);
	desc->rd_ctrl = __SHIFTIN(buf_idx, ET_RDCTRL_BUFIDX);

	bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
			BUS_DMASYNC_PREWRITE);
}