 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
30 #include "opt_ifpoll.h"
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
47 #include <net/ethernet.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_poll.h>
54 #include <net/ifq_var.h>
55 #include <net/toeplitz.h>
56 #include <net/toeplitz2.h>
57 #include <net/vlan/if_vlan_var.h>
58 #include <net/vlan/if_vlan_ether.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
63 #include <dev/netif/mii_layer/miivar.h>
64 #include <dev/netif/mii_layer/jmphyreg.h>
66 #include <bus/pci/pcireg.h>
67 #include <bus/pci/pcivar.h>
68 #include <bus/pci/pcidevs.h>
70 #include <dev/netif/jme/if_jmereg.h>
71 #include <dev/netif/jme/if_jmevar.h>
73 #include "miibus_if.h"
#define JME_TICK_CPUID		0	/* DO NOT CHANGE THIS */

#define JME_TX_SERIALIZE	1
#define JME_RX_SERIALIZE	2

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
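
/*
 * Usage sketch: at debug level >= 15 the RX path logs per-ring RSS
 * hash information, e.g.
 *	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x\n", ring, flags);
 * With JME_RSS_DEBUG undefined the macro compiles to ((void)0).
 */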
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);

static void	jme_npoll(struct ifnet *, struct ifpoll_info *);
static void	jme_npoll_status(struct ifnet *);
static void	jme_npoll_rx(struct ifnet *, void *, int);
static void	jme_npoll_tx(struct ifnet *, void *, int);

static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif
static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_msix_status(void *);
static void	jme_txeof(struct jme_txdata *);
static void	jme_rxeof(struct jme_rxdata *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);
static void	jme_enable_intr(struct jme_softc *);
static void	jme_disable_intr(struct jme_softc *);
static void	jme_rx_restart(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_rxdata *);
static void	jme_init_tx_ring(struct jme_txdata *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_txdata *, struct mbuf **, int *);
static void	jme_rxpkt(struct jme_rxdata *);
static int	jme_rxring_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_filter(void *, bus_addr_t);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);
static void	jme_serialize_skipmain(struct jme_softc *);
static void	jme_deserialize_skipmain(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	DEVMETHOD_END
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 0;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
static void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;

	desc = rxd->rx_desc;
	desc->buflen = htole32(MCLBYTES);
	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
}
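
/*
 * Setting JME_RD_OWN above hands the descriptor to the hardware; the
 * chip clears OWN again (on the first descriptor of a frame only, see
 * jme_rxpkt()) once it has filled the MCLBYTES-sized cluster.
 */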
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
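	/*
	 * The chip clears SMI_OP_EXECUTE once the MII transaction has
	 * completed, so poll for that with a bounded timeout rather
	 * than spinning forever on a wedged PHY.
	 */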
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_SMI) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	jme_serialize_skipmain(sc);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_has_link = FALSE;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_has_link = TRUE;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_has_link = TRUE;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the value of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring the JME_TXNDA/JME_RXNDA registers afterwards is
	 * not sufficient to ensure a correct MAC state, because
	 * stopping the MAC operation can take a while and the hardware
	 * might have updated the JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	jme_txeof(tdata);
	if (tdata->jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(tdata->jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
			}
		}
	}
	jme_init_tx_ring(tdata);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_has_link) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = tdata->jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
	    JME_TICK_CPUID);

#ifdef IFPOLL_ENABLE
	if (!(ifp->if_flags & IFF_NPOLLING))
#endif
	{
		/* Reenable interrupts. */
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	}

done:
	jme_deserialize_skipmain(sc);
}
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}
static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);
	return (ENOENT);
}
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);

	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt, coal_max;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int pmc;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	/*
	 * Get # of RX ring descriptors
	 */
	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Get # of TX ring descriptors
	 */
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count);
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, JME_NDESC_ALIGN);
	if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX;
	/*
	 * Get # of RX rings
	 */
	sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
	    jme_rx_ring_count);
	sc->jme_cdata.jme_rx_ring_cnt =
	    if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);

	/*
	 * Initialize serializer array
	 */
	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;

	KKASSERT(i == JME_TX_SERIALIZE);
	sc->jme_serialize_arr[i++] =
	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;

	KKASSERT(i == JME_RX_SERIALIZE);
	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}

	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;
	/*
	 * Setup TX ring specific data
	 */
	sc->jme_cdata.jme_tx_data.jme_sc = sc;

	/*
	 * Setup RX rings specific data
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */
	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Extract revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}
	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
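	/*
	 * E.g. FM revision 2 with ECO revision 3 yields rev 0x23; the
	 * JME_REV* comparisons below rely on this packed encoding.
	 */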
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x", did);
	}

	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}
	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}
	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
		    device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->jme_npoll_rxoff = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	offset_def = sc->jme_npoll_rxoff;
	offset = device_getenv_int(dev, "npoll.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid npoll.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}
	sc->jme_npoll_txoff = offset;
#endif	/* IFPOLL_ENABLE */
	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descriptors is set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned by JME_NDESC_ALIGN (16 currently).
	 */
	coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
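	/*
	 * E.g. if only 64 RX descriptors were configured, coal_max is
	 * 32 and a larger default jme_rx_coal_pkt is clamped to 32, so
	 * the packet-count coalescing threshold stays reachable.
	 */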
	sc->jme_cdata.jme_tx_data.jme_tx_wreg = 16;
	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = jme_npoll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd,
	    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_TSO |
	    IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= JME_CSUM_FEATURES;
	ifp->if_hwassist |= CSUM_TSO;
	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);

				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);
fail:
	jme_detach(dev);
	return (error);
}
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}
static void
jme_sysctl_node(struct jme_softc *sc)
{
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_ring_cnt,
	    0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_wreg", CTLFLAG_RW,
	    &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0,
	    "# of segments before writing to hardware register");

#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_emp", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
		    "# of times RX ring empty");
	}
#endif	/* JME_RSS_DEBUG */

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif	/* IFPOLL_ENABLE */
}
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	asize = __VM_CACHELINE_ALIGN(
	    tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc));
	tdata->jme_txdesc = kmalloc_cachealign(asize, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		asize = __VM_CACHELINE_ALIGN(
		    rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc));
		rdata->jme_rxdesc = kmalloc_cachealign(asize, M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	tdata->jme_tx_ring_tag = dmem.dmem_tag;
	tdata->jme_tx_ring_map = dmem.dmem_map;
	tdata->jme_tx_ring = dmem.dmem_addr;
	tdata->jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &tdata->jme_tx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		error = bus_dmamap_create(tdata->jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &tdata->jme_txdesc[j];
				bus_dmamap_destroy(tdata->jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(tdata->jme_tx_tag);
			tdata->jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (tdata->jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring_map);
		bus_dmamem_free(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring, tdata->jme_tx_ring_map);
		bus_dma_tag_destroy(tdata->jme_tx_ring_tag);
		tdata->jme_tx_ring_tag = NULL;
	}

	/* Rx rings */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (tdata->jme_tx_tag != NULL) {
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap);
		}
		bus_dma_tag_destroy(tdata->jme_tx_tag);
		tdata->jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (tdata->jme_txdesc != NULL) {
		kfree(tdata->jme_txdesc, M_DEVBUF);
		tdata->jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

/*
 * Unlike other ethernet controllers, JMC250 requires
 * explicitly resetting the link speed to 10/100Mbps as a gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation but we don't know whether that operation
 * would succeed or not as we have no control after powering
 * off.  If the renegotiation fails, WOL may not work.  Running
 * at 1Gbps draws more power than 375mA at 3.3V which is
 * specified in the PCI specification, and that would result in
 * completely shutting down power to the ethernet controller.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control
 * to softc and restore the same link again after resuming.
 * PHY handling such as power down/resetting to 100Mbps
 * may be better handled in the suspend method in the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
	jme_setwol(sc);
	ifnet_deserialize_all(ifp);

	return (0);
}
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int pmc;

	ifnet_serialize_all(ifp);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}
static int
jme_tso_pullup(struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	return 0;
}
static int
jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = tdata->jme_tx_prod;
	txd = &tdata->jme_txdesc[prod];

	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
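	/*
	 * JME_TXD_RSVD descriptors are kept permanently free so the
	 * producer never wraps onto the consumer; symbol_desc accounts
	 * for the extra "symbol" descriptor used by the 64-bit chain
	 * format (see below).
	 */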
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
	    ("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;
	*segs_used += nsegs;

	bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &tdata->jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	tdata->jme_tx_cnt++;
	KKASSERT(tdata->jme_tx_cnt - i < tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &tdata->jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		tdata->jme_tx_cnt++;
		KKASSERT(tdata->jme_tx_cnt <=
		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	tdata->jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	if (!sc->jme_has_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata))
		jme_txeof(tdata);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (tdata->jme_tx_cnt + JME_TXD_SPARE >
		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(tdata, &m_head, &enq)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		if (enq >= tdata->jme_tx_wreg) {
			CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr |
			    TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0));
			enq = 0;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to the TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
	}
}
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (!sc->jme_has_link) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(tdata);
	if (tdata->jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against the new MTU size,
			 * as the FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under heavy
			 * load, so cache the RXCSR value and write the ORed
			 * value with the kick command to the RXCSR.  This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&tdata->jme_tx_serialize);
			jme_txeof(tdata);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&tdata->jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
static void
jme_txeof(struct jme_txdata *tdata)
{
	struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if;
	int cons;

	cons = tdata->jme_tx_cons;
	if (cons == tdata->jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != tdata->jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &tdata->jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * This chip will always update the TX descriptor's
		 * buflen field, and this updating always happens
		 * after clearing the OWN bit, so even if the OWN
		 * bit is cleared by the chip, we still can't be sure
		 * whether the buflen field has been updated
		 * by the chip or not.  To avoid this race, we wait
		 * for the next TX descriptor's OWN bit to be cleared
		 * by the chip before reusing this TX descriptor.
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt);
		next_txd = &tdata->jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire chain of buffers for the transmitted frame.  In
		 * other words, the JME_TD_OWN bit is valid only at the
		 * first descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			tdata->jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, tdata->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		tdata->jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(tdata->jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	tdata->jme_tx_cons = cons;

	/* 1 for symbol TX descriptor */
	if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	if (tdata->jme_tx_cnt + JME_TXD_SPARE <=
	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
static __inline void
jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
	}
}

static __inline struct pktinfo *
jme_pktinfo(struct pktinfo *pi, uint32_t flags)
{
	if (flags & JME_RD_IPV4)
		pi->pi_netisr = NETISR_IP;
	else if (flags & JME_RD_IPV6)
		pi->pi_netisr = NETISR_IPV6;
	else
		return NULL;

	pi->pi_flags = 0;
	pi->pi_l3proto = IPPROTO_UNKNOWN;

	if (flags & JME_RD_MORE_FRAG)
		pi->pi_flags |= PKTINFO_FLAG_FRAG;
	else if (flags & JME_RD_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else if (flags & JME_RD_UDP)
		pi->pi_l3proto = IPPROTO_UDP;
	else
		pi = NULL;
	return pi;
}
2301 /* Receive a frame. */
2303 jme_rxpkt(struct jme_rxdata *rdata)
2305 struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2306 struct jme_desc *desc;
2307 struct jme_rxdesc *rxd;
2308 struct mbuf *mp, *m;
2309 uint32_t flags, status, hash, hashinfo;
2310 int cons, count, nsegs;
2312 cons = rdata->jme_rx_cons;
2313 desc = &rdata->jme_rx_ring[cons];
2315 flags = le32toh(desc->flags);
2316 status = le32toh(desc->buflen);
2317 hash = le32toh(desc->addr_hi);
2318 hashinfo = le32toh(desc->addr_lo);
2319 nsegs = JME_RX_NSEGS(status);
2322 /* Skip the first descriptor. */
2323 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2326 * Clear the OWN bit of the following RX descriptors;
2327 * hardware will not clear the OWN bit except for the first
2328 * RX descriptor of the received frame.
2330 * Since the first RX descriptor is set up, i.e. its OWN bit
2331 * is on, before its following RX descriptors, leaving the
2332 * OWN bit on the following RX descriptors will trick
2333 * the hardware into thinking that the following RX
2334 * descriptors are ready to be used too.
2336 for (count = 1; count < nsegs; count++,
2337 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2338 rdata->jme_rx_ring[cons].flags = 0;
2340 cons = rdata->jme_rx_cons;
2343 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2344 "hash 0x%08x, hash info 0x%08x\n",
2345 rdata->jme_rx_idx, flags, hash, hashinfo);
2347 if (status & JME_RX_ERR_STAT) {
2349 jme_discard_rxbufs(rdata, cons, nsegs);
2350 #ifdef JME_SHOW_ERRORS
2351 if_printf(ifp, "%s: receive error = 0x%b\n",
2352 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2354 rdata->jme_rx_cons += nsegs;
2355 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2359 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2360 for (count = 0; count < nsegs; count++,
2361 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2362 rxd = &rdata->jme_rxdesc[cons];
2363 mp = rxd->rx_m;
2365 /* Add a new receive buffer to the ring. */
2366 if (jme_newbuf(rdata, rxd, 0) != 0) {
2369 jme_discard_rxbufs(rdata, cons, nsegs - count);
2370 if (rdata->jme_rxhead != NULL) {
2371 m_freem(rdata->jme_rxhead);
2372 JME_RXCHAIN_RESET(rdata);
2378 * Assume we've received a full-sized frame.
2379 * The actual size is fixed up when we encounter the end
2380 * of a multi-segmented frame.
2382 mp->m_len = MCLBYTES;
2384 /* Chain received mbufs. */
2385 if (rdata->jme_rxhead == NULL) {
2386 rdata->jme_rxhead = mp;
2387 rdata->jme_rxtail = mp;
2390 * The receive processor can handle a maximum frame
2391 * size of 65535 bytes.
2393 rdata->jme_rxtail->m_next = mp;
2394 rdata->jme_rxtail = mp;
2397 if (count == nsegs - 1) {
2398 struct pktinfo pi0, *pi;
2400 /* Last desc. for this frame. */
2401 m = rdata->jme_rxhead;
2402 m->m_pkthdr.len = rdata->jme_rxlen;
2404 /* Set first mbuf size. */
2405 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2406 /* Set last mbuf size. */
2407 mp->m_len = rdata->jme_rxlen -
2408 ((MCLBYTES - JME_RX_PAD_BYTES) +
2409 (MCLBYTES * (nsegs - 2)));
2411 m->m_len = rdata->jme_rxlen;
2413 m->m_pkthdr.rcvif = ifp;
2416 * Account for the 10 bytes of auto padding that is used
2417 * to align the IP header on a 32-bit boundary. Also note
2418 * that the CRC bytes are automatically removed by the
2419 * hardware.
2421 m->m_data += JME_RX_PAD_BYTES;
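
/*
 * Worked example (illustrative, assuming 2KB clusters, MCLBYTES == 2048):
 * a frame with jme_rxlen == 3000 (pad already subtracted) arrives in
 * nsegs == howmany(3010, 2048) == 2 segments.  The first mbuf is trimmed
 * to MCLBYTES - 10 == 2038 bytes and the last to
 * 3000 - (2038 + 2048 * (2 - 2)) == 962 bytes; 2038 + 962 == 3000 ==
 * m_pkthdr.len.  Advancing m_data past the 10 pad bytes then leaves the
 * IP header 32-bit aligned without any copying.
 */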
2423 /* Set checksum information. */
2424 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2425 (flags & JME_RD_IPV4)) {
2426 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2427 if (flags & JME_RD_IPCSUM)
2428 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2429 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2430 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2431 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2432 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2433 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2434 m->m_pkthdr.csum_flags |=
2435 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2436 m->m_pkthdr.csum_data = 0xffff;
2440 /* Check for VLAN tagged packets. */
2441 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2442 (flags & JME_RD_VLAN_TAG)) {
2443 m->m_pkthdr.ether_vlantag =
2444 flags & JME_RD_VLAN_MASK;
2445 m->m_flags |= M_VLANTAG;
2450 if (ifp->if_capenable & IFCAP_RSS)
2451 pi = jme_pktinfo(&pi0, flags);
2452 else
2453 pi = NULL;
2455 if (pi != NULL &&
2456 (hashinfo & JME_RD_HASH_FN_MASK) ==
2457 JME_RD_HASH_FN_TOEPLITZ) {
2458 m->m_flags |= (M_HASH | M_CKHASH);
2459 m->m_pkthdr.hash = toeplitz_hash(hash);
2462 #ifdef JME_RSS_DEBUG
2464 JME_RSS_DPRINTF(rdata->jme_sc, 10,
2465 "isr %d flags %08x, l3 %d %s\n",
2466 pi->pi_netisr, pi->pi_flags,
2468 (m->m_flags & M_HASH) ? "hash" : "");
2473 ether_input_pkt(ifp, m, pi);
2475 /* Reset mbuf chains. */
2476 JME_RXCHAIN_RESET(rdata);
2477 #ifdef JME_RSS_DEBUG
2478 rdata->jme_rx_pkt++;
2483 rdata->jme_rx_cons += nsegs;
2484 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2488 jme_rxeof(struct jme_rxdata *rdata, int count)
2490 struct jme_desc *desc;
2494 #ifdef IFPOLL_ENABLE
2495 if (count >= 0 && count-- == 0)
2496 break;
2498 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2499 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2500 break;
2501 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2502 break;
2505 * Check the number of segments against the received bytes.
2506 * A non-matching value would indicate that the hardware
2507 * is still trying to update the Rx descriptors. I'm not
2508 * sure whether this check is needed.
2510 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2511 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2512 if (nsegs != howmany(pktlen, MCLBYTES)) {
2513 if_printf(&rdata->jme_sc->arpcom.ac_if,
2514 "RX fragment count(%d) and "
2515 "packet size(%d) mismatch\n", nsegs, pktlen);
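
/*
 * Example (illustrative): a 3010-byte write-back carried in 2KB
 * clusters must report howmany(3010, MCLBYTES) == 2 segments; any
 * other nsegs value here means the descriptor update is incomplete.
 */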
2521 * The RSS hash and hash information may _not_ have been
2522 * set by the hardware yet, even if the OWN bit is cleared
2523 * and the VALID bit is set.
2525 * If the RSS information has not been delivered by the
2526 * hardware yet, we MUST NOT accept this packet, let alone
2527 * reuse its RX descriptor. If this packet were accepted and
2528 * its RX descriptor reused before the hardware delivered the
2529 * RSS information, the RX buffer's address would be trashed
2530 * by the RSS write-back from the hardware.
2532 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2533 struct jme_rxdesc *rxd;
2536 hashinfo = le32toh(desc->addr_lo);
2537 rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2540 * This test should be enough to detect the pending
2541 * RSS information delivery, given:
2542 * - If RSS hash is not calculated, the hashinfo
2543 * will be 0. However, the lower 32 bits of the RX
2544 * buffers' physical address will never be 0.
2545 * (see jme_rxbuf_dma_filter)
2546 * - If RSS hash is calculated, the lowest 4 bits
2547 * of hashinfo will be set, while the RX buffers
2548 * are at least 2K aligned.
2550 if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2551 #ifdef JME_SHOW_RSSWB
2552 if_printf(&rdata->jme_sc->arpcom.ac_if,
2553 "RSS is not written back yet\n");
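
/*
 * Example (illustrative): an RX buffer at physical address 0x12345800
 * is 2K aligned, so JME_ADDR_LO() of it has the low 4 bits clear.
 * While addr_lo still equals that address, the RSS write-back has not
 * happened yet; once the chip writes the hash information, at least
 * one of the low 4 bits is set and the two values can no longer match.
 */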
2559 /* Received a frame. */
2567 struct jme_softc *sc = xsc;
2568 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2570 lwkt_serialize_enter(&sc->jme_serialize);
2572 KKASSERT(mycpuid == JME_TICK_CPUID);
2574 sc->jme_in_tick = TRUE;
2576 sc->jme_in_tick = FALSE;
2578 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2580 lwkt_serialize_exit(&sc->jme_serialize);
2584 jme_reset(struct jme_softc *sc)
2588 /* Make sure that TX and RX are stopped */
2593 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2597 * Hold the reset bit before stopping the reset
2600 /* Disable TXMAC and TXOFL clock sources */
2601 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2602 /* Disable RXMAC clock source */
2603 val = CSR_READ_4(sc, JME_GPREG1);
2604 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2606 CSR_READ_4(sc, JME_GHC);
2609 CSR_WRITE_4(sc, JME_GHC, 0);
2611 CSR_READ_4(sc, JME_GHC);
2614 * Clear the reset bit after stopping the reset
2617 /* Enable TXMAC and TXOFL clock sources */
2618 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2619 /* Enable RXMAC clock source */
2620 val = CSR_READ_4(sc, JME_GPREG1);
2621 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2623 CSR_READ_4(sc, JME_GHC);
2625 /* Disable TXMAC and TXOFL clock sources */
2626 CSR_WRITE_4(sc, JME_GHC, 0);
2627 /* Disable RXMAC clock source */
2628 val = CSR_READ_4(sc, JME_GPREG1);
2629 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2631 CSR_READ_4(sc, JME_GHC);
2633 /* Enable TX and RX */
2634 val = CSR_READ_4(sc, JME_TXCSR);
2635 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2636 val = CSR_READ_4(sc, JME_RXCSR);
2637 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2639 CSR_READ_4(sc, JME_TXCSR);
2640 CSR_READ_4(sc, JME_RXCSR);
2642 /* Enable TXMAC and TXOFL clock sources */
2643 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2644 /* Enable RXMAC clock source */
2645 val = CSR_READ_4(sc, JME_GPREG1);
2646 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2648 CSR_READ_4(sc, JME_GHC);
2650 /* Stop TX and RX */
2658 struct jme_softc *sc = xsc;
2659 struct ifnet *ifp = &sc->arpcom.ac_if;
2660 struct mii_data *mii;
2661 uint8_t eaddr[ETHER_ADDR_LEN];
2666 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2669 * Cancel any pending I/O.
2674 * Reset the chip to a known state.
2679 * Setup MSI/MSI-X vectors to interrupts mapping
2683 if (JME_ENABLE_HWRSS(sc))
2684 jme_enable_rss(sc);
2685 else
2686 jme_disable_rss(sc);
2688 /* Init RX descriptors */
2689 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2690 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2692 if_printf(ifp, "initialization failed: "
2693 "no memory for %dth RX ring.\n", r);
2699 /* Init TX descriptors */
2700 jme_init_tx_ring(&sc->jme_cdata.jme_tx_data);
2702 /* Initialize shadow status block. */
2705 /* Reprogram the station address. */
2706 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2707 CSR_WRITE_4(sc, JME_PAR0,
2708 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2709 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2712 * Configure Tx queue.
2713 * Tx priority queue weight value : 0
2714 * Tx FIFO threshold for processing next packet : 16QW
2715 * Maximum Tx DMA length : 512
2716 * Allow Tx DMA burst.
2718 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2719 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2720 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2721 sc->jme_txcsr |= sc->jme_tx_dma_size;
2722 sc->jme_txcsr |= TXCSR_DMA_BURST;
2723 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2725 /* Set Tx descriptor counter. */
2726 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt);
2728 /* Set Tx ring address to the hardware. */
2729 paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr;
2730 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2731 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2733 /* Configure TxMAC parameters. */
2734 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2735 reg |= TXMAC_THRESH_1_PKT;
2736 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2737 CSR_WRITE_4(sc, JME_TXMAC, reg);
2740 * Configure Rx queue.
2741 * FIFO full threshold for transmitting Tx pause packet : 128T
2742 * FIFO threshold for processing next packet : 128QW
2744 * Max Rx DMA length : 128
2745 * Rx descriptor retry : 32
2746 * Rx descriptor retry time gap : 256ns
2747 * Don't receive runt/bad frame.
2749 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2752 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2753 * than 4K bytes will suffer from Rx FIFO overruns. So
2754 * decrease the FIFO threshold to reduce FIFO overruns for
2755 * frames larger than 4000 bytes.
2756 * For the best performance with standard-MTU-sized frames, use
2757 * the maximum allowable FIFO threshold, 128QW.
2759 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2761 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2763 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2765 /* Improve PCI Express compatibility */
2766 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
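
/*
 * Example (illustrative): with the standard 1500-byte MTU the largest
 * frame is 1500 + 14 (header) + 4 (VLAN) + 4 (CRC) == 1522 bytes,
 * comfortably below the 4K RX FIFO, so the 128QW threshold is chosen;
 * an MTU around 4000 would push frames past the FIFO size and select
 * 16QW instead.
 */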
2768 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2769 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2770 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2771 /* XXX TODO DROP_BAD */
2773 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2774 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2776 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2778 /* Set Rx descriptor counter. */
2779 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2781 /* Set Rx ring address to the hardware. */
2782 paddr = rdata->jme_rx_ring_paddr;
2783 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2784 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2787 /* Clear receive filter. */
2788 CSR_WRITE_4(sc, JME_RXMAC, 0);
2790 /* Set up the receive filter. */
2795 * Disable all WOL bits as WOL can interfere with normal Rx
2796 * operation. Also clear WOL detection status bits.
2798 reg = CSR_READ_4(sc, JME_PMCS);
2799 reg &= ~PMCS_WOL_ENB_MASK;
2800 CSR_WRITE_4(sc, JME_PMCS, reg);
2803 * Pad 10 bytes right before the received frame. This greatly
2804 * helps Rx performance on strict-alignment architectures, as
2805 * the driver does not need to copy the frame to align the payload.
2807 reg = CSR_READ_4(sc, JME_RXMAC);
2808 reg |= RXMAC_PAD_10BYTES;
2810 if (ifp->if_capenable & IFCAP_RXCSUM)
2811 reg |= RXMAC_CSUM_ENB;
2812 CSR_WRITE_4(sc, JME_RXMAC, reg);
2814 /* Configure general purpose reg0 */
2815 reg = CSR_READ_4(sc, JME_GPREG0);
2816 reg &= ~GPREG0_PCC_UNIT_MASK;
2817 /* Set PCC timer resolution to micro-seconds unit. */
2818 reg |= GPREG0_PCC_UNIT_US;
2820 * Disable all shadow register posting as we have to read
2821 * JME_INTR_STATUS register in jme_intr. Also it seems
2822 * that it's hard to synchronize interrupt status between
2823 * hardware and software with shadow posting due to
2824 * requirements of bus_dmamap_sync(9).
2826 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2827 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2828 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2829 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2830 /* Disable posting of DW0. */
2831 reg &= ~GPREG0_POST_DW0_ENB;
2832 /* Clear PME message. */
2833 reg &= ~GPREG0_PME_ENB;
2834 /* Set PHY address. */
2835 reg &= ~GPREG0_PHY_ADDR_MASK;
2836 reg |= sc->jme_phyaddr;
2837 CSR_WRITE_4(sc, JME_GPREG0, reg);
2839 /* Configure Tx queue 0 packet completion coalescing. */
2840 jme_set_tx_coal(sc);
2842 /* Configure Rx queues packet completion coalescing. */
2843 jme_set_rx_coal(sc);
2845 /* Configure shadow status block but don't enable posting. */
2846 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2847 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2848 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2850 /* Disable Timer 1 and Timer 2. */
2851 CSR_WRITE_4(sc, JME_TIMER1, 0);
2852 CSR_WRITE_4(sc, JME_TIMER2, 0);
2854 /* Configure retry transmit period, retry limit value. */
2855 CSR_WRITE_4(sc, JME_TXTRHD,
2856 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2857 TXTRHD_RT_PERIOD_MASK) |
2858 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2859 TXTRHD_RT_LIMIT_MASK));
2861 #ifdef IFPOLL_ENABLE
2862 if (!(ifp->if_flags & IFF_NPOLLING))
2864 /* Initialize the interrupt mask. */
2865 jme_enable_intr(sc);
2866 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2869 * Enabling Tx/Rx DMA engines and Rx queue processing is
2870 * done after detection of valid link in jme_miibus_statchg.
2872 sc->jme_has_link = FALSE;
2874 /* Set the current media. */
2875 mii = device_get_softc(sc->jme_miibus);
2878 callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
2881 ifp->if_flags |= IFF_RUNNING;
2882 ifp->if_flags &= ~IFF_OACTIVE;
2886 jme_stop(struct jme_softc *sc)
2888 struct ifnet *ifp = &sc->arpcom.ac_if;
2889 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
2890 struct jme_txdesc *txd;
2891 struct jme_rxdesc *rxd;
2892 struct jme_rxdata *rdata;
2895 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2898 * Mark the interface down and cancel the watchdog timer.
2900 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2903 callout_stop(&sc->jme_tick_ch);
2904 sc->jme_has_link = FALSE;
2907 * Disable interrupts.
2909 jme_disable_intr(sc);
2910 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2912 /* Disable updating shadow status block. */
2913 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2914 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2916 /* Stop receiver, transmitter. */
2921 * Free partially finished RX segments
2923 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2924 rdata = &sc->jme_cdata.jme_rx_data[r];
2925 if (rdata->jme_rxhead != NULL)
2926 m_freem(rdata->jme_rxhead);
2927 JME_RXCHAIN_RESET(rdata);
2931 * Free RX and TX mbufs still in the queues.
2933 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2934 rdata = &sc->jme_cdata.jme_rx_data[r];
2935 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2936 rxd = &rdata->jme_rxdesc[i];
2937 if (rxd->rx_m != NULL) {
2938 bus_dmamap_unload(rdata->jme_rx_tag,
2945 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
2946 txd = &tdata->jme_txdesc[i];
2947 if (txd->tx_m != NULL) {
2948 bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
2957 jme_stop_tx(struct jme_softc *sc)
2962 reg = CSR_READ_4(sc, JME_TXCSR);
2963 if ((reg & TXCSR_TX_ENB) == 0)
2964 return;
2965 reg &= ~TXCSR_TX_ENB;
2966 CSR_WRITE_4(sc, JME_TXCSR, reg);
2967 for (i = JME_TIMEOUT; i > 0; i--) {
2969 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2970 break;
2972 if (i == 0)
2973 device_printf(sc->jme_dev, "stopping transmitter timed out!\n");
2977 jme_stop_rx(struct jme_softc *sc)
2982 reg = CSR_READ_4(sc, JME_RXCSR);
2983 if ((reg & RXCSR_RX_ENB) == 0)
2984 return;
2985 reg &= ~RXCSR_RX_ENB;
2986 CSR_WRITE_4(sc, JME_RXCSR, reg);
2987 for (i = JME_TIMEOUT; i > 0; i--) {
2989 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2990 break;
2992 if (i == 0)
2993 device_printf(sc->jme_dev, "stopping receiver timed out!\n");
2997 jme_init_tx_ring(struct jme_txdata *tdata)
2999 struct jme_txdesc *txd;
3002 tdata->jme_tx_prod = 0;
3003 tdata->jme_tx_cons = 0;
3004 tdata->jme_tx_cnt = 0;
3006 bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata));
3007 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
3008 txd = &tdata->jme_txdesc[i];
3010 txd->tx_desc = &tdata->jme_tx_ring[i];
3016 jme_init_ssb(struct jme_softc *sc)
3018 struct jme_chain_data *cd;
3020 cd = &sc->jme_cdata;
3021 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
3025 jme_init_rx_ring(struct jme_rxdata *rdata)
3027 struct jme_rxdesc *rxd;
3030 KKASSERT(rdata->jme_rxhead == NULL &&
3031 rdata->jme_rxtail == NULL &&
3032 rdata->jme_rxlen == 0);
3033 rdata->jme_rx_cons = 0;
3035 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
3036 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3039 rxd = &rdata->jme_rxdesc[i];
3041 rxd->rx_desc = &rdata->jme_rx_ring[i];
3042 error = jme_newbuf(rdata, rxd, 1);
3050 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
3053 bus_dma_segment_t segs;
3057 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3061 * The JMC250 has a 64-bit boundary alignment limitation, so
3062 * jme(4) takes advantage of the hardware's 10-byte padding
3063 * feature in order not to copy the entire frame to align the
3064 * IP header on a 32-bit boundary.
3066 m->m_len = m->m_pkthdr.len = MCLBYTES;
3068 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
3069 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
3074 if_printf(&rdata->jme_sc->arpcom.ac_if,
3075 "can't load RX mbuf\n");
3080 if (rxd->rx_m != NULL) {
3081 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
3082 BUS_DMASYNC_POSTREAD);
3083 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
3085 map = rxd->rx_dmamap;
3086 rxd->rx_dmamap = rdata->jme_rx_sparemap;
3087 rdata->jme_rx_sparemap = map;
3089 rxd->rx_paddr = segs.ds_addr;
3091 jme_setup_rxdesc(rxd);
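
/*
 * Design note: the new mbuf is loaded into the spare dmamap first and
 * the maps are swapped only after the load succeeds.  On failure the
 * old mbuf and its mapping are left intact, so the RX ring never loses
 * a buffer; the existing one is simply recycled.
 */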
3096 jme_set_vlan(struct jme_softc *sc)
3098 struct ifnet *ifp = &sc->arpcom.ac_if;
3101 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3103 reg = CSR_READ_4(sc, JME_RXMAC);
3104 reg &= ~RXMAC_VLAN_ENB;
3105 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3106 reg |= RXMAC_VLAN_ENB;
3107 CSR_WRITE_4(sc, JME_RXMAC, reg);
3111 jme_set_filter(struct jme_softc *sc)
3113 struct ifnet *ifp = &sc->arpcom.ac_if;
3114 struct ifmultiaddr *ifma;
3119 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3121 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3122 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3126 * Always accept frames destined to our station address.
3127 * Always accept broadcast frames.
3129 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3131 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3132 if (ifp->if_flags & IFF_PROMISC)
3133 rxcfg |= RXMAC_PROMISC;
3134 if (ifp->if_flags & IFF_ALLMULTI)
3135 rxcfg |= RXMAC_ALLMULTI;
3136 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3137 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3138 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3143 * Set up the multicast address filter by passing all multicast
3144 * addresses through a CRC generator, and then using the low-order
3145 * 6 bits as an index into the 64 bit multicast hash table. The
3146 * high order bits select the register, while the rest of the bits
3147 * select the bit within the register.
3149 rxcfg |= RXMAC_MULTICAST;
3150 bzero(mchash, sizeof(mchash));
3152 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3153 if (ifma->ifma_addr->sa_family != AF_LINK)
3155 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3156 ifma->ifma_addr), ETHER_ADDR_LEN);
3158 /* Just want the 6 least significant bits. */
3159 crc &= 0x3f;
3161 /* Set the corresponding bit in the hash table. */
3162 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3165 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3166 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3167 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
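
/*
 * Worked example (illustrative): if ether_crc32_be() of a multicast
 * address yields a CRC whose low 6 bits are 45 (0x2d), the filter
 * selects mchash[45 >> 5] == mchash[1] and sets bit (45 & 0x1f) == 13,
 * i.e. MAR1 bit 13.  The chip repeats the same computation on each
 * incoming multicast frame and accepts it when that bit is set.
 */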
3171 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3173 struct jme_softc *sc = arg1;
3174 struct ifnet *ifp = &sc->arpcom.ac_if;
3177 ifnet_serialize_all(ifp);
3179 v = sc->jme_tx_coal_to;
3180 error = sysctl_handle_int(oidp, &v, 0, req);
3181 if (error || req->newptr == NULL)
3184 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3189 if (v != sc->jme_tx_coal_to) {
3190 sc->jme_tx_coal_to = v;
3191 if (ifp->if_flags & IFF_RUNNING)
3192 jme_set_tx_coal(sc);
3195 ifnet_deserialize_all(ifp);
3200 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3202 struct jme_softc *sc = arg1;
3203 struct ifnet *ifp = &sc->arpcom.ac_if;
3206 ifnet_serialize_all(ifp);
3208 v = sc->jme_tx_coal_pkt;
3209 error = sysctl_handle_int(oidp, &v, 0, req);
3210 if (error || req->newptr == NULL)
3213 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3218 if (v != sc->jme_tx_coal_pkt) {
3219 sc->jme_tx_coal_pkt = v;
3220 if (ifp->if_flags & IFF_RUNNING)
3221 jme_set_tx_coal(sc);
3224 ifnet_deserialize_all(ifp);
3229 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3231 struct jme_softc *sc = arg1;
3232 struct ifnet *ifp = &sc->arpcom.ac_if;
3235 ifnet_serialize_all(ifp);
3237 v = sc->jme_rx_coal_to;
3238 error = sysctl_handle_int(oidp, &v, 0, req);
3239 if (error || req->newptr == NULL)
3242 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3247 if (v != sc->jme_rx_coal_to) {
3248 sc->jme_rx_coal_to = v;
3249 if (ifp->if_flags & IFF_RUNNING)
3250 jme_set_rx_coal(sc);
3253 ifnet_deserialize_all(ifp);
3258 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3260 struct jme_softc *sc = arg1;
3261 struct ifnet *ifp = &sc->arpcom.ac_if;
3264 ifnet_serialize_all(ifp);
3266 v = sc->jme_rx_coal_pkt;
3267 error = sysctl_handle_int(oidp, &v, 0, req);
3268 if (error || req->newptr == NULL)
3271 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3276 if (v != sc->jme_rx_coal_pkt) {
3277 sc->jme_rx_coal_pkt = v;
3278 if (ifp->if_flags & IFF_RUNNING)
3279 jme_set_rx_coal(sc);
3282 ifnet_deserialize_all(ifp);
3287 jme_set_tx_coal(struct jme_softc *sc)
3291 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3293 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3294 PCCTX_COAL_PKT_MASK;
3295 reg |= PCCTX_COAL_TXQ0;
3296 CSR_WRITE_4(sc, JME_PCCTX, reg);
3300 jme_set_rx_coal(struct jme_softc *sc)
3305 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3307 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3308 PCCRX_COAL_PKT_MASK;
3309 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3310 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3313 #ifdef IFPOLL_ENABLE
3316 jme_npoll_status(struct ifnet *ifp)
3318 struct jme_softc *sc = ifp->if_softc;
3321 ASSERT_SERIALIZED(&sc->jme_serialize);
3323 status = CSR_READ_4(sc, JME_INTR_STATUS);
3324 if (status & INTR_RXQ_DESC_EMPTY) {
3325 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3326 jme_rx_restart(sc, status);
3331 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3333 struct jme_rxdata *rdata = arg;
3335 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3337 jme_rxeof(rdata, cycle);
3341 jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
3343 struct jme_txdata *tdata = arg;
3345 ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3347 jme_txeof(tdata);
3348 if (!ifq_is_empty(&ifp->if_snd))
3349 if_devstart(ifp);
3353 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3355 struct jme_softc *sc = ifp->if_softc;
3357 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3362 info->ifpi_status.status_func = jme_npoll_status;
3363 info->ifpi_status.serializer = &sc->jme_serialize;
3365 off = sc->jme_npoll_txoff;
3366 KKASSERT(off <= ncpus2);
3367 info->ifpi_tx[off].poll_func = jme_npoll_tx;
3368 info->ifpi_tx[off].arg = &sc->jme_cdata.jme_tx_data;
3369 info->ifpi_tx[off].serializer =
3370 &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3372 off = sc->jme_npoll_rxoff;
3373 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3374 struct jme_rxdata *rdata =
3375 &sc->jme_cdata.jme_rx_data[i];
3378 info->ifpi_rx[idx].poll_func = jme_npoll_rx;
3379 info->ifpi_rx[idx].arg = rdata;
3380 info->ifpi_rx[idx].serializer =
3381 &rdata->jme_rx_serialize;
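
/*
 * Example (illustrative, assuming ring i is registered at slot
 * jme_npoll_rxoff + i): with jme_npoll_rxoff == 2 and two RX rings,
 * ring 0 is polled on CPU 2 and ring 1 on CPU 3.  The sysctl handlers
 * below keep the offset a multiple of the ring count and below ncpus2.
 */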
3384 if (ifp->if_flags & IFF_RUNNING)
3385 jme_disable_intr(sc);
3386 ifp->if_npoll_cpuid = sc->jme_npoll_txoff;
3388 if (ifp->if_flags & IFF_RUNNING)
3389 jme_enable_intr(sc);
3390 ifp->if_npoll_cpuid = -1;
3395 jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3397 struct jme_softc *sc = (void *)arg1;
3398 struct ifnet *ifp = &sc->arpcom.ac_if;
3401 off = sc->jme_npoll_rxoff;
3402 error = sysctl_handle_int(oidp, &off, 0, req);
3403 if (error || req->newptr == NULL)
3408 ifnet_serialize_all(ifp);
3409 if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3413 sc->jme_npoll_rxoff = off;
3415 ifnet_deserialize_all(ifp);
3421 jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3423 struct jme_softc *sc = (void *)arg1;
3424 struct ifnet *ifp = &sc->arpcom.ac_if;
3427 off = sc->jme_npoll_txoff;
3428 error = sysctl_handle_int(oidp, &off, 0, req);
3429 if (error || req->newptr == NULL)
3434 ifnet_serialize_all(ifp);
3435 if (off >= ncpus2) {
3439 sc->jme_npoll_txoff = off;
3441 ifnet_deserialize_all(ifp);
3446 #endif /* IFPOLL_ENABLE */
3449 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3454 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3455 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3456 JME_RX_RING_ALIGN, 0,
3457 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3458 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3460 device_printf(rdata->jme_sc->jme_dev,
3461 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3464 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3465 rdata->jme_rx_ring_map = dmem.dmem_map;
3466 rdata->jme_rx_ring = dmem.dmem_addr;
3467 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3473 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3475 if ((paddr & 0xffffffff) == 0) {
3477 * Don't allow the lower 32 bits of the RX buffer's
3478 * physical address to be 0, else it would break the
3479 * hardware's pending RSS information delivery
3480 * detection on the RX path.
3488 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3493 lowaddr = BUS_SPACE_MAXADDR;
3494 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
3495 /* jme_rxbuf_dma_filter will be called */
3496 lowaddr = BUS_SPACE_MAXADDR_32BIT;
3499 /* Create tag for Rx buffers. */
3500 error = bus_dma_tag_create(
3501 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3502 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3503 lowaddr, /* lowaddr */
3504 BUS_SPACE_MAXADDR, /* highaddr */
3505 jme_rxbuf_dma_filter, NULL, /* filter, filterarg */
3506 MCLBYTES, /* maxsize */
3508 MCLBYTES, /* maxsegsize */
3509 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3510 &rdata->jme_rx_tag);
3512 device_printf(rdata->jme_sc->jme_dev,
3513 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3517 /* Create DMA maps for Rx buffers. */
3518 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3519 &rdata->jme_rx_sparemap);
3521 device_printf(rdata->jme_sc->jme_dev,
3522 "could not create %dth spare Rx dmamap.\n",
3524 bus_dma_tag_destroy(rdata->jme_rx_tag);
3525 rdata->jme_rx_tag = NULL;
3528 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3529 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3531 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3536 device_printf(rdata->jme_sc->jme_dev,
3537 "could not create %dth Rx dmamap "
3538 "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3540 for (j = 0; j < i; ++j) {
3541 rxd = &rdata->jme_rxdesc[j];
3542 bus_dmamap_destroy(rdata->jme_rx_tag,
3545 bus_dmamap_destroy(rdata->jme_rx_tag,
3546 rdata->jme_rx_sparemap);
3547 bus_dma_tag_destroy(rdata->jme_rx_tag);
3548 rdata->jme_rx_tag = NULL;
3556 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3560 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3561 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3563 if (status & rdata->jme_rx_coal) {
3564 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3565 jme_rxeof(rdata, -1);
3566 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3572 jme_enable_rss(struct jme_softc *sc)
3575 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3578 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3579 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3580 ("%s: invalid # of RX rings (%d)",
3581 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3583 rssc = RSSC_HASH_64_ENTRY;
3584 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3585 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3586 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3587 CSR_WRITE_4(sc, JME_RSSC, rssc);
3589 toeplitz_get_key(key, sizeof(key));
3590 for (i = 0; i < RSSKEY_NREGS; ++i) {
3593 keyreg = RSSKEY_REGVAL(key, i);
3594 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3596 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3600 * Create the redirect table in the following fashion:
3601 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3604 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3607 q = i % sc->jme_cdata.jme_rx_ring_cnt;
3608 ind |= q << (i * 8);
3610 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3612 for (i = 0; i < RSSTBL_NREGS; ++i)
3613 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
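
/*
 * Worked example (illustrative, assuming RSSTBL_REGSIZE == 4 byte
 * slots per register): with 4 RX rings, i % 4 cycles 0,1,2,3, so
 * ind == (0 << 0) | (1 << 8) | (2 << 16) | (3 << 24) == 0x03020100,
 * and every redirect-table register repeats that pattern, spreading
 * hash values evenly across the rings.
 */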
3617 jme_disable_rss(struct jme_softc *sc)
3619 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3623 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3625 struct jme_softc *sc = ifp->if_softc;
3627 ifnet_serialize_array_enter(sc->jme_serialize_arr,
3628 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3632 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3634 struct jme_softc *sc = ifp->if_softc;
3636 ifnet_serialize_array_exit(sc->jme_serialize_arr,
3637 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3641 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3643 struct jme_softc *sc = ifp->if_softc;
3645 return ifnet_serialize_array_try(sc->jme_serialize_arr,
3646 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3652 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3653 boolean_t serialized)
3655 struct jme_softc *sc = ifp->if_softc;
3657 ifnet_serialize_array_assert(sc->jme_serialize_arr,
3658 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
3662 #endif /* INVARIANTS */
3665 jme_msix_try_alloc(device_t dev)
3667 struct jme_softc *sc = device_get_softc(dev);
3668 struct jme_msix_data *msix;
3669 int error, i, r, msix_enable, msix_count;
3670 int offset, offset_def;
3672 msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
3673 KKASSERT(msix_count <= JME_NMSIX);
3675 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3678 * We leave the 1st MSI-X vector unused, so we
3679 * actually need msix_count + 1 MSI-X vectors.
3681 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3684 for (i = 0; i < msix_count; ++i)
3685 sc->jme_msix[i].jme_msix_rid = -1;
3690 * Setup status MSI-X
3693 msix = &sc->jme_msix[i++];
3694 msix->jme_msix_cpuid = 0;
3695 msix->jme_msix_arg = sc;
3696 msix->jme_msix_func = jme_msix_status;
3697 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3698 msix->jme_msix_intrs |=
3699 sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
3701 msix->jme_msix_serialize = &sc->jme_serialize;
3702 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
3703 device_get_nameunit(dev));
3709 offset_def = device_get_unit(dev) % ncpus2;
3710 offset = device_getenv_int(dev, "msix.txoff", offset_def);
3711 if (offset >= ncpus2) {
3712 device_printf(dev, "invalid msix.txoff %d, use %d\n",
3713 offset, offset_def);
3714 offset = offset_def;
3717 msix = &sc->jme_msix[i++];
3718 msix->jme_msix_cpuid = offset;
3719 sc->jme_tx_cpuid = msix->jme_msix_cpuid;
3720 msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data;
3721 msix->jme_msix_func = jme_msix_tx;
3722 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3723 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3724 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3725 device_get_nameunit(dev));
3731 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
3734 offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
3735 device_get_unit(dev)) % ncpus2;
3737 offset = device_getenv_int(dev, "msix.rxoff", offset_def);
3738 if (offset >= ncpus2 ||
3739 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3740 device_printf(dev, "invalid msix.rxoff %d, use %d\n",
3741 offset, offset_def);
3742 offset = offset_def;
3746 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3747 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3749 msix = &sc->jme_msix[i++];
3750 msix->jme_msix_cpuid = r + offset;
3751 KKASSERT(msix->jme_msix_cpuid < ncpus2);
3752 msix->jme_msix_arg = rdata;
3753 msix->jme_msix_func = jme_msix_rx;
3754 msix->jme_msix_intrs = rdata->jme_rx_coal;
3755 msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3756 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3757 "%s rx%d", device_get_nameunit(dev), r);
3760 KKASSERT(i == msix_count);
3762 error = pci_setup_msix(dev);
3766 /* Set up jme_msix_cnt early, so we can clean up on failure */
3767 sc->jme_msix_cnt = msix_count;
3769 for (i = 0; i < msix_count; ++i) {
3770 msix = &sc->jme_msix[i];
3772 msix->jme_msix_vector = i + 1;
3773 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3774 &msix->jme_msix_rid, msix->jme_msix_cpuid);
3778 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3779 &msix->jme_msix_rid, RF_ACTIVE);
3780 if (msix->jme_msix_res == NULL) {
3786 for (i = 0; i < JME_INTR_CNT; ++i) {
3787 uint32_t intr_mask = (1 << i);
3790 if ((JME_INTRS & intr_mask) == 0)
3793 for (x = 0; x < msix_count; ++x) {
3794 msix = &sc->jme_msix[x];
3795 if (msix->jme_msix_intrs & intr_mask) {
3798 reg = i / JME_MSINUM_FACTOR;
3799 KKASSERT(reg < JME_MSINUM_CNT);
3801 shift = (i % JME_MSINUM_FACTOR) * 4;
3803 sc->jme_msinum[reg] |=
3804 (msix->jme_msix_vector << shift);
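
/*
 * Example (illustrative, assuming JME_MSINUM_FACTOR == 8, i.e. eight
 * 4-bit vector fields per 32-bit MSINUM register, as the shift
 * computation above implies): interrupt source bit 10 lands in
 * register 10 / 8 == 1 at nibble offset (10 % 8) * 4 == bit 8, so an
 * MSI-X vector of 2 for that source merges 2 << 8 into jme_msinum[1].
 */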
3812 for (i = 0; i < JME_MSINUM_CNT; ++i) {
3813 device_printf(dev, "MSINUM%d: %#x\n", i,
3818 pci_enable_msix(dev);
3819 sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3827 jme_intr_alloc(device_t dev)
3829 struct jme_softc *sc = device_get_softc(dev);
3832 jme_msix_try_alloc(dev);
3834 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3835 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3836 &sc->jme_irq_rid, &irq_flags);
3838 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3839 &sc->jme_irq_rid, irq_flags);
3840 if (sc->jme_irq_res == NULL) {
3841 device_printf(dev, "can't allocate irq\n");
3849 jme_msix_free(device_t dev)
3851 struct jme_softc *sc = device_get_softc(dev);
3854 KKASSERT(sc->jme_msix_cnt > 1);
3856 for (i = 0; i < sc->jme_msix_cnt; ++i) {
3857 struct jme_msix_data *msix = &sc->jme_msix[i];
3859 if (msix->jme_msix_res != NULL) {
3860 bus_release_resource(dev, SYS_RES_IRQ,
3861 msix->jme_msix_rid, msix->jme_msix_res);
3862 msix->jme_msix_res = NULL;
3864 if (msix->jme_msix_rid >= 0) {
3865 pci_release_msix_vector(dev, msix->jme_msix_rid);
3866 msix->jme_msix_rid = -1;
3869 pci_teardown_msix(dev);
3873 jme_intr_free(device_t dev)
3875 struct jme_softc *sc = device_get_softc(dev);
3877 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3878 if (sc->jme_irq_res != NULL) {
3879 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3882 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3883 pci_release_msi(dev);
3890 jme_msix_tx(void *xtdata)
3892 struct jme_txdata *tdata = xtdata;
3893 struct jme_softc *sc = tdata->jme_sc;
3894 struct ifnet *ifp = &sc->arpcom.ac_if;
3896 ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3898 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3900 CSR_WRITE_4(sc, JME_INTR_STATUS,
3901 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3903 if (ifp->if_flags & IFF_RUNNING) {
3904 jme_txeof(tdata);
3905 if (!ifq_is_empty(&ifp->if_snd))
3906 if_devstart(ifp);
3909 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3913 jme_msix_rx(void *xrdata)
3915 struct jme_rxdata *rdata = xrdata;
3916 struct jme_softc *sc = rdata->jme_sc;
3917 struct ifnet *ifp = &sc->arpcom.ac_if;
3919 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3921 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);
3923 CSR_WRITE_4(sc, JME_INTR_STATUS,
3924 rdata->jme_rx_coal | rdata->jme_rx_comp);
3926 if (ifp->if_flags & IFF_RUNNING)
3927 jme_rxeof(rdata, -1);
3929 CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
3933 jme_msix_status(void *xsc)
3935 struct jme_softc *sc = xsc;
3936 struct ifnet *ifp = &sc->arpcom.ac_if;
3939 ASSERT_SERIALIZED(&sc->jme_serialize);
3941 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);
3943 status = CSR_READ_4(sc, JME_INTR_STATUS);
3945 if (status & INTR_RXQ_DESC_EMPTY) {
3946 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3947 if (ifp->if_flags & IFF_RUNNING)
3948 jme_rx_restart(sc, status);
3951 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
3955 jme_rx_restart(struct jme_softc *sc, uint32_t status)
3959 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3960 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
3962 if (status & rdata->jme_rx_empty) {
3963 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3964 jme_rxeof(rdata, -1);
3965 #ifdef JME_RSS_DEBUG
3966 rdata->jme_rx_emp++;
3968 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3971 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
3976 jme_set_msinum(struct jme_softc *sc)
3980 for (i = 0; i < JME_MSINUM_CNT; ++i)
3981 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3985 jme_intr_setup(device_t dev)
3987 struct jme_softc *sc = device_get_softc(dev);
3988 struct ifnet *ifp = &sc->arpcom.ac_if;
3991 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3992 return jme_msix_setup(dev);
3994 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3995 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3997 device_printf(dev, "could not set up interrupt handler.\n");
4001 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
4002 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
4007 jme_intr_teardown(device_t dev)
4009 struct jme_softc *sc = device_get_softc(dev);
4011 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
4012 jme_msix_teardown(dev, sc->jme_msix_cnt);
4014 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
4018 jme_msix_setup(device_t dev)
4020 struct jme_softc *sc = device_get_softc(dev);
4021 struct ifnet *ifp = &sc->arpcom.ac_if;
4024 for (x = 0; x < sc->jme_msix_cnt; ++x) {
4025 struct jme_msix_data *msix = &sc->jme_msix[x];
4028 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
4029 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
4030 &msix->jme_msix_handle, msix->jme_msix_serialize,
4031 msix->jme_msix_desc);
4033 device_printf(dev, "could not set up %s "
4034 "interrupt handler.\n", msix->jme_msix_desc);
4035 jme_msix_teardown(dev, x);
4039 ifp->if_cpuid = sc->jme_tx_cpuid;
4044 jme_msix_teardown(device_t dev, int msix_count)
4046 struct jme_softc *sc = device_get_softc(dev);
4049 for (x = 0; x < msix_count; ++x) {
4050 struct jme_msix_data *msix = &sc->jme_msix[x];
4052 bus_teardown_intr(dev, msix->jme_msix_res,
4053 msix->jme_msix_handle);
4058 jme_serialize_skipmain(struct jme_softc *sc)
4060 lwkt_serialize_array_enter(sc->jme_serialize_arr,
4061 sc->jme_serialize_cnt, 1);
4065 jme_deserialize_skipmain(struct jme_softc *sc)
4067 lwkt_serialize_array_exit(sc->jme_serialize_arr,
4068 sc->jme_serialize_cnt, 1);
4072 jme_enable_intr(struct jme_softc *sc)
4076 for (i = 0; i < sc->jme_serialize_cnt; ++i)
4077 lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);
4079 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
4083 jme_disable_intr(struct jme_softc *sc)
4087 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
4089 for (i = 0; i < sc->jme_serialize_cnt; ++i)
4090 lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);