/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
#define JME_TX_SERIALIZE	1
#define JME_RX_SERIALIZE	2

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
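
/*
 * Usage note: a call such as JME_RSS_DPRINTF(sc, 10, "ring%d\n", r)
 * only prints when the kernel is built with JME_RSS_DEBUG and the
 * per-device rss_debug sysctl (attached under hw.jmeX below) is at
 * least the given level; otherwise the macro compiles to a no-op.
 */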
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
static void	jme_npoll(struct ifnet *, struct ifpoll_info *);
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_msix_status(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_rxdata *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);
static void	jme_enable_intr(struct jme_softc *);
static void	jme_disable_intr(struct jme_softc *);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_rxdata *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_rxdata *);
static int	jme_rxring_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_filter(void *, bus_addr_t);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);
static void	jme_serialize_skipmain(struct jme_softc *);
static void	jme_deserialize_skipmain(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	  "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	  "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	DEVMETHOD_END
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};

static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 0;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
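
/*
 * NOTE: these TUNABLE_INTs are boot-time loader tunables; for example,
 * setting hw.jme.msix.enable="0" and hw.jme.msi.enable="0" in
 * loader.conf forces the driver back to a legacy INTx interrupt, and
 * hw.jme.rx_ring_count overrides the number of RX rings (the default
 * of 0 lets the driver pick based on the system).
 */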
static void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
	struct jme_desc *desc;

	desc = rxd->rx_desc;
	desc->buflen = htole32(MCLBYTES);
	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
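
/*
 * NOTE: jme_setup_rxdesc() is what posts an RX buffer to the chip:
 * setting JME_RD_OWN hands the descriptor to the hardware,
 * JME_RD_INTR requests an interrupt for it, and JME_RD_64BIT marks
 * the descriptor as using the 64-bit address format.
 */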
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
	struct jme_softc *sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (sc->jme_phyaddr != phy)
			return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)

	device_printf(sc->jme_dev, "phy read timeout: "
	    "phy %d, reg %d\n", phy, reg);

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
	struct jme_softc *sc = device_get_softc(dev);

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (sc->jme_phyaddr != phy)

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)

	device_printf(sc->jme_dev, "phy write timeout: "
	    "phy %d, reg %d\n", phy, reg);
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	jme_serialize_skipmain(sc);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_has_link = FALSE;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_has_link = TRUE;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_has_link = TRUE;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor ring.  So the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the value of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring them afterwards is not sufficient to guarantee
	 * correct MAC state, because stopping the MAC can take a while
	 * and the hardware might have updated the JME_TXNDA/JME_RXNDA
	 * registers during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset the
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_has_link) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	if (!(ifp->if_flags & IFF_NPOLLING)) {
		/* Reenable interrupts. */
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	}
done:
	jme_deserialize_skipmain(sc);
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
static int
jme_probe(device_t dev)
	const struct jme_dev *sp;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)

	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)

	device_printf(sc->jme_dev, "EEPROM idle timeout!\n");

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)

	device_printf(sc->jme_dev, "EEPROM read timeout!\n");

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
	uint8_t fup, reg, val;

	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)

	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
				eaddr[reg - JME_PAR0] = val;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)

		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);

	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
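
/*
 * NOTE: the station address registers are little-endian with respect
 * to the MAC address: PAR0 carries eaddr[0..3] in its four bytes and
 * PAR1 carries eaddr[4..5] in its low two bytes, which is why the
 * bytes are picked out with 8-bit shifts above.
 */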
static int
jme_attach(device_t dev)
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt, coal_max;
	uint8_t eaddr[ETHER_ADDR_LEN];
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
	    jme_tx_desc_count);
	sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
	    JME_NDESC_ALIGN);
	if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;

	sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
	    jme_rx_ring_count);
	sc->jme_cdata.jme_rx_ring_cnt =
	    if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);

	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;

	KKASSERT(i == JME_TX_SERIALIZE);
	sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;

	KKASSERT(i == JME_RX_SERIALIZE);
	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}
	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;
	sc->jme_cdata.jme_sc = sc;
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */
	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers, it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x", did);
	}

	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}
	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to get the correct PHY
	 * address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		device_printf(dev, "PHY is at address %d.\n",
		    sc->jme_phyaddr);
	}
	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		device_printf(dev, "Read request size : %d bytes.\n",
		    128 << ((ctrl >> 12) & 0x07));
		device_printf(dev, "TLP payload size : %d bytes.\n",
		    128 << ((ctrl >> 5) & 0x07));
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
		    device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->jme_npoll_rxoff = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	offset_def = sc->jme_npoll_rxoff;
	offset = device_getenv_int(dev, "npoll.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid npoll.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}
	sc->jme_npoll_txoff = offset;
#endif	/* IFPOLL_ENABLE */
	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descriptors is set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned to JME_NDESC_ALIGN (16 currently).
	 */
	coal_max = sc->jme_cdata.jme_tx_desc_cnt / 2;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
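
	/*
	 * Worked example of the clamp above: with, say, 256 TX
	 * descriptors, coal_max = 256 / 2 = 128, so a coalescing
	 * packet count larger than 128 is pulled down to 128.  This
	 * keeps the packet-count coalescing threshold reachable
	 * before the ring itself runs out of descriptors.
	 */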
	/* Allocate DMA stuff */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = jme_npoll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd,
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= JME_CSUM_FEATURES;
	ifp->if_hwassist |= CSUM_TSO;
	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);
fail:
	jme_detach(dev);
	return (error);
static int
jme_detach(device_t dev)
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
static void
jme_sysctl_node(struct jme_softc *sc)
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_ring_cnt,
	    0, "RX ring count");

#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_emp", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
		    "# of times RX ring empty");
	}
#endif	/* JME_RSS_DEBUG */

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_rxoff", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_txoff", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif
static int
jme_dma_alloc(struct jme_softc *sc)
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	sc->jme_cdata.jme_txdesc =
	    kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rxdesc =
		    kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuff for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuff for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuff for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuff for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuff for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
static void
jme_dma_free(struct jme_softc *sc)
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
	return jme_suspend(dev);

/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as we
 * have no control after powering off.  If the renegotiation fails, WOL
 * may not work.  Running at 1Gbps draws more power than the 375mA at
 * 3.3V specified in the PCI specification, and that would result in
 * power to the ethernet controller being shut down completely.
 *
 * Save the current negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as power down/resetting to 100Mbps may be better handled in the
 * suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
	struct mii_data *mii;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);

	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {

	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);

	/* Poll link state until jme(4) gets a 10/100 link. */
	for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
		if ((mii->mii_media_status & IFM_AVALID) != 0) {
			switch (IFM_SUBTYPE(mii->mii_media_active)) {

		pause("jmelnk", hz);
	}

	if (i == MII_ANEGTICKS_GIGE)
		device_printf(sc->jme_dev, "establishing link failed, "
		    "WOL may not work!");

	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
static void
jme_setwol(struct jme_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
static int
jme_suspend(device_t dev)
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
	jme_setwol(sc);
	ifnet_deserialize_all(ifp);

	return (0);

static int
jme_resume(device_t dev)
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int pmc;

	ifnet_serialize_all(ifp);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME, clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
static int
jme_tso_pullup(struct mbuf **mp)
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));
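
	/*
	 * The TSO setup path peeks at the Ethernet/IP/TCP headers, so
	 * the full header chain (hoff + iphlen + thoff bytes) must be
	 * contiguous in the first mbuf; the m_pullup() below enforces
	 * that before the packet is handed to jme_encap().
	 */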
	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	return 0;
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
	    ("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
		    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;

	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;
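
	/*
	 * Descriptor accounting note: tx_ndesc was primed to (1 - i)
	 * above, so after adding nsegs it totals nsegs + 1 for the
	 * 64-bit chain format (all payload segments plus the leading
	 * symbol descriptor) and exactly nsegs for the 32-bit format,
	 * where the first descriptor already carries segment 0.
	 */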
static void
jme_start(struct ifnet *ifp)
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	if (!sc->jme_has_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE >
		    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy
		 * load, so cache the TXCSR value and write the ORed
		 * value with the kick command to the TXCSR.  This
		 * saves one register access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
static void
jme_watchdog(struct ifnet *ifp)
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (!sc->jme_has_link) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but the availability
			 * of Tx checksum offload should be checked
			 * against the new MTU size, as the FIFO size is
			 * just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
static void
jme_mac_config(struct jme_softc *sc)
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;

		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;

		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}

	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
static void
jme_intr(void *xsc)
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
			    jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx
			 * buffers.  Reading RXCSR takes a very long
			 * time under heavy load, so cache the RXCSR
			 * value and write the ORed value with the kick
			 * command to the RXCSR.  This saves one
			 * register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
static void
jme_txeof(struct jme_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int cons;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * This chip will always update the TX descriptor's
		 * buflen field, and this updating always happens
		 * after the OWN bit is cleared, so even if the OWN
		 * bit is cleared by the chip we still can't be sure
		 * whether the buflen field has been updated yet.
		 * To avoid this race, we wait for the next TX
		 * descriptor's OWN bit to be cleared by the chip
		 * before reusing this TX descriptor.
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc,
		    sc->jme_cdata.jme_tx_desc_cnt);
		next_txd = &sc->jme_cdata.jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* 1 for symbol TX descriptor */
	if (sc->jme_cdata.jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE <=
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
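
	/*
	 * NOTE: IFF_OACTIVE is cleared above only once at least
	 * JME_TXD_SPARE descriptors (enough for one worst-case frame)
	 * are free beyond the JME_TXD_RSVD reserve -- the same
	 * threshold jme_start() blocks on -- so the queue is not
	 * re-enabled just to stall again on the very next frame.
	 */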
static __inline void
jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
	int i;

	for (i = 0; i < count; ++i) {
		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
	}

static __inline struct pktinfo *
jme_pktinfo(struct pktinfo *pi, uint32_t flags)
	if (flags & JME_RD_IPV4)
		pi->pi_netisr = NETISR_IP;
	else if (flags & JME_RD_IPV6)
		pi->pi_netisr = NETISR_IPV6;
	else
		return NULL;

	pi->pi_flags = 0;
	pi->pi_l3proto = IPPROTO_UNKNOWN;

	if (flags & JME_RD_MORE_FRAG)
		pi->pi_flags |= PKTINFO_FLAG_FRAG;
	else if (flags & JME_RD_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else if (flags & JME_RD_UDP)
		pi->pi_l3proto = IPPROTO_UDP;
	else
		pi = NULL;

	return pi;
/* Receive a frame. */
static void
jme_rxpkt(struct jme_rxdata *rdata)
	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];

	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);
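
	/*
	 * NOTE: on receive the chip reuses the descriptor fields: the
	 * buflen word carries the RX status (including the segment
	 * count extracted via JME_RX_NSEGS()), while addr_hi/addr_lo
	 * return the RSS hash value and hash information.  A single
	 * frame may therefore span several descriptors/mbuf clusters.
	 */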
	/* Skip the first descriptor. */
	JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);

	/*
	 * Clear the OWN bit of the following RX descriptors; hardware
	 * will not clear the OWN bit except on the first RX descriptor.
	 *
	 * Since the first RX descriptor is setup, i.e. OWN bit on,
	 * before its following RX descriptors, leaving the OWN bit on
	 * the following RX descriptors will trick the hardware into
	 * thinking that the following RX descriptors are ready to be
	 * used too.
	 */
	for (count = 1; count < nsegs; count++,
	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
		rdata->jme_rx_ring[cons].flags = 0;

	cons = rdata->jme_rx_cons;
2295 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2296 "hash 0x%08x, hash info 0x%08x\n",
2297 rdata->jme_rx_idx, flags, hash, hashinfo);
2299 if (status & JME_RX_ERR_STAT) {
2301 jme_discard_rxbufs(rdata, cons, nsegs);
2302 #ifdef JME_SHOW_ERRORS
2303 if_printf(ifp, "%s : receive error = 0x%b\n",
2304 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2306 rdata->jme_rx_cons += nsegs;
2307 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2311 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2312 for (count = 0; count < nsegs; count++,
2313 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2314 rxd = &rdata->jme_rxdesc[cons];
2317 /* Add a new receive buffer to the ring. */
2318 if (jme_newbuf(rdata, rxd, 0) != 0) {
2321 jme_discard_rxbufs(rdata, cons, nsegs - count);
2322 if (rdata->jme_rxhead != NULL) {
2323 m_freem(rdata->jme_rxhead);
2324 JME_RXCHAIN_RESET(rdata);
2330 * Assume we've received a full-sized frame.
2331 * The actual size is fixed up when we encounter the end of
2332 * the multi-segmented frame.
2334 mp->m_len = MCLBYTES;
2336 /* Chain received mbufs. */
2337 if (rdata->jme_rxhead == NULL) {
2338 rdata->jme_rxhead = mp;
2339 rdata->jme_rxtail = mp;
2342 * The receive processor can receive a maximum frame
2343 * size of 65535 bytes.
2345 rdata->jme_rxtail->m_next = mp;
2346 rdata->jme_rxtail = mp;
2349 if (count == nsegs - 1) {
2350 struct pktinfo pi0, *pi;
2352 /* Last desc. for this frame. */
2353 m = rdata->jme_rxhead;
2354 m->m_pkthdr.len = rdata->jme_rxlen;
2356 /* Set first mbuf size. */
2357 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2358 /* Set last mbuf size. */
2359 mp->m_len = rdata->jme_rxlen -
2360 ((MCLBYTES - JME_RX_PAD_BYTES) +
2361 (MCLBYTES * (nsegs - 2)));
2363 m->m_len = rdata->jme_rxlen;
2365 m->m_pkthdr.rcvif = ifp;
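/*
 * E.g. (sketch): with MCLBYTES = 2048 and JME_RX_PAD_BYTES = 10,
 * a 5000 byte frame spans nsegs = 3 clusters and jme_rxlen is
 * 4990; the fixed-up mbuf lengths above are then 2038, 2048 and
 * 4990 - (2038 + 2048) = 904 bytes.
 */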
2368 * Account for the 10 bytes of auto padding which is used
2369 * to align the IP header on a 32-bit boundary. Also note
2370 * that the CRC bytes are automatically removed by the hardware.
2373 m->m_data += JME_RX_PAD_BYTES;
2375 /* Set checksum information. */
2376 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2377 (flags & JME_RD_IPV4)) {
2378 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2379 if (flags & JME_RD_IPCSUM)
2380 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2381 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2382 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2383 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2384 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2385 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2386 m->m_pkthdr.csum_flags |=
2387 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2388 m->m_pkthdr.csum_data = 0xffff;
2392 /* Check for VLAN tagged packets. */
2393 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2394 (flags & JME_RD_VLAN_TAG)) {
2395 m->m_pkthdr.ether_vlantag =
2396 flags & JME_RD_VLAN_MASK;
2397 m->m_flags |= M_VLANTAG;
2402 if (ifp->if_capenable & IFCAP_RSS)
2403 pi = jme_pktinfo(&pi0, flags);
2408 (hashinfo & JME_RD_HASH_FN_MASK) ==
2409 JME_RD_HASH_FN_TOEPLITZ) {
2410 m->m_flags |= (M_HASH | M_CKHASH);
2411 m->m_pkthdr.hash = toeplitz_hash(hash);
2414 #ifdef JME_RSS_DEBUG
2416 JME_RSS_DPRINTF(rdata->jme_sc, 10,
2417 "isr %d flags %08x, l3 %d %s\n",
2418 pi->pi_netisr, pi->pi_flags,
2420 (m->m_flags & M_HASH) ? "hash" : "");
2425 ether_input_pkt(ifp, m, pi);
2427 /* Reset mbuf chains. */
2428 JME_RXCHAIN_RESET(rdata);
2429 #ifdef JME_RSS_DEBUG
2430 rdata->jme_rx_pkt++;
2435 rdata->jme_rx_cons += nsegs;
2436 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2440 jme_rxeof(struct jme_rxdata *rdata, int count)
2442 struct jme_desc *desc;
2446 #ifdef IFPOLL_ENABLE
2447 if (count >= 0 && count-- == 0)
2450 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2451 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2453 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2457 * Check the number of segments against the received bytes.
2458 * A non-matching value would indicate that the hardware
2459 * is still trying to update the Rx descriptors. I'm not
2460 * sure whether this check is needed.
2462 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2463 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2464 if (nsegs != howmany(pktlen, MCLBYTES)) {
2465 if_printf(&rdata->jme_sc->arpcom.ac_if,
2466 "RX fragment count(%d) and "
2467 "packet size(%d) mismach\n", nsegs, pktlen);
2473 * The RSS hash and hash information may _not_ be set by the
2474 * hardware even if the OWN bit is cleared and the VALID bit is set.
2477 * If the RSS information is not delivered by the hardware
2478 * yet, we MUST NOT accept this packet, let alone reuse
2479 * its RX descriptor. If this packet were accepted and its
2480 * RX descriptor reused before the hardware delivered the
2481 * RSS information, the RX buffer's address would be trashed
2482 * by the RSS information delivered by the hardware.
2484 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2485 struct jme_rxdesc *rxd;
2488 hashinfo = le32toh(desc->addr_lo);
2489 rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2492 * This test should be enough to detect the pending
2493 * RSS information delivery, given:
2494 * - If RSS hash is not calculated, the hashinfo
2495 * will be 0. However, the lower 32 bits of an RX
2496 * buffer's physical address will never be 0.
2497 * (see jme_rxbuf_dma_filter)
2498 * - If RSS hash is calculated, the lowest 4 bits
2499 * of hashinfo will be set, while the RX buffers
2500 * are at least 2K aligned.
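 *
 * E.g. (sketch): a 2K-aligned RX buffer at 0x12345800 makes
 * JME_ADDR_LO(rx_paddr) read 0x12345800 with its low 4 bits
 * clear, while a written-back hashinfo has some of its low
 * 4 bits set; equality below can thus only mean that the
 * hardware has not written the RSS information back yet.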
2502 if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2503 #ifdef JME_SHOW_RSSWB
2504 if_printf(&rdata->jme_sc->arpcom.ac_if,
2505 "RSS is not written back yet\n");
2511 /* Received a frame. */
2519 struct jme_softc *sc = xsc;
2520 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2522 lwkt_serialize_enter(&sc->jme_serialize);
2524 sc->jme_in_tick = TRUE;
2526 sc->jme_in_tick = FALSE;
2528 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2530 lwkt_serialize_exit(&sc->jme_serialize);
2534 jme_reset(struct jme_softc *sc)
2538 /* Make sure that TX and RX are stopped */
2543 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2547 * Hold the reset bit before stopping the reset.
2550 /* Disable TXMAC and TXOFL clock sources */
2551 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2552 /* Disable RXMAC clock source */
2553 val = CSR_READ_4(sc, JME_GPREG1);
2554 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2556 CSR_READ_4(sc, JME_GHC);
2559 CSR_WRITE_4(sc, JME_GHC, 0);
2561 CSR_READ_4(sc, JME_GHC);
2564 * Clear the reset bit after stopping the reset.
2567 /* Enable TXMAC and TXOFL clock sources */
2568 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2569 /* Enable RXMAC clock source */
2570 val = CSR_READ_4(sc, JME_GPREG1);
2571 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2573 CSR_READ_4(sc, JME_GHC);
2575 /* Disable TXMAC and TXOFL clock sources */
2576 CSR_WRITE_4(sc, JME_GHC, 0);
2577 /* Disable RXMAC clock source */
2578 val = CSR_READ_4(sc, JME_GPREG1);
2579 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2581 CSR_READ_4(sc, JME_GHC);
2583 /* Enable TX and RX */
2584 val = CSR_READ_4(sc, JME_TXCSR);
2585 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2586 val = CSR_READ_4(sc, JME_RXCSR);
2587 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2589 CSR_READ_4(sc, JME_TXCSR);
2590 CSR_READ_4(sc, JME_RXCSR);
2592 /* Enable TXMAC and TXOFL clock sources */
2593 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2594 /* Enable RXMAC clock source */
2595 val = CSR_READ_4(sc, JME_GPREG1);
2596 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2598 CSR_READ_4(sc, JME_GHC);
2600 /* Stop TX and RX */
2608 struct jme_softc *sc = xsc;
2609 struct ifnet *ifp = &sc->arpcom.ac_if;
2610 struct mii_data *mii;
2611 uint8_t eaddr[ETHER_ADDR_LEN];
2616 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2619 * Cancel any pending I/O.
2624 * Reset the chip to a known state.
2629 * Set up the MSI/MSI-X vector to interrupt source mapping.
2633 if (JME_ENABLE_HWRSS(sc))
2636 jme_disable_rss(sc);
2638 /* Init RX descriptors */
2639 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2640 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2642 if_printf(ifp, "initialization failed: "
2643 "no memory for %dth RX ring.\n", r);
2649 /* Init TX descriptors */
2650 jme_init_tx_ring(sc);
2652 /* Initialize shadow status block. */
2655 /* Reprogram the station address. */
2656 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2657 CSR_WRITE_4(sc, JME_PAR0,
2658 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2659 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
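/*
 * E.g. (sketch): for the station address 00:1b:2c:3d:4e:5f the
 * writes above are PAR0 = 0x3d2c1b00 and PAR1 = 0x00005f4e,
 * i.e. the MAC address packed in little-endian byte order.
 */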
2662 * Configure Tx queue.
2663 * Tx priority queue weight value : 0
2664 * Tx FIFO threshold for processing next packet : 16QW
2665 * Maximum Tx DMA length : 512
2666 * Allow Tx DMA burst.
2668 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2669 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2670 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2671 sc->jme_txcsr |= sc->jme_tx_dma_size;
2672 sc->jme_txcsr |= TXCSR_DMA_BURST;
2673 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2675 /* Set Tx descriptor counter. */
2676 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
2678 /* Set Tx ring address to the hardware. */
2679 paddr = sc->jme_cdata.jme_tx_ring_paddr;
2680 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2681 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2683 /* Configure TxMAC parameters. */
2684 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2685 reg |= TXMAC_THRESH_1_PKT;
2686 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2687 CSR_WRITE_4(sc, JME_TXMAC, reg);
2690 * Configure Rx queue.
2691 * FIFO full threshold for transmitting Tx pause packet : 128T
2692 * FIFO threshold for processing next packet : 128QW
2694 * Max Rx DMA length : 128
2695 * Rx descriptor retry : 32
2696 * Rx descriptor retry time gap : 256ns
2697 * Don't receive runt/bad frames.
2699 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2702 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2703 * than 4K bytes will suffer from Rx FIFO overruns. So
2704 * decrease the FIFO threshold to reduce the FIFO overruns for
2705 * frames larger than 4000 bytes.
2706 * For best performance of standard MTU sized frames use
2707 * maximum allowable FIFO threshold, 128QW.
2709 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2711 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2713 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2715 /* Improve PCI Express compatibility */
2716 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2718 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2719 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2720 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2721 /* XXX TODO DROP_BAD */
2723 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2724 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2726 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2728 /* Set Rx descriptor counter. */
2729 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2731 /* Set Rx ring address to the hardware. */
2732 paddr = rdata->jme_rx_ring_paddr;
2733 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2734 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2737 /* Clear receive filter. */
2738 CSR_WRITE_4(sc, JME_RXMAC, 0);
2740 /* Set up the receive filter. */
2745 * Disable all WOL bits as WOL can interfere with normal
2746 * Rx operation. Also clear WOL detection status bits.
2748 reg = CSR_READ_4(sc, JME_PMCS);
2749 reg &= ~PMCS_WOL_ENB_MASK;
2750 CSR_WRITE_4(sc, JME_PMCS, reg);
2753 * Pad 10 bytes right before the received frame. This greatly
2754 * helps Rx performance on strict-alignment architectures as
2755 * the driver does not need to copy the frame to align the payload.
2757 reg = CSR_READ_4(sc, JME_RXMAC);
2758 reg |= RXMAC_PAD_10BYTES;
2760 if (ifp->if_capenable & IFCAP_RXCSUM)
2761 reg |= RXMAC_CSUM_ENB;
2762 CSR_WRITE_4(sc, JME_RXMAC, reg);
2764 /* Configure general purpose reg0 */
2765 reg = CSR_READ_4(sc, JME_GPREG0);
2766 reg &= ~GPREG0_PCC_UNIT_MASK;
2767 /* Set the PCC timer resolution to the microsecond unit. */
2768 reg |= GPREG0_PCC_UNIT_US;
2770 * Disable all shadow register posting as we have to read
2771 * the JME_INTR_STATUS register in jme_intr. Also it seems
2772 * hard to synchronize the interrupt status between the
2773 * hardware and software with shadow posting due to the
2774 * requirements of bus_dmamap_sync(9).
2776 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2777 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2778 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2779 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2780 /* Disable posting of DW0. */
2781 reg &= ~GPREG0_POST_DW0_ENB;
2782 /* Clear PME message. */
2783 reg &= ~GPREG0_PME_ENB;
2784 /* Set PHY address. */
2785 reg &= ~GPREG0_PHY_ADDR_MASK;
2786 reg |= sc->jme_phyaddr;
2787 CSR_WRITE_4(sc, JME_GPREG0, reg);
2789 /* Configure Tx queue 0 packet completion coalescing. */
2790 jme_set_tx_coal(sc);
2792 /* Configure Rx queues packet completion coalescing. */
2793 jme_set_rx_coal(sc);
2795 /* Configure shadow status block but don't enable posting. */
2796 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2797 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2798 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2800 /* Disable Timer 1 and Timer 2. */
2801 CSR_WRITE_4(sc, JME_TIMER1, 0);
2802 CSR_WRITE_4(sc, JME_TIMER2, 0);
2804 /* Configure retry transmit period, retry limit value. */
2805 CSR_WRITE_4(sc, JME_TXTRHD,
2806 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2807 TXTRHD_RT_PERIOD_MASK) |
2808 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2809 TXTRHD_RT_LIMIT_MASK));
2811 #ifdef IFPOLL_ENABLE
2812 if (!(ifp->if_flags & IFF_NPOLLING))
2814 /* Initialize the interrupt mask. */
2815 jme_enable_intr(sc);
2816 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2819 * Enabling Tx/Rx DMA engines and Rx queue processing is
2820 * done after detection of valid link in jme_miibus_statchg.
2822 sc->jme_has_link = FALSE;
2824 /* Set the current media. */
2825 mii = device_get_softc(sc->jme_miibus);
2828 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2830 ifp->if_flags |= IFF_RUNNING;
2831 ifp->if_flags &= ~IFF_OACTIVE;
2835 jme_stop(struct jme_softc *sc)
2837 struct ifnet *ifp = &sc->arpcom.ac_if;
2838 struct jme_txdesc *txd;
2839 struct jme_rxdesc *rxd;
2840 struct jme_rxdata *rdata;
2843 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2846 * Mark the interface down and cancel the watchdog timer.
2848 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2851 callout_stop(&sc->jme_tick_ch);
2852 sc->jme_has_link = FALSE;
2855 * Disable interrupts.
2857 jme_disable_intr(sc);
2858 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2860 /* Disable updating shadow status block. */
2861 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2862 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2864 /* Stop receiver, transmitter. */
2869 * Free partially finished RX segments
2871 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2872 rdata = &sc->jme_cdata.jme_rx_data[r];
2873 if (rdata->jme_rxhead != NULL)
2874 m_freem(rdata->jme_rxhead);
2875 JME_RXCHAIN_RESET(rdata);
2879 * Free RX and TX mbufs still in the queues.
2881 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2882 rdata = &sc->jme_cdata.jme_rx_data[r];
2883 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2884 rxd = &rdata->jme_rxdesc[i];
2885 if (rxd->rx_m != NULL) {
2886 bus_dmamap_unload(rdata->jme_rx_tag,
2893 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2894 txd = &sc->jme_cdata.jme_txdesc[i];
2895 if (txd->tx_m != NULL) {
2896 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2906 jme_stop_tx(struct jme_softc *sc)
2911 reg = CSR_READ_4(sc, JME_TXCSR);
2912 if ((reg & TXCSR_TX_ENB) == 0)
2914 reg &= ~TXCSR_TX_ENB;
2915 CSR_WRITE_4(sc, JME_TXCSR, reg);
2916 for (i = JME_TIMEOUT; i > 0; i--) {
2918 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2922 device_printf(sc->jme_dev, "stopping transmitter timed out!\n");
2926 jme_stop_rx(struct jme_softc *sc)
2931 reg = CSR_READ_4(sc, JME_RXCSR);
2932 if ((reg & RXCSR_RX_ENB) == 0)
2934 reg &= ~RXCSR_RX_ENB;
2935 CSR_WRITE_4(sc, JME_RXCSR, reg);
2936 for (i = JME_TIMEOUT; i > 0; i--) {
2938 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2942 device_printf(sc->jme_dev, "stopping receiver timed out!\n");
2946 jme_init_tx_ring(struct jme_softc *sc)
2948 struct jme_chain_data *cd;
2949 struct jme_txdesc *txd;
2952 sc->jme_cdata.jme_tx_prod = 0;
2953 sc->jme_cdata.jme_tx_cons = 0;
2954 sc->jme_cdata.jme_tx_cnt = 0;
2956 cd = &sc->jme_cdata;
2957 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2958 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2959 txd = &sc->jme_cdata.jme_txdesc[i];
2961 txd->tx_desc = &cd->jme_tx_ring[i];
2967 jme_init_ssb(struct jme_softc *sc)
2969 struct jme_chain_data *cd;
2971 cd = &sc->jme_cdata;
2972 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2976 jme_init_rx_ring(struct jme_rxdata *rdata)
2978 struct jme_rxdesc *rxd;
2981 KKASSERT(rdata->jme_rxhead == NULL &&
2982 rdata->jme_rxtail == NULL &&
2983 rdata->jme_rxlen == 0);
2984 rdata->jme_rx_cons = 0;
2986 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2987 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2990 rxd = &rdata->jme_rxdesc[i];
2992 rxd->rx_desc = &rdata->jme_rx_ring[i];
2993 error = jme_newbuf(rdata, rxd, 1);
3001 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
3004 bus_dma_segment_t segs;
3008 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3012 * The JMC250 has a 64-bit boundary alignment limitation, so
3013 * jme(4) takes advantage of the hardware's 10-byte padding
3014 * feature in order not to copy the entire frame to align the IP header on a 32-bit boundary.
3017 m->m_len = m->m_pkthdr.len = MCLBYTES;
3019 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
3020 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
3025 if_printf(&rdata->jme_sc->arpcom.ac_if,
3026 "can't load RX mbuf\n");
3031 if (rxd->rx_m != NULL) {
3032 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
3033 BUS_DMASYNC_POSTREAD);
3034 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
3036 map = rxd->rx_dmamap;
3037 rxd->rx_dmamap = rdata->jme_rx_sparemap;
3038 rdata->jme_rx_sparemap = map;
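/*
 * Note (sketch): the spare map swap above is only reached after
 * a successful load, so a failing bus_dmamap_load_mbuf_segment()
 * leaves the old mbuf and its dmamap intact and the descriptor
 * can simply be recycled with its previous buffer.
 */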
3040 rxd->rx_paddr = segs.ds_addr;
3042 jme_setup_rxdesc(rxd);
3047 jme_set_vlan(struct jme_softc *sc)
3049 struct ifnet *ifp = &sc->arpcom.ac_if;
3052 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3054 reg = CSR_READ_4(sc, JME_RXMAC);
3055 reg &= ~RXMAC_VLAN_ENB;
3056 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3057 reg |= RXMAC_VLAN_ENB;
3058 CSR_WRITE_4(sc, JME_RXMAC, reg);
3062 jme_set_filter(struct jme_softc *sc)
3064 struct ifnet *ifp = &sc->arpcom.ac_if;
3065 struct ifmultiaddr *ifma;
3070 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3072 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3073 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3077 * Always accept frames destined to our station address.
3078 * Always accept broadcast frames.
3080 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3082 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3083 if (ifp->if_flags & IFF_PROMISC)
3084 rxcfg |= RXMAC_PROMISC;
3085 if (ifp->if_flags & IFF_ALLMULTI)
3086 rxcfg |= RXMAC_ALLMULTI;
3087 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3088 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3089 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3094 * Set up the multicast address filter by passing all multicast
3095 * addresses through a CRC generator, and then using the low-order
3096 * 6 bits as an index into the 64 bit multicast hash table. The
3097 * high order bits select the register, while the rest of the bits
3098 * select the bit within the register.
3100 rxcfg |= RXMAC_MULTICAST;
3101 bzero(mchash, sizeof(mchash));
3103 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3104 if (ifma->ifma_addr->sa_family != AF_LINK)
3106 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3107 ifma->ifma_addr), ETHER_ADDR_LEN);
3109 /* Just want the 6 least significant bits. */
3112 /* Set the corresponding bit in the hash table. */
3113 mchash[crc >> 5] |= 1 << (crc & 0x1f);
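/*
 * E.g. (sketch): a CRC whose 6 low-order bits are 0x25 sets bit
 * (0x25 & 0x1f) = 5 of mchash[0x25 >> 5] = mchash[1], i.e. bit 5
 * of MAR1 written below.
 */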
3116 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3117 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3118 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3122 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3124 struct jme_softc *sc = arg1;
3125 struct ifnet *ifp = &sc->arpcom.ac_if;
3128 ifnet_serialize_all(ifp);
3130 v = sc->jme_tx_coal_to;
3131 error = sysctl_handle_int(oidp, &v, 0, req);
3132 if (error || req->newptr == NULL)
3135 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3140 if (v != sc->jme_tx_coal_to) {
3141 sc->jme_tx_coal_to = v;
3142 if (ifp->if_flags & IFF_RUNNING)
3143 jme_set_tx_coal(sc);
3146 ifnet_deserialize_all(ifp);
3151 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3153 struct jme_softc *sc = arg1;
3154 struct ifnet *ifp = &sc->arpcom.ac_if;
3157 ifnet_serialize_all(ifp);
3159 v = sc->jme_tx_coal_pkt;
3160 error = sysctl_handle_int(oidp, &v, 0, req);
3161 if (error || req->newptr == NULL)
3164 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3169 if (v != sc->jme_tx_coal_pkt) {
3170 sc->jme_tx_coal_pkt = v;
3171 if (ifp->if_flags & IFF_RUNNING)
3172 jme_set_tx_coal(sc);
3175 ifnet_deserialize_all(ifp);
3180 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3182 struct jme_softc *sc = arg1;
3183 struct ifnet *ifp = &sc->arpcom.ac_if;
3186 ifnet_serialize_all(ifp);
3188 v = sc->jme_rx_coal_to;
3189 error = sysctl_handle_int(oidp, &v, 0, req);
3190 if (error || req->newptr == NULL)
3193 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3198 if (v != sc->jme_rx_coal_to) {
3199 sc->jme_rx_coal_to = v;
3200 if (ifp->if_flags & IFF_RUNNING)
3201 jme_set_rx_coal(sc);
3204 ifnet_deserialize_all(ifp);
3209 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3211 struct jme_softc *sc = arg1;
3212 struct ifnet *ifp = &sc->arpcom.ac_if;
3215 ifnet_serialize_all(ifp);
3217 v = sc->jme_rx_coal_pkt;
3218 error = sysctl_handle_int(oidp, &v, 0, req);
3219 if (error || req->newptr == NULL)
3222 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3227 if (v != sc->jme_rx_coal_pkt) {
3228 sc->jme_rx_coal_pkt = v;
3229 if (ifp->if_flags & IFF_RUNNING)
3230 jme_set_rx_coal(sc);
3233 ifnet_deserialize_all(ifp);
3238 jme_set_tx_coal(struct jme_softc *sc)
3242 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3244 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3245 PCCTX_COAL_PKT_MASK;
3246 reg |= PCCTX_COAL_TXQ0;
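/*
 * E.g. (sketch): with jme_tx_coal_to = 100 and jme_tx_coal_pkt = 8,
 * TXQ0 posts a completion interrupt after 8 transmitted packets
 * or after 100 us (the PCC timer unit selected via
 * GPREG0_PCC_UNIT_US in jme_init), whichever comes first.
 */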
3247 CSR_WRITE_4(sc, JME_PCCTX, reg);
3251 jme_set_rx_coal(struct jme_softc *sc)
3256 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3258 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3259 PCCRX_COAL_PKT_MASK;
3260 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3261 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3264 #ifdef IFPOLL_ENABLE
3267 jme_npoll_status(struct ifnet *ifp, int pollhz __unused)
3269 struct jme_softc *sc = ifp->if_softc;
3272 ASSERT_SERIALIZED(&sc->jme_serialize);
3274 status = CSR_READ_4(sc, JME_INTR_STATUS);
3275 if (status & INTR_RXQ_DESC_EMPTY) {
3278 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3279 struct jme_rxdata *rdata =
3280 &sc->jme_cdata.jme_rx_data[i];
3282 if (status & rdata->jme_rx_empty) {
3283 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3284 jme_rxeof(rdata, -1);
3285 #ifdef JME_RSS_DEBUG
3286 rdata->jme_rx_emp++;
3288 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3291 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3292 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3293 RXCSR_RX_ENB | RXCSR_RXQ_START);
3298 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3300 struct jme_rxdata *rdata = arg;
3302 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3304 jme_rxeof(rdata, cycle);
3308 jme_npoll_tx(struct ifnet *ifp, void *arg __unused, int cycle __unused)
3310 struct jme_softc *sc = ifp->if_softc;
3312 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3315 if (!ifq_is_empty(&ifp->if_snd))
3320 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3322 struct jme_softc *sc = ifp->if_softc;
3324 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3329 info->ifpi_status.status_func = jme_npoll_status;
3330 info->ifpi_status.serializer = &sc->jme_serialize;
3332 off = sc->jme_npoll_txoff;
3333 KKASSERT(off < ncpus2);
3334 info->ifpi_tx[off].poll_func = jme_npoll_tx;
3335 info->ifpi_tx[off].arg = NULL;
3336 info->ifpi_tx[off].serializer = &sc->jme_cdata.jme_tx_serialize;
3338 off = sc->jme_npoll_rxoff;
3339 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3340 struct jme_rxdata *rdata =
3341 &sc->jme_cdata.jme_rx_data[i];
3344 info->ifpi_rx[idx].poll_func = jme_npoll_rx;
3345 info->ifpi_rx[idx].arg = rdata;
3346 info->ifpi_rx[idx].serializer =
3347 &rdata->jme_rx_serialize;
3350 if (ifp->if_flags & IFF_RUNNING)
3351 jme_disable_intr(sc);
3352 ifp->if_npoll_cpuid = sc->jme_npoll_txoff;
3354 if (ifp->if_flags & IFF_RUNNING)
3355 jme_enable_intr(sc);
3356 ifp->if_npoll_cpuid = -1;
3361 jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3363 struct jme_softc *sc = (void *)arg1;
3364 struct ifnet *ifp = &sc->arpcom.ac_if;
3367 off = sc->jme_npoll_rxoff;
3368 error = sysctl_handle_int(oidp, &off, 0, req);
3369 if (error || req->newptr == NULL)
3374 ifnet_serialize_all(ifp);
3375 if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3379 sc->jme_npoll_rxoff = off;
3381 ifnet_deserialize_all(ifp);
3387 jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3389 struct jme_softc *sc = (void *)arg1;
3390 struct ifnet *ifp = &sc->arpcom.ac_if;
3393 off = sc->jme_npoll_txoff;
3394 error = sysctl_handle_int(oidp, &off, 0, req);
3395 if (error || req->newptr == NULL)
3400 ifnet_serialize_all(ifp);
3401 if (off >= ncpus2) {
3405 sc->jme_npoll_txoff = off;
3407 ifnet_deserialize_all(ifp);
3412 #endif /* IFPOLL_ENABLE */
3415 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3420 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3421 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3422 JME_RX_RING_ALIGN, 0,
3423 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3424 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3426 device_printf(rdata->jme_sc->jme_dev,
3427 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3430 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3431 rdata->jme_rx_ring_map = dmem.dmem_map;
3432 rdata->jme_rx_ring = dmem.dmem_addr;
3433 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3439 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3441 if ((paddr & 0xffffffff) == 0) {
3443 * Don't allow the lower 32 bits of the RX buffer's
3444 * physical address to be 0; otherwise it would break
3445 * the detection of pending hardware RSS information
3446 * delivery on the RX path (see jme_rxeof).
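 *
 * E.g. (sketch): a buffer at bus address 0x100000000 would
 * yield JME_ADDR_LO() == 0 and be indistinguishable from a
 * cleared hashinfo word, hence such an address is rejected.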
3454 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3459 lowaddr = BUS_SPACE_MAXADDR;
3460 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
3461 /* jme_rxbuf_dma_filter will be called */
3462 lowaddr = BUS_SPACE_MAXADDR_32BIT;
3465 /* Create tag for Rx buffers. */
3466 error = bus_dma_tag_create(
3467 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3468 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3469 lowaddr, /* lowaddr */
3470 BUS_SPACE_MAXADDR, /* highaddr */
3471 jme_rxbuf_dma_filter, NULL, /* filter, filterarg */
3472 MCLBYTES, /* maxsize */
3474 MCLBYTES, /* maxsegsize */
3475 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3476 &rdata->jme_rx_tag);
3478 device_printf(rdata->jme_sc->jme_dev,
3479 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3483 /* Create DMA maps for Rx buffers. */
3484 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3485 &rdata->jme_rx_sparemap);
3487 device_printf(rdata->jme_sc->jme_dev,
3488 "could not create %dth spare Rx dmamap.\n",
3490 bus_dma_tag_destroy(rdata->jme_rx_tag);
3491 rdata->jme_rx_tag = NULL;
3494 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3495 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3497 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3502 device_printf(rdata->jme_sc->jme_dev,
3503 "could not create %dth Rx dmamap "
3504 "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3506 for (j = 0; j < i; ++j) {
3507 rxd = &rdata->jme_rxdesc[j];
3508 bus_dmamap_destroy(rdata->jme_rx_tag,
3511 bus_dmamap_destroy(rdata->jme_rx_tag,
3512 rdata->jme_rx_sparemap);
3513 bus_dma_tag_destroy(rdata->jme_rx_tag);
3514 rdata->jme_rx_tag = NULL;
3522 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3526 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3527 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3529 if (status & rdata->jme_rx_coal) {
3530 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3531 jme_rxeof(rdata, -1);
3532 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3538 jme_enable_rss(struct jme_softc *sc)
3541 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3544 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3545 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3546 ("%s: invalid # of RX rings (%d)",
3547 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3549 rssc = RSSC_HASH_64_ENTRY;
3550 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3551 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3552 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3553 CSR_WRITE_4(sc, JME_RSSC, rssc);
3555 toeplitz_get_key(key, sizeof(key));
3556 for (i = 0; i < RSSKEY_NREGS; ++i) {
3559 keyreg = RSSKEY_REGVAL(key, i);
3560 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3562 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3566 * Create the redirect table in the following fashion:
3567 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3570 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3573 q = i % sc->jme_cdata.jme_rx_ring_cnt;
3574 ind |= q << (i * 8);
3576 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
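/*
 * E.g. (sketch): with 4 RX rings every redirect table register
 * is written as 0x03020100, so the table repeats 0,1,2,3 and the
 * low two bits of the hash pick the ring; with 2 rings the
 * per-register pattern is 0x01000100.
 */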
3578 for (i = 0; i < RSSTBL_NREGS; ++i)
3579 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3583 jme_disable_rss(struct jme_softc *sc)
3585 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3589 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3591 struct jme_softc *sc = ifp->if_softc;
3593 ifnet_serialize_array_enter(sc->jme_serialize_arr,
3594 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
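/*
 * A sketch of the assumed layout of jme_serialize_arr: index 0
 * is the main serializer, index JME_TX_SERIALIZE (1) the TX
 * serializer, and indices from JME_RX_SERIALIZE (2) onward the
 * per-ring RX serializers; jme_serialize_skipmain() later relies
 * on this by starting at array index 1.
 */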
3598 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3600 struct jme_softc *sc = ifp->if_softc;
3602 ifnet_serialize_array_exit(sc->jme_serialize_arr,
3603 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3607 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3609 struct jme_softc *sc = ifp->if_softc;
3611 return ifnet_serialize_array_try(sc->jme_serialize_arr,
3612 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3618 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3619 boolean_t serialized)
3621 struct jme_softc *sc = ifp->if_softc;
3623 ifnet_serialize_array_assert(sc->jme_serialize_arr,
3624 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
3628 #endif /* INVARIANTS */
3631 jme_msix_try_alloc(device_t dev)
3633 struct jme_softc *sc = device_get_softc(dev);
3634 struct jme_msix_data *msix;
3635 int error, i, r, msix_enable, msix_count;
3636 int offset, offset_def;
3638 msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
3639 KKASSERT(msix_count <= JME_NMSIX);
3641 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3644 * We leave the 1st MSI-X vector unused, so we
3645 * actually need msix_count + 1 MSI-X vectors.
3647 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3650 for (i = 0; i < msix_count; ++i)
3651 sc->jme_msix[i].jme_msix_rid = -1;
3656 * Set up status MSI-X.
3659 msix = &sc->jme_msix[i++];
3660 msix->jme_msix_cpuid = 0;
3661 msix->jme_msix_arg = sc;
3662 msix->jme_msix_func = jme_msix_status;
3663 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3664 msix->jme_msix_intrs |=
3665 sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
3667 msix->jme_msix_serialize = &sc->jme_serialize;
3668 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
3669 device_get_nameunit(dev));
3675 offset_def = device_get_unit(dev) % ncpus2;
3676 offset = device_getenv_int(dev, "msix.txoff", offset_def);
3677 if (offset >= ncpus2) {
3678 device_printf(dev, "invalid msix.txoff %d, use %d\n",
3679 offset, offset_def);
3680 offset = offset_def;
3683 msix = &sc->jme_msix[i++];
3684 msix->jme_msix_cpuid = offset;
3685 sc->jme_tx_cpuid = msix->jme_msix_cpuid;
3686 msix->jme_msix_arg = &sc->jme_cdata;
3687 msix->jme_msix_func = jme_msix_tx;
3688 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3689 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
3690 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3691 device_get_nameunit(dev));
3697 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
3700 offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
3701 device_get_unit(dev)) % ncpus2;
3703 offset = device_getenv_int(dev, "msix.rxoff", offset_def);
3704 if (offset >= ncpus2 ||
3705 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3706 device_printf(dev, "invalid msix.rxoff %d, use %d\n",
3707 offset, offset_def);
3708 offset = offset_def;
3712 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3713 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3715 msix = &sc->jme_msix[i++];
3716 msix->jme_msix_cpuid = r + offset;
3717 KKASSERT(msix->jme_msix_cpuid < ncpus2);
3718 msix->jme_msix_arg = rdata;
3719 msix->jme_msix_func = jme_msix_rx;
3720 msix->jme_msix_intrs = rdata->jme_rx_coal;
3721 msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3722 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3723 "%s rx%d", device_get_nameunit(dev), r);
3726 KKASSERT(i == msix_count);
3728 error = pci_setup_msix(dev);
3732 /* Set up jme_msix_cnt early, so we can clean up */
3733 sc->jme_msix_cnt = msix_count;
3735 for (i = 0; i < msix_count; ++i) {
3736 msix = &sc->jme_msix[i];
3738 msix->jme_msix_vector = i + 1;
3739 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3740 &msix->jme_msix_rid, msix->jme_msix_cpuid);
3744 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3745 &msix->jme_msix_rid, RF_ACTIVE);
3746 if (msix->jme_msix_res == NULL) {
3752 for (i = 0; i < JME_INTR_CNT; ++i) {
3753 uint32_t intr_mask = (1 << i);
3756 if ((JME_INTRS & intr_mask) == 0)
3759 for (x = 0; x < msix_count; ++x) {
3760 msix = &sc->jme_msix[x];
3761 if (msix->jme_msix_intrs & intr_mask) {
3764 reg = i / JME_MSINUM_FACTOR;
3765 KKASSERT(reg < JME_MSINUM_CNT);
3767 shift = (i % JME_MSINUM_FACTOR) * 4;
3769 sc->jme_msinum[reg] |=
3770 (msix->jme_msix_vector << shift);
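/*
 * E.g. (sketch, assuming JME_MSINUM_FACTOR is 8): interrupt
 * source bit 9 maps to MSINUM register 9 / 8 = 1, nibble
 * (9 % 8) * 4 = bit 4, where its 4-bit MSI-X vector number is
 * stored.
 */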
3778 for (i = 0; i < JME_MSINUM_CNT; ++i) {
3779 device_printf(dev, "MSINUM%d: %#x\n", i,
3784 pci_enable_msix(dev);
3785 sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3793 jme_intr_alloc(device_t dev)
3795 struct jme_softc *sc = device_get_softc(dev);
3798 jme_msix_try_alloc(dev);
3800 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3801 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3802 &sc->jme_irq_rid, &irq_flags);
3804 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3805 &sc->jme_irq_rid, irq_flags);
3806 if (sc->jme_irq_res == NULL) {
3807 device_printf(dev, "can't allocate irq\n");
3815 jme_msix_free(device_t dev)
3817 struct jme_softc *sc = device_get_softc(dev);
3820 KKASSERT(sc->jme_msix_cnt > 1);
3822 for (i = 0; i < sc->jme_msix_cnt; ++i) {
3823 struct jme_msix_data *msix = &sc->jme_msix[i];
3825 if (msix->jme_msix_res != NULL) {
3826 bus_release_resource(dev, SYS_RES_IRQ,
3827 msix->jme_msix_rid, msix->jme_msix_res);
3828 msix->jme_msix_res = NULL;
3830 if (msix->jme_msix_rid >= 0) {
3831 pci_release_msix_vector(dev, msix->jme_msix_rid);
3832 msix->jme_msix_rid = -1;
3835 pci_teardown_msix(dev);
3839 jme_intr_free(device_t dev)
3841 struct jme_softc *sc = device_get_softc(dev);
3843 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3844 if (sc->jme_irq_res != NULL) {
3845 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3848 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3849 pci_release_msi(dev);
3856 jme_msix_tx(void *xcd)
3858 struct jme_chain_data *cd = xcd;
3859 struct jme_softc *sc = cd->jme_sc;
3860 struct ifnet *ifp = &sc->arpcom.ac_if;
3862 ASSERT_SERIALIZED(&cd->jme_tx_serialize);
3864 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3866 CSR_WRITE_4(sc, JME_INTR_STATUS,
3867 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3869 if (ifp->if_flags & IFF_RUNNING) {
3871 if (!ifq_is_empty(&ifp->if_snd))
3875 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3879 jme_msix_rx(void *xrdata)
3881 struct jme_rxdata *rdata = xrdata;
3882 struct jme_softc *sc = rdata->jme_sc;
3883 struct ifnet *ifp = &sc->arpcom.ac_if;
3885 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3887 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);
3889 CSR_WRITE_4(sc, JME_INTR_STATUS,
3890 rdata->jme_rx_coal | rdata->jme_rx_comp);
3892 if (ifp->if_flags & IFF_RUNNING)
3893 jme_rxeof(rdata, -1);
3895 CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
3899 jme_msix_status(void *xsc)
3901 struct jme_softc *sc = xsc;
3902 struct ifnet *ifp = &sc->arpcom.ac_if;
3905 ASSERT_SERIALIZED(&sc->jme_serialize);
3907 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);
3909 status = CSR_READ_4(sc, JME_INTR_STATUS);
3910 status &= INTR_RXQ_DESC_EMPTY;
3913 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3915 if ((ifp->if_flags & IFF_RUNNING) && status) {
3918 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3919 struct jme_rxdata *rdata =
3920 &sc->jme_cdata.jme_rx_data[i];
3922 if (status & rdata->jme_rx_empty) {
3923 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3924 jme_rxeof(rdata, -1);
3925 #ifdef JME_RSS_DEBUG
3926 rdata->jme_rx_emp++;
3928 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3931 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3932 RXCSR_RX_ENB | RXCSR_RXQ_START);
3935 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
3939 jme_set_msinum(struct jme_softc *sc)
3943 for (i = 0; i < JME_MSINUM_CNT; ++i)
3944 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3948 jme_intr_setup(device_t dev)
3950 struct jme_softc *sc = device_get_softc(dev);
3951 struct ifnet *ifp = &sc->arpcom.ac_if;
3954 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3955 return jme_msix_setup(dev);
3957 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3958 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3960 device_printf(dev, "could not set up interrupt handler.\n");
3964 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3965 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3970 jme_intr_teardown(device_t dev)
3972 struct jme_softc *sc = device_get_softc(dev);
3974 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3975 jme_msix_teardown(dev, sc->jme_msix_cnt);
3977 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3981 jme_msix_setup(device_t dev)
3983 struct jme_softc *sc = device_get_softc(dev);
3984 struct ifnet *ifp = &sc->arpcom.ac_if;
3987 for (x = 0; x < sc->jme_msix_cnt; ++x) {
3988 struct jme_msix_data *msix = &sc->jme_msix[x];
3991 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3992 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3993 &msix->jme_msix_handle, msix->jme_msix_serialize,
3994 msix->jme_msix_desc);
3996 device_printf(dev, "could not set up %s "
3997 "interrupt handler.\n", msix->jme_msix_desc);
3998 jme_msix_teardown(dev, x);
4002 ifp->if_cpuid = sc->jme_tx_cpuid;
4007 jme_msix_teardown(device_t dev, int msix_count)
4009 struct jme_softc *sc = device_get_softc(dev);
4012 for (x = 0; x < msix_count; ++x) {
4013 struct jme_msix_data *msix = &sc->jme_msix[x];
4015 bus_teardown_intr(dev, msix->jme_msix_res,
4016 msix->jme_msix_handle);
4021 jme_serialize_skipmain(struct jme_softc *sc)
4023 lwkt_serialize_array_enter(sc->jme_serialize_arr,
4024 sc->jme_serialize_cnt, 1);
4028 jme_deserialize_skipmain(struct jme_softc *sc)
4030 lwkt_serialize_array_exit(sc->jme_serialize_arr,
4031 sc->jme_serialize_cnt, 1);
4035 jme_enable_intr(struct jme_softc *sc)
4039 for (i = 0; i < sc->jme_serialize_cnt; ++i)
4040 lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);
4042 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
4046 jme_disable_intr(struct jme_softc *sc)
4050 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
4052 for (i = 0; i < sc->jme_serialize_cnt; ++i)
4053 lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);