2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
30 #include "opt_polling.h"
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
47 #include <net/ethernet.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/toeplitz.h>
55 #include <net/toeplitz2.h>
56 #include <net/vlan/if_vlan_var.h>
57 #include <net/vlan/if_vlan_ether.h>
59 #include <netinet/in.h>
61 #include <dev/netif/mii_layer/miivar.h>
62 #include <dev/netif/mii_layer/jmphyreg.h>
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66 #include <bus/pci/pcidevs.h>
68 #include <dev/netif/jme/if_jmereg.h>
69 #include <dev/netif/jme/if_jmevar.h>
71 #include "miibus_if.h"
73 /* Define JME_SHOW_ERRORS to print Rx errors. */
74 #undef JME_SHOW_ERRORS
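/* Checksum offload features advertised via if_hwassist when Tx checksum offload is enabled. */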
76 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
79 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
81 if ((sc)->jme_rss_debug >= (lvl)) \
82 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
84 #else /* !JME_RSS_DEBUG */
85 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
86 #endif /* JME_RSS_DEBUG */
88 static int jme_probe(device_t);
89 static int jme_attach(device_t);
90 static int jme_detach(device_t);
91 static int jme_shutdown(device_t);
92 static int jme_suspend(device_t);
93 static int jme_resume(device_t);
95 static int jme_miibus_readreg(device_t, int, int);
96 static int jme_miibus_writereg(device_t, int, int, int);
97 static void jme_miibus_statchg(device_t);
99 static void jme_init(void *);
100 static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
101 static void jme_start(struct ifnet *);
102 static void jme_watchdog(struct ifnet *);
103 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
104 static int jme_mediachange(struct ifnet *);
105 #ifdef DEVICE_POLLING
106 static void jme_poll(struct ifnet *, enum poll_cmd, int);
108 static void jme_serialize(struct ifnet *, enum ifnet_serialize);
109 static void jme_deserialize(struct ifnet *, enum ifnet_serialize);
110 static int jme_tryserialize(struct ifnet *, enum ifnet_serialize);
112 static void jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
116 static void jme_intr(void *);
117 static void jme_msix_tx(void *);
118 static void jme_msix_rx(void *);
119 static void jme_txeof(struct jme_softc *);
120 static void jme_rxeof(struct jme_rxdata *, int);
121 static void jme_rx_intr(struct jme_softc *, uint32_t);
123 static int jme_msix_setup(device_t);
124 static void jme_msix_teardown(device_t, int);
125 static int jme_intr_setup(device_t);
126 static void jme_intr_teardown(device_t);
127 static void jme_msix_try_alloc(device_t);
128 static void jme_msix_free(device_t);
129 static int jme_intr_alloc(device_t);
130 static void jme_intr_free(device_t);
131 static int jme_dma_alloc(struct jme_softc *);
132 static void jme_dma_free(struct jme_softc *);
133 static int jme_init_rx_ring(struct jme_rxdata *);
134 static void jme_init_tx_ring(struct jme_softc *);
135 static void jme_init_ssb(struct jme_softc *);
136 static int jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
137 static int jme_encap(struct jme_softc *, struct mbuf **);
138 static void jme_rxpkt(struct jme_rxdata *);
139 static int jme_rxring_dma_alloc(struct jme_rxdata *);
140 static int jme_rxbuf_dma_alloc(struct jme_rxdata *);
142 static void jme_tick(void *);
143 static void jme_stop(struct jme_softc *);
144 static void jme_reset(struct jme_softc *);
145 static void jme_set_msinum(struct jme_softc *);
146 static void jme_set_vlan(struct jme_softc *);
147 static void jme_set_filter(struct jme_softc *);
148 static void jme_stop_tx(struct jme_softc *);
149 static void jme_stop_rx(struct jme_softc *);
150 static void jme_mac_config(struct jme_softc *);
151 static void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
152 static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
153 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
155 static void jme_setwol(struct jme_softc *);
156 static void jme_setlinkspeed(struct jme_softc *);
158 static void jme_set_tx_coal(struct jme_softc *);
159 static void jme_set_rx_coal(struct jme_softc *);
160 static void jme_enable_rss(struct jme_softc *);
161 static void jme_disable_rss(struct jme_softc *);
163 static void jme_sysctl_node(struct jme_softc *);
164 static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
165 static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
166 static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
167 static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
170 * Devices supported by this driver.
172 static const struct jme_dev {
173 uint16_t jme_vendorid;
174 uint16_t jme_deviceid;
176 const char *jme_name;
178 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
180 "JMicron Inc, JMC250 Gigabit Ethernet" },
181 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
183 "JMicron Inc, JMC260 Fast Ethernet" },
187 static device_method_t jme_methods[] = {
188 /* Device interface. */
189 DEVMETHOD(device_probe, jme_probe),
190 DEVMETHOD(device_attach, jme_attach),
191 DEVMETHOD(device_detach, jme_detach),
192 DEVMETHOD(device_shutdown, jme_shutdown),
193 DEVMETHOD(device_suspend, jme_suspend),
194 DEVMETHOD(device_resume, jme_resume),
197 DEVMETHOD(bus_print_child, bus_generic_print_child),
198 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
201 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
202 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
203 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
208 static driver_t jme_driver = {
211 sizeof(struct jme_softc)
214 static devclass_t jme_devclass;
216 DECLARE_DUMMY_MODULE(if_jme);
217 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
218 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
219 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
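/*
 * Per-ring Rx interrupt status bits (coalescing, completion and
 * descriptor-empty), indexed by Rx ring number.
 */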
221 static const struct {
225 } jme_rx_status[JME_NRXRING_MAX] = {
226 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
227 INTR_RXQ0_DESC_EMPTY },
228 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
229 INTR_RXQ1_DESC_EMPTY },
230 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
231 INTR_RXQ2_DESC_EMPTY },
232 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
233 INTR_RXQ3_DESC_EMPTY }
236 static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
237 static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
238 static int jme_rx_ring_count = 1;
239 static int jme_msi_enable = 1;
240 static int jme_msix_enable = 1;
242 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
243 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
244 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
245 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
246 TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
249 jme_setup_rxdesc(struct jme_rxdesc *rxd)
251 struct jme_desc *desc;
254 desc->buflen = htole32(MCLBYTES);
255 desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
256 desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
257 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
261 * Read a PHY register on the MII of the JMC250.
264 jme_miibus_readreg(device_t dev, int phy, int reg)
266 struct jme_softc *sc = device_get_softc(dev);
270 /* For FPGA version, PHY address 0 should be ignored. */
271 if (sc->jme_caps & JME_CAP_FPGA) {
275 if (sc->jme_phyaddr != phy)
279 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
280 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
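/* Wait for the SMI transaction to complete; the hardware clears SMI_OP_EXECUTE when done. */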
282 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
284 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
288 device_printf(sc->jme_dev, "phy read timeout: "
289 "phy %d, reg %d\n", phy, reg);
293 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
297 * Write a PHY register on the MII of the JMC250.
300 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
302 struct jme_softc *sc = device_get_softc(dev);
305 /* For FPGA version, PHY address 0 should be ignored. */
306 if (sc->jme_caps & JME_CAP_FPGA) {
310 if (sc->jme_phyaddr != phy)
314 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
315 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
316 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
318 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
320 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
324 device_printf(sc->jme_dev, "phy write timeout: "
325 "phy %d, reg %d\n", phy, reg);
332 * Callback from MII layer when media changes.
335 jme_miibus_statchg(device_t dev)
337 struct jme_softc *sc = device_get_softc(dev);
338 struct ifnet *ifp = &sc->arpcom.ac_if;
339 struct mii_data *mii;
340 struct jme_txdesc *txd;
344 ASSERT_IFNET_SERIALIZED_ALL(ifp);
346 if ((ifp->if_flags & IFF_RUNNING) == 0)
349 mii = device_get_softc(sc->jme_miibus);
351 sc->jme_flags &= ~JME_FLAG_LINK;
352 if ((mii->mii_media_status & IFM_AVALID) != 0) {
353 switch (IFM_SUBTYPE(mii->mii_media_active)) {
356 sc->jme_flags |= JME_FLAG_LINK;
359 if (sc->jme_caps & JME_CAP_FASTETH)
361 sc->jme_flags |= JME_FLAG_LINK;
369 * Disabling the Rx/Tx MACs has the side-effect of resetting the
370 * JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
371 * descriptor address, so the driver should reset its
372 * internal producer/consumer pointers and reclaim any
373 * allocated resources. Note that just saving the value of
374 * the JME_TXNDA and JME_RXNDA registers before stopping the MAC
375 * and restoring them afterwards is not
376 * sufficient to guarantee a correct MAC state, because
377 * stopping the MAC can take a while and the hardware
378 * might have updated the JME_TXNDA/JME_RXNDA registers
379 * during the stop operation.
382 /* Disable interrupts */
383 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
386 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
388 callout_stop(&sc->jme_tick_ch);
390 /* Stop receiver/transmitter. */
394 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
395 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
397 jme_rxeof(rdata, -1);
398 if (rdata->jme_rxhead != NULL)
399 m_freem(rdata->jme_rxhead);
400 JME_RXCHAIN_RESET(rdata);
403 * Reuse configured Rx descriptors and reset
404 * producer/consumer index.
406 rdata->jme_rx_cons = 0;
408 if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
414 if (sc->jme_cdata.jme_tx_cnt != 0) {
415 /* Remove queued packets for transmit. */
416 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
417 txd = &sc->jme_cdata.jme_txdesc[i];
418 if (txd->tx_m != NULL) {
420 sc->jme_cdata.jme_tx_tag,
429 jme_init_tx_ring(sc);
431 /* Initialize shadow status block. */
434 /* Program MAC with resolved speed/duplex/flow-control. */
435 if (sc->jme_flags & JME_FLAG_LINK) {
438 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
440 /* Set Tx ring address to the hardware. */
441 paddr = sc->jme_cdata.jme_tx_ring_paddr;
442 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
443 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
445 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
446 CSR_WRITE_4(sc, JME_RXCSR,
447 sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
449 /* Set Rx ring address to the hardware. */
450 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
451 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
452 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
455 /* Restart receiver/transmitter. */
456 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
458 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
461 ifp->if_flags |= IFF_RUNNING;
462 ifp->if_flags &= ~IFF_OACTIVE;
463 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
465 #ifdef DEVICE_POLLING
466 if (!(ifp->if_flags & IFF_POLLING))
468 /* Reenable interrupts. */
469 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
473 * Get the current interface media status.
476 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
478 struct jme_softc *sc = ifp->if_softc;
479 struct mii_data *mii = device_get_softc(sc->jme_miibus);
481 ASSERT_IFNET_SERIALIZED_ALL(ifp);
484 ifmr->ifm_status = mii->mii_media_status;
485 ifmr->ifm_active = mii->mii_media_active;
489 * Set hardware to newly-selected media.
492 jme_mediachange(struct ifnet *ifp)
494 struct jme_softc *sc = ifp->if_softc;
495 struct mii_data *mii = device_get_softc(sc->jme_miibus);
498 ASSERT_IFNET_SERIALIZED_ALL(ifp);
500 if (mii->mii_instance != 0) {
501 struct mii_softc *miisc;
503 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
504 mii_phy_reset(miisc);
506 error = mii_mediachg(mii);
512 jme_probe(device_t dev)
514 const struct jme_dev *sp;
517 vid = pci_get_vendor(dev);
518 did = pci_get_device(dev);
519 for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
520 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
521 struct jme_softc *sc = device_get_softc(dev);
523 sc->jme_caps = sp->jme_caps;
524 device_set_desc(dev, sp->jme_name);
532 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
538 for (i = JME_TIMEOUT; i > 0; i--) {
539 reg = CSR_READ_4(sc, JME_SMBCSR);
540 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
546 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
550 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
551 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
552 for (i = JME_TIMEOUT; i > 0; i--) {
554 reg = CSR_READ_4(sc, JME_SMBINTF);
555 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
560 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
564 reg = CSR_READ_4(sc, JME_SMBINTF);
565 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
571 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
573 uint8_t fup, reg, val;
578 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
579 fup != JME_EEPROM_SIG0)
581 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
582 fup != JME_EEPROM_SIG1)
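/*
 * Walk the EEPROM descriptor chain, collecting the bytes that are
 * written to the station address registers (JME_PAR0..JME_PAR0+5).
 */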
586 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
588 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
589 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
590 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
592 if (reg >= JME_PAR0 &&
593 reg < JME_PAR0 + ETHER_ADDR_LEN) {
594 if (jme_eeprom_read_byte(sc, offset + 2,
597 eaddr[reg - JME_PAR0] = val;
601 /* Check for the end of EEPROM descriptor. */
602 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
604 /* Try next eeprom descriptor. */
605 offset += JME_EEPROM_DESC_BYTES;
606 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
608 if (match == ETHER_ADDR_LEN)
615 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
619 /* Read station address. */
620 par0 = CSR_READ_4(sc, JME_PAR0);
621 par1 = CSR_READ_4(sc, JME_PAR1);
623 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
624 device_printf(sc->jme_dev,
625 "generating fake ethernet address.\n");
626 par0 = karc4random();
627 /* Set OUI to JMicron. */
631 eaddr[3] = (par0 >> 16) & 0xff;
632 eaddr[4] = (par0 >> 8) & 0xff;
633 eaddr[5] = par0 & 0xff;
635 eaddr[0] = (par0 >> 0) & 0xFF;
636 eaddr[1] = (par0 >> 8) & 0xFF;
637 eaddr[2] = (par0 >> 16) & 0xFF;
638 eaddr[3] = (par0 >> 24) & 0xFF;
639 eaddr[4] = (par1 >> 0) & 0xFF;
640 eaddr[5] = (par1 >> 8) & 0xFF;
645 jme_attach(device_t dev)
647 struct jme_softc *sc = device_get_softc(dev);
648 struct ifnet *ifp = &sc->arpcom.ac_if;
651 uint8_t pcie_ptr, rev;
652 int error = 0, i, j, rx_desc_cnt;
653 uint8_t eaddr[ETHER_ADDR_LEN];
655 lwkt_serialize_init(&sc->jme_serialize);
656 lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
657 for (i = 0; i < JME_NRXRING_MAX; ++i) {
659 &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
662 rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
664 rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
665 if (rx_desc_cnt > JME_NDESC_MAX)
666 rx_desc_cnt = JME_NDESC_MAX;
668 sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
670 sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
672 if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
673 sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
678 sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
680 sc->jme_cdata.jme_rx_ring_cnt =
681 if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
684 sc->jme_serialize_arr[i++] = &sc->jme_serialize;
685 sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
686 for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
687 sc->jme_serialize_arr[i++] =
688 &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
690 KKASSERT(i <= JME_NSERIALIZE);
691 sc->jme_serialize_cnt = i;
693 sc->jme_cdata.jme_sc = sc;
694 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
695 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
698 rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
699 rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
700 rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
701 rdata->jme_rx_idx = i;
702 rdata->jme_rx_desc_cnt = rx_desc_cnt;
706 sc->jme_lowaddr = BUS_SPACE_MAXADDR;
708 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
710 callout_init(&sc->jme_tick_ch);
713 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
716 irq = pci_read_config(dev, PCIR_INTLINE, 4);
717 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
719 device_printf(dev, "chip is in D%d power mode "
720 "-- setting to D0\n", pci_get_powerstate(dev));
722 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
724 pci_write_config(dev, PCIR_INTLINE, irq, 4);
725 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
727 #endif /* !BURN_BRIDGE */
729 /* Enable bus mastering */
730 pci_enable_busmaster(dev);
735 * JMC250 supports both memory mapped and I/O register space
736 * access. Because I/O register access would require different
737 * BARs to reach the registers, it is a waste of time to use I/O
738 * register space access. JMC250 uses 16K to map the entire memory
741 sc->jme_mem_rid = JME_PCIR_BAR;
742 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
743 &sc->jme_mem_rid, RF_ACTIVE);
744 if (sc->jme_mem_res == NULL) {
745 device_printf(dev, "can't allocate IO memory\n");
748 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
749 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
754 error = jme_intr_alloc(dev);
761 reg = CSR_READ_4(sc, JME_CHIPMODE);
762 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
764 sc->jme_caps |= JME_CAP_FPGA;
766 device_printf(dev, "FPGA revision: 0x%04x\n",
767 (reg & CHIPMODE_FPGA_REV_MASK) >>
768 CHIPMODE_FPGA_REV_SHIFT);
772 /* NOTE: FM revision is put in the upper 4 bits */
773 rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
774 rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
776 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
778 did = pci_get_device(dev);
780 case PCI_PRODUCT_JMICRON_JMC250:
781 if (rev == JME_REV1_A2)
782 sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
785 case PCI_PRODUCT_JMICRON_JMC260:
787 sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
791 panic("unknown device id 0x%04x", did);
793 if (rev >= JME_REV2) {
794 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
795 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
796 GHC_TXMAC_CLKSRC_1000;
799 /* Reset the ethernet controller. */
802 /* Map MSI/MSI-X vectors */
805 /* Get station address. */
806 reg = CSR_READ_4(sc, JME_SMBCSR);
807 if (reg & SMBCSR_EEPROM_PRESENT)
808 error = jme_eeprom_macaddr(sc, eaddr);
809 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
810 if (error != 0 && (bootverbose)) {
811 device_printf(dev, "ethernet hardware address "
812 "not found in EEPROM.\n");
814 jme_reg_macaddr(sc, eaddr);
819 * The integrated JR0211 has a fixed PHY address, whereas the FPGA version
820 * requires PHY probing to find the correct PHY address.
822 if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
823 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
824 GPREG0_PHY_ADDR_MASK;
826 device_printf(dev, "PHY is at address %d.\n",
833 /* Set max allowable DMA size. */
834 pcie_ptr = pci_get_pciecap_ptr(dev);
838 sc->jme_caps |= JME_CAP_PCIE;
839 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
841 device_printf(dev, "Read request size : %d bytes.\n",
842 128 << ((ctrl >> 12) & 0x07));
843 device_printf(dev, "TLP payload size : %d bytes.\n",
844 128 << ((ctrl >> 5) & 0x07));
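/* Match the Tx DMA burst size to the PCIe maximum read request size. */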
846 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
847 case PCIEM_DEVCTL_MAX_READRQ_128:
848 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
850 case PCIEM_DEVCTL_MAX_READRQ_256:
851 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
854 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
857 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
859 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
860 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
864 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
865 sc->jme_caps |= JME_CAP_PMCAP;
873 /* Allocate DMA stuffs */
874 error = jme_dma_alloc(sc);
879 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
880 ifp->if_init = jme_init;
881 ifp->if_ioctl = jme_ioctl;
882 ifp->if_start = jme_start;
883 #ifdef DEVICE_POLLING
884 ifp->if_poll = jme_poll;
886 ifp->if_watchdog = jme_watchdog;
887 ifp->if_serialize = jme_serialize;
888 ifp->if_deserialize = jme_deserialize;
889 ifp->if_tryserialize = jme_tryserialize;
891 ifp->if_serialize_assert = jme_serialize_assert;
893 ifq_set_maxlen(&ifp->if_snd,
894 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
895 ifq_set_ready(&ifp->if_snd);
897 /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
898 ifp->if_capabilities = IFCAP_HWCSUM |
900 IFCAP_VLAN_HWTAGGING;
901 if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
902 ifp->if_capabilities |= IFCAP_RSS;
903 ifp->if_capenable = ifp->if_capabilities;
906 * Disable TXCSUM by default to improve bulk data
907 * transmit performance (+20Mbps improvement).
909 ifp->if_capenable &= ~IFCAP_TXCSUM;
911 if (ifp->if_capenable & IFCAP_TXCSUM)
912 ifp->if_hwassist = JME_CSUM_FEATURES;
914 /* Set up MII bus. */
915 error = mii_phy_probe(dev, &sc->jme_miibus,
916 jme_mediachange, jme_mediastatus);
918 device_printf(dev, "no PHY found!\n");
923 * Save PHYADDR for FPGA mode PHY.
925 if (sc->jme_caps & JME_CAP_FPGA) {
926 struct mii_data *mii = device_get_softc(sc->jme_miibus);
928 if (mii->mii_instance != 0) {
929 struct mii_softc *miisc;
931 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
932 if (miisc->mii_phy != 0) {
933 sc->jme_phyaddr = miisc->mii_phy;
937 if (sc->jme_phyaddr != 0) {
938 device_printf(sc->jme_dev,
939 "FPGA PHY is at %d\n", sc->jme_phyaddr);
941 jme_miibus_writereg(dev, sc->jme_phyaddr,
942 JMPHY_CONF, JMPHY_CONF_DEFFIFO);
944 /* XXX should we clear JME_WA_EXTFIFO */
949 ether_ifattach(ifp, eaddr, NULL);
951 /* Tell the upper layer(s) we support long frames. */
952 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
954 error = jme_intr_setup(dev);
967 jme_detach(device_t dev)
969 struct jme_softc *sc = device_get_softc(dev);
971 if (device_is_attached(dev)) {
972 struct ifnet *ifp = &sc->arpcom.ac_if;
974 ifnet_serialize_all(ifp);
976 jme_intr_teardown(dev);
977 ifnet_deserialize_all(ifp);
982 if (sc->jme_sysctl_tree != NULL)
983 sysctl_ctx_free(&sc->jme_sysctl_ctx);
985 if (sc->jme_miibus != NULL)
986 device_delete_child(dev, sc->jme_miibus);
987 bus_generic_detach(dev);
991 if (sc->jme_mem_res != NULL) {
992 bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
1002 jme_sysctl_node(struct jme_softc *sc)
1005 #ifdef JME_RSS_DEBUG
1009 sysctl_ctx_init(&sc->jme_sysctl_ctx);
1010 sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
1011 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
1012 device_get_nameunit(sc->jme_dev),
1014 if (sc->jme_sysctl_tree == NULL) {
1015 device_printf(sc->jme_dev, "can't add sysctl node\n");
1019 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1020 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1021 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1022 sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
1024 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1025 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1026 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1027 sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
1029 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1030 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1031 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1032 sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
1034 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1035 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1036 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1037 sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
1039 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1040 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1041 "rx_desc_count", CTLFLAG_RD,
1042 &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
1043 0, "RX desc count");
1044 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1045 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1046 "tx_desc_count", CTLFLAG_RD,
1047 &sc->jme_cdata.jme_tx_desc_cnt,
1048 0, "TX desc count");
1049 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1050 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1051 "rx_ring_count", CTLFLAG_RD,
1052 &sc->jme_cdata.jme_rx_ring_cnt,
1053 0, "RX ring count");
1054 #ifdef JME_RSS_DEBUG
1055 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1056 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1057 "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
1058 0, "RSS debug level");
1059 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1060 char rx_ring_pkt[32];
1062 ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
1063 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
1064 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1065 rx_ring_pkt, CTLFLAG_RW,
1066 &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
1071 * Set default coalesce values
1073 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1074 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1075 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1076 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1079 * Adjust coalesce values, in case the number of TX/RX
1080 * descriptors is set to a small value by the user.
1082 * NOTE: coal_max will not be zero, since the number of descriptors
1083 * must be aligned to JME_NDESC_ALIGN (16 currently).
1085 coal_max = sc->jme_cdata.jme_tx_desc_cnt / 6;
1086 if (coal_max < sc->jme_tx_coal_pkt)
1087 sc->jme_tx_coal_pkt = coal_max;
1089 coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 4;
1090 if (coal_max < sc->jme_rx_coal_pkt)
1091 sc->jme_rx_coal_pkt = coal_max;
1095 jme_dma_alloc(struct jme_softc *sc)
1097 struct jme_txdesc *txd;
1099 int error, i, asize;
1101 sc->jme_cdata.jme_txdesc =
1102 kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1103 M_DEVBUF, M_WAITOK | M_ZERO);
1104 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1105 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
1108 kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1109 M_DEVBUF, M_WAITOK | M_ZERO);
1112 /* Create parent ring tag. */
1113 error = bus_dma_tag_create(NULL,/* parent */
1114 1, JME_RING_BOUNDARY, /* algnmnt, boundary */
1115 sc->jme_lowaddr, /* lowaddr */
1116 BUS_SPACE_MAXADDR, /* highaddr */
1117 NULL, NULL, /* filter, filterarg */
1118 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1120 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1122 &sc->jme_cdata.jme_ring_tag);
1124 device_printf(sc->jme_dev,
1125 "could not create parent ring DMA tag.\n");
1130 * Create DMA stuffs for TX ring
1132 asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
1133 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1134 JME_TX_RING_ALIGN, 0,
1135 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1136 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1138 device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1141 sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1142 sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1143 sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1144 sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
1147 * Create DMA stuffs for RX rings
1149 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1150 error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
1155 /* Create parent buffer tag. */
1156 error = bus_dma_tag_create(NULL,/* parent */
1157 1, 0, /* algnmnt, boundary */
1158 sc->jme_lowaddr, /* lowaddr */
1159 BUS_SPACE_MAXADDR, /* highaddr */
1160 NULL, NULL, /* filter, filterarg */
1161 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1163 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1165 &sc->jme_cdata.jme_buffer_tag);
1167 device_printf(sc->jme_dev,
1168 "could not create parent buffer DMA tag.\n");
1173 * Create DMA stuffs for shadow status block
1175 asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
1176 error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1177 JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1178 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1180 device_printf(sc->jme_dev,
1181 "could not create shadow status block.\n");
1184 sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1185 sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1186 sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1187 sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1190 * Create DMA stuffs for TX buffers
1193 /* Create tag for Tx buffers. */
1194 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1195 1, 0, /* algnmnt, boundary */
1196 BUS_SPACE_MAXADDR, /* lowaddr */
1197 BUS_SPACE_MAXADDR, /* highaddr */
1198 NULL, NULL, /* filter, filterarg */
1199 JME_JUMBO_FRAMELEN, /* maxsize */
1200 JME_MAXTXSEGS, /* nsegments */
1201 JME_MAXSEGSIZE, /* maxsegsize */
1202 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1203 &sc->jme_cdata.jme_tx_tag);
1205 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1209 /* Create DMA maps for Tx buffers. */
1210 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
1211 txd = &sc->jme_cdata.jme_txdesc[i];
1212 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1213 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1218 device_printf(sc->jme_dev,
1219 "could not create %dth Tx dmamap.\n", i);
1221 for (j = 0; j < i; ++j) {
1222 txd = &sc->jme_cdata.jme_txdesc[j];
1223 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1226 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1227 sc->jme_cdata.jme_tx_tag = NULL;
1233 * Create DMA stuffs for RX buffers
1235 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1236 error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
1244 jme_dma_free(struct jme_softc *sc)
1246 struct jme_txdesc *txd;
1247 struct jme_rxdesc *rxd;
1248 struct jme_rxdata *rdata;
1252 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1253 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1254 sc->jme_cdata.jme_tx_ring_map);
1255 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1256 sc->jme_cdata.jme_tx_ring,
1257 sc->jme_cdata.jme_tx_ring_map);
1258 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1259 sc->jme_cdata.jme_tx_ring_tag = NULL;
1263 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1264 rdata = &sc->jme_cdata.jme_rx_data[r];
1265 if (rdata->jme_rx_ring_tag != NULL) {
1266 bus_dmamap_unload(rdata->jme_rx_ring_tag,
1267 rdata->jme_rx_ring_map);
1268 bus_dmamem_free(rdata->jme_rx_ring_tag,
1270 rdata->jme_rx_ring_map);
1271 bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1272 rdata->jme_rx_ring_tag = NULL;
1277 if (sc->jme_cdata.jme_tx_tag != NULL) {
1278 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
1279 txd = &sc->jme_cdata.jme_txdesc[i];
1280 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1283 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1284 sc->jme_cdata.jme_tx_tag = NULL;
1288 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1289 rdata = &sc->jme_cdata.jme_rx_data[r];
1290 if (rdata->jme_rx_tag != NULL) {
1291 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
1292 rxd = &rdata->jme_rxdesc[i];
1293 bus_dmamap_destroy(rdata->jme_rx_tag,
1296 bus_dmamap_destroy(rdata->jme_rx_tag,
1297 rdata->jme_rx_sparemap);
1298 bus_dma_tag_destroy(rdata->jme_rx_tag);
1299 rdata->jme_rx_tag = NULL;
1303 /* Shadow status block. */
1304 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1305 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1306 sc->jme_cdata.jme_ssb_map);
1307 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1308 sc->jme_cdata.jme_ssb_block,
1309 sc->jme_cdata.jme_ssb_map);
1310 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1311 sc->jme_cdata.jme_ssb_tag = NULL;
1314 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1315 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1316 sc->jme_cdata.jme_buffer_tag = NULL;
1318 if (sc->jme_cdata.jme_ring_tag != NULL) {
1319 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1320 sc->jme_cdata.jme_ring_tag = NULL;
1323 if (sc->jme_cdata.jme_txdesc != NULL) {
1324 kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1325 sc->jme_cdata.jme_txdesc = NULL;
1327 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1328 rdata = &sc->jme_cdata.jme_rx_data[r];
1329 if (rdata->jme_rxdesc != NULL) {
1330 kfree(rdata->jme_rxdesc, M_DEVBUF);
1331 rdata->jme_rxdesc = NULL;
1337 * Make sure the interface is stopped at reboot time.
1340 jme_shutdown(device_t dev)
1342 return jme_suspend(dev);
1347 * Unlike other ethernet controllers, the JMC250 requires the link
1348 * speed to be explicitly reset to 10/100Mbps, as a gigabit
1349 * link consumes more than 375mA.
1350 * Note, we reset the link speed to 10/100Mbps with
1351 * auto-negotiation, but we don't know whether that operation
1352 * will succeed, as we have no control after powering
1353 * off. If the renegotiation fails, WOL may not work. Running
1354 * at 1Gbps draws more power than the 375mA at 3.3V that is
1355 * specified in the PCI specification, and that would result in
1356 * power to the ethernet controller being shut down completely.
1359 * Save the currently negotiated media speed/duplex/flow-control
1360 * in the softc and restore the same link again after resuming.
1361 * PHY handling such as powering down/resetting to 100Mbps
1362 * may be better handled in the suspend method of the PHY driver.
1365 jme_setlinkspeed(struct jme_softc *sc)
1367 struct mii_data *mii;
1370 JME_LOCK_ASSERT(sc);
1372 mii = device_get_softc(sc->jme_miibus);
1375 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1376 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1386 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1387 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1388 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1389 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1390 BMCR_AUTOEN | BMCR_STARTNEG);
1393 /* Poll the link state until jme(4) gets a 10/100 link. */
1394 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1396 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1397 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1407 pause("jmelnk", hz);
1410 if (i == MII_ANEGTICKS_GIGE)
1411 device_printf(sc->jme_dev, "establishing link failed, "
1412 "WOL may not work!");
1415 * No link; force the MAC to a 100Mbps, full-duplex link.
1416 * This is the last resort and may or may not work.
1418 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1419 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1424 jme_setwol(struct jme_softc *sc)
1426 struct ifnet *ifp = &sc->arpcom.ac_if;
1431 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1432 /* No PME capability, PHY power down. */
1433 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1434 MII_BMCR, BMCR_PDOWN);
1438 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1439 pmcs = CSR_READ_4(sc, JME_PMCS);
1440 pmcs &= ~PMCS_WOL_ENB_MASK;
1441 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1442 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1443 /* Enable PME message. */
1444 gpr |= GPREG0_PME_ENB;
1445 /* For gigabit controllers, reset link speed to 10/100. */
1446 if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1447 jme_setlinkspeed(sc);
1450 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1451 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1454 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1455 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1456 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1457 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1458 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1459 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1460 /* No WOL, PHY power down. */
1461 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1462 MII_BMCR, BMCR_PDOWN);
1468 jme_suspend(device_t dev)
1470 struct jme_softc *sc = device_get_softc(dev);
1471 struct ifnet *ifp = &sc->arpcom.ac_if;
1473 ifnet_serialize_all(ifp);
1478 ifnet_deserialize_all(ifp);
1484 jme_resume(device_t dev)
1486 struct jme_softc *sc = device_get_softc(dev);
1487 struct ifnet *ifp = &sc->arpcom.ac_if;
1492 ifnet_serialize_all(ifp);
1495 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1498 pmstat = pci_read_config(sc->jme_dev,
1499 pmc + PCIR_POWER_STATUS, 2);
1500 /* Disable PME and clear PME status. */
1501 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1502 pci_write_config(sc->jme_dev,
1503 pmc + PCIR_POWER_STATUS, pmstat, 2);
1507 if (ifp->if_flags & IFF_UP)
1510 ifnet_deserialize_all(ifp);
1516 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1518 struct jme_txdesc *txd;
1519 struct jme_desc *desc;
1521 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1523 int error, i, prod, symbol_desc;
1524 uint32_t cflags, flag64;
1526 M_ASSERTPKTHDR((*m_head));
1528 prod = sc->jme_cdata.jme_tx_prod;
1529 txd = &sc->jme_cdata.jme_txdesc[prod];
1531 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1536 maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1537 (JME_TXD_RSVD + symbol_desc);
1538 if (maxsegs > JME_MAXTXSEGS)
1539 maxsegs = JME_MAXTXSEGS;
1540 KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1541 ("not enough segments %d", maxsegs));
1543 error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1544 txd->tx_dmamap, m_head,
1545 txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1549 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1550 BUS_DMASYNC_PREWRITE);
1555 /* Configure checksum offload. */
1556 if (m->m_pkthdr.csum_flags & CSUM_IP)
1557 cflags |= JME_TD_IPCSUM;
1558 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1559 cflags |= JME_TD_TCPCSUM;
1560 if (m->m_pkthdr.csum_flags & CSUM_UDP)
1561 cflags |= JME_TD_UDPCSUM;
1563 /* Configure VLAN. */
1564 if (m->m_flags & M_VLANTAG) {
1565 cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1566 cflags |= JME_TD_VLAN_TAG;
1569 desc = &sc->jme_cdata.jme_tx_ring[prod];
1570 desc->flags = htole32(cflags);
1571 desc->addr_hi = htole32(m->m_pkthdr.len);
1572 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1574 * Use the 64-bit TX descriptor chain format.
1576 * The first TX desc of the chain, which is set up here,
1577 * is just a symbol TX desc carrying no payload.
1579 flag64 = JME_TD_64BIT;
1583 /* No effective TX desc is consumed */
1587 * Use the 32-bit TX descriptor chain format.
1589 * The first TX desc of the chain, which is set up here,
1590 * is an effective TX desc carrying the first segment of
1594 desc->buflen = htole32(txsegs[0].ds_len);
1595 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1597 /* One effective TX desc is consumed */
1600 sc->jme_cdata.jme_tx_cnt++;
1601 KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1602 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
1603 JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
1605 txd->tx_ndesc = 1 - i;
1606 for (; i < nsegs; i++) {
1607 desc = &sc->jme_cdata.jme_tx_ring[prod];
1608 desc->buflen = htole32(txsegs[i].ds_len);
1609 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1610 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1611 desc->flags = htole32(JME_TD_OWN | flag64);
1613 sc->jme_cdata.jme_tx_cnt++;
1614 KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1615 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
1616 JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
1619 /* Update producer index. */
1620 sc->jme_cdata.jme_tx_prod = prod;
1622 * Finally, request an interrupt and give ownership of the first
1623 * descriptor to the hardware.
1625 desc = txd->tx_desc;
1626 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1629 txd->tx_ndesc += nsegs;
1639 jme_start(struct ifnet *ifp)
1641 struct jme_softc *sc = ifp->if_softc;
1642 struct mbuf *m_head;
1645 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
1647 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1648 ifq_purge(&ifp->if_snd);
1652 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1655 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1658 while (!ifq_is_empty(&ifp->if_snd)) {
1660 * Check number of available TX descs, always
1661 * leave JME_TXD_RSVD free TX descs.
1663 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1664 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
1665 ifp->if_flags |= IFF_OACTIVE;
1669 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1674 * Pack the data into the transmit ring. If we
1675 * don't have room, set the OACTIVE flag and wait
1676 * for the NIC to drain the ring.
1678 if (jme_encap(sc, &m_head)) {
1679 KKASSERT(m_head == NULL);
1681 ifp->if_flags |= IFF_OACTIVE;
1687 * If there's a BPF listener, bounce a copy of this frame
1690 ETHER_BPF_MTAP(ifp, m_head);
1695 * Reading TXCSR takes a very long time under heavy load,
1696 * so cache the TXCSR value and write the ORed value with
1697 * the kick command to the TXCSR. This saves one register
1700 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1701 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1702 /* Set a timeout in case the chip goes out to lunch. */
1703 ifp->if_timer = JME_TX_TIMEOUT;
1708 jme_watchdog(struct ifnet *ifp)
1710 struct jme_softc *sc = ifp->if_softc;
1712 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1714 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1715 if_printf(ifp, "watchdog timeout (missed link)\n");
1722 if (sc->jme_cdata.jme_tx_cnt == 0) {
1723 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1725 if (!ifq_is_empty(&ifp->if_snd))
1730 if_printf(ifp, "watchdog timeout\n");
1733 if (!ifq_is_empty(&ifp->if_snd))
1738 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1740 struct jme_softc *sc = ifp->if_softc;
1741 struct mii_data *mii = device_get_softc(sc->jme_miibus);
1742 struct ifreq *ifr = (struct ifreq *)data;
1743 int error = 0, mask;
1745 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1749 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1750 (!(sc->jme_caps & JME_CAP_JUMBO) &&
1751 ifr->ifr_mtu > JME_MAX_MTU)) {
1756 if (ifp->if_mtu != ifr->ifr_mtu) {
1758 * No special configuration is required when the interface
1759 * MTU is changed, but the availability of Tx checksum
1760 * offload should be checked against the new MTU size, as
1761 * the FIFO size is just 2K.
1763 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1764 ifp->if_capenable &= ~IFCAP_TXCSUM;
1765 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1767 ifp->if_mtu = ifr->ifr_mtu;
1768 if (ifp->if_flags & IFF_RUNNING)
1774 if (ifp->if_flags & IFF_UP) {
1775 if (ifp->if_flags & IFF_RUNNING) {
1776 if ((ifp->if_flags ^ sc->jme_if_flags) &
1777 (IFF_PROMISC | IFF_ALLMULTI))
1783 if (ifp->if_flags & IFF_RUNNING)
1786 sc->jme_if_flags = ifp->if_flags;
1791 if (ifp->if_flags & IFF_RUNNING)
1797 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1801 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1803 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1804 ifp->if_capenable ^= IFCAP_TXCSUM;
1805 if (IFCAP_TXCSUM & ifp->if_capenable)
1806 ifp->if_hwassist |= JME_CSUM_FEATURES;
1808 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1810 if (mask & IFCAP_RXCSUM) {
1813 ifp->if_capenable ^= IFCAP_RXCSUM;
1814 reg = CSR_READ_4(sc, JME_RXMAC);
1815 reg &= ~RXMAC_CSUM_ENB;
1816 if (ifp->if_capenable & IFCAP_RXCSUM)
1817 reg |= RXMAC_CSUM_ENB;
1818 CSR_WRITE_4(sc, JME_RXMAC, reg);
1821 if (mask & IFCAP_VLAN_HWTAGGING) {
1822 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1826 if (mask & IFCAP_RSS)
1827 ifp->if_capenable ^= IFCAP_RSS;
1831 error = ether_ioctl(ifp, cmd, data);
1838 jme_mac_config(struct jme_softc *sc)
1840 struct mii_data *mii;
1841 uint32_t ghc, rxmac, txmac, txpause, gp1;
1842 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1844 mii = device_get_softc(sc->jme_miibus);
1846 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1848 CSR_WRITE_4(sc, JME_GHC, 0);
1850 rxmac = CSR_READ_4(sc, JME_RXMAC);
1851 rxmac &= ~RXMAC_FC_ENB;
1852 txmac = CSR_READ_4(sc, JME_TXMAC);
1853 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1854 txpause = CSR_READ_4(sc, JME_TXPFC);
1855 txpause &= ~TXPFC_PAUSE_ENB;
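/*
 * Full-duplex: disable collision handling and honor the negotiated
 * pause settings. Half-duplex: re-enable collision detection,
 * carrier sense, backoff and the retry transmit timer/limit.
 */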
1856 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1857 ghc |= GHC_FULL_DUPLEX;
1858 rxmac &= ~RXMAC_COLL_DET_ENB;
1859 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1860 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1863 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1864 txpause |= TXPFC_PAUSE_ENB;
1865 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1866 rxmac |= RXMAC_FC_ENB;
1868 /* Disable retry transmit timer/retry limit. */
1869 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1870 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1872 rxmac |= RXMAC_COLL_DET_ENB;
1873 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1874 /* Enable retry transmit timer/retry limit. */
1875 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1876 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1880 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1882 gp1 = CSR_READ_4(sc, JME_GPREG1);
1883 gp1 &= ~GPREG1_WA_HDX;
1885 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1888 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1890 ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1892 gp1 |= GPREG1_WA_HDX;
1896 ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1898 gp1 |= GPREG1_WA_HDX;
1901 * Use extended FIFO depth to work around CRC errors
1902 * emitted by chips before the JMC250B.
1904 phyconf = JMPHY_CONF_EXTFIFO;
1908 if (sc->jme_caps & JME_CAP_FASTETH)
1911 ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1913 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1919 CSR_WRITE_4(sc, JME_GHC, ghc);
1920 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1921 CSR_WRITE_4(sc, JME_TXMAC, txmac);
1922 CSR_WRITE_4(sc, JME_TXPFC, txpause);
1924 if (sc->jme_workaround & JME_WA_EXTFIFO) {
1925 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1926 JMPHY_CONF, phyconf);
1928 if (sc->jme_workaround & JME_WA_HDX)
1929 CSR_WRITE_4(sc, JME_GPREG1, gp1);
1935 struct jme_softc *sc = xsc;
1936 struct ifnet *ifp = &sc->arpcom.ac_if;
1940 ASSERT_SERIALIZED(&sc->jme_serialize);
1942 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1943 if (status == 0 || status == 0xFFFFFFFF)
1946 /* Disable interrupts. */
1947 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1949 status = CSR_READ_4(sc, JME_INTR_STATUS);
1950 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1953 /* Reset PCC counter/timer and Ack interrupts. */
1954 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1956 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1957 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1959 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1960 if (status & jme_rx_status[r].jme_coal) {
1961 status |= jme_rx_status[r].jme_coal |
1962 jme_rx_status[r].jme_comp;
1966 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1968 if (ifp->if_flags & IFF_RUNNING) {
1969 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1970 jme_rx_intr(sc, status);
1972 if (status & INTR_RXQ_DESC_EMPTY) {
1974 * Notify the hardware that new Rx buffers are available.
1975 * Reading RXCSR takes a very long time under heavy
1976 * load, so cache the RXCSR value and write the ORed
1977 * value with the kick command to the RXCSR. This
1978 * saves one register access cycle.
1980 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1981 RXCSR_RX_ENB | RXCSR_RXQ_START);
1984 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1985 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
1987 if (!ifq_is_empty(&ifp->if_snd))
1989 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
1993 /* Reenable interrupts. */
1994 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1998 jme_txeof(struct jme_softc *sc)
2000 struct ifnet *ifp = &sc->arpcom.ac_if;
2001 struct jme_txdesc *txd;
2005 cons = sc->jme_cdata.jme_tx_cons;
2006 if (cons == sc->jme_cdata.jme_tx_prod)
2010 * Go through our Tx list and free mbufs for those
2011 * frames which have been transmitted.
2013 while (cons != sc->jme_cdata.jme_tx_prod) {
2014 txd = &sc->jme_cdata.jme_txdesc[cons];
2015 KASSERT(txd->tx_m != NULL,
2016 ("%s: freeing NULL mbuf!", __func__));
2018 status = le32toh(txd->tx_desc->flags);
2019 if ((status & JME_TD_OWN) == JME_TD_OWN)
2022 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2026 if (status & JME_TD_COLLISION) {
2027 ifp->if_collisions +=
2028 le32toh(txd->tx_desc->buflen) &
2029 JME_TD_BUF_LEN_MASK;
2034 * Only the first descriptor of a multi-descriptor
2035 * transmission is updated, so the driver has to skip the entire
2036 * chain of buffers for the transmitted frame. In other
2037 * words, the JME_TD_OWN bit is valid only in the first
2038 * descriptor of a multi-descriptor transmission.
2040 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2041 sc->jme_cdata.jme_tx_ring[cons].flags = 0;
2042 JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
2045 /* Reclaim transferred mbufs. */
2046 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2049 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2050 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2051 ("%s: Active Tx desc counter was garbled", __func__));
2054 sc->jme_cdata.jme_tx_cons = cons;
2056 if (sc->jme_cdata.jme_tx_cnt == 0)
2059 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
2060 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
2061 ifp->if_flags &= ~IFF_OACTIVE;
2064 static __inline void
2065 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2069 for (i = 0; i < count; ++i) {
2070 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2071 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2075 static __inline struct pktinfo *
2076 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2078 if (flags & JME_RD_IPV4)
2079 pi->pi_netisr = NETISR_IP;
2080 else if (flags & JME_RD_IPV6)
2081 pi->pi_netisr = NETISR_IPV6;
2086 pi->pi_l3proto = IPPROTO_UNKNOWN;
2088 if (flags & JME_RD_MORE_FRAG)
2089 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2090 else if (flags & JME_RD_TCP)
2091 pi->pi_l3proto = IPPROTO_TCP;
2092 else if (flags & JME_RD_UDP)
2093 pi->pi_l3proto = IPPROTO_UDP;
2099 /* Receive a frame. */
2101 jme_rxpkt(struct jme_rxdata *rdata)
2103 struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2104 struct jme_desc *desc;
2105 struct jme_rxdesc *rxd;
2106 struct mbuf *mp, *m;
2107 uint32_t flags, status, hash, hashinfo;
2108 int cons, count, nsegs;
2110 cons = rdata->jme_rx_cons;
2111 desc = &rdata->jme_rx_ring[cons];
2112 flags = le32toh(desc->flags);
2113 status = le32toh(desc->buflen);
2114 hash = le32toh(desc->addr_hi);
2115 hashinfo = le32toh(desc->addr_lo);
2116 nsegs = JME_RX_NSEGS(status);
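/*
 * A frame may span several Rx descriptors; buflen encodes both the
 * segment count and the total received length for the frame.
 */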
2118 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2119 "hash 0x%08x, hash info 0x%08x\n",
2120 rdata->jme_rx_idx, flags, hash, hashinfo);
2122 if (status & JME_RX_ERR_STAT) {
2124 jme_discard_rxbufs(rdata, cons, nsegs);
2125 #ifdef JME_SHOW_ERRORS
2126 if_printf(ifp, "%s : receive error = 0x%b\n",
2127 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2129 rdata->jme_rx_cons += nsegs;
2130 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2134 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2135 for (count = 0; count < nsegs; count++,
2136 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2137 rxd = &rdata->jme_rxdesc[cons];
2140 /* Add a new receive buffer to the ring. */
2141 if (jme_newbuf(rdata, rxd, 0) != 0) {
2144 jme_discard_rxbufs(rdata, cons, nsegs - count);
2145 if (rdata->jme_rxhead != NULL) {
2146 m_freem(rdata->jme_rxhead);
2147 JME_RXCHAIN_RESET(rdata);
2153 * Assume we've received a full-sized frame.
2154 * The actual size is fixed up when we encounter the end of
2155 * a multi-segmented frame.
2157 mp->m_len = MCLBYTES;
2159 /* Chain received mbufs. */
2160 if (rdata->jme_rxhead == NULL) {
2161 rdata->jme_rxhead = mp;
2162 rdata->jme_rxtail = mp;
2165 * Receive processor can receive a maximum frame
2166 * size of 65535 bytes.
2168 rdata->jme_rxtail->m_next = mp;
2169 rdata->jme_rxtail = mp;
2172 if (count == nsegs - 1) {
2173 struct pktinfo pi0, *pi;
2175 /* Last desc. for this frame. */
2176 m = rdata->jme_rxhead;
2177 m->m_pkthdr.len = rdata->jme_rxlen;
2179 /* Set first mbuf size. */
2180 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2181 /* Set last mbuf size. */
2182 mp->m_len = rdata->jme_rxlen -
2183 ((MCLBYTES - JME_RX_PAD_BYTES) +
2184 (MCLBYTES * (nsegs - 2)));
2186 m->m_len = rdata->jme_rxlen;
2188 m->m_pkthdr.rcvif = ifp;
2191 * Account for the 10 bytes of auto padding which are used
2192 * to align the IP header on a 32-bit boundary. Also note
2193 * that the CRC bytes are automatically removed by the
2196 m->m_data += JME_RX_PAD_BYTES;
2198 /* Set checksum information. */
2199 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2200 (flags & JME_RD_IPV4)) {
2201 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2202 if (flags & JME_RD_IPCSUM)
2203 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2204 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2205 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2206 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2207 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2208 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2209 m->m_pkthdr.csum_flags |=
2210 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2211 m->m_pkthdr.csum_data = 0xffff;
2215 /* Check for VLAN tagged packets. */
2216 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2217 (flags & JME_RD_VLAN_TAG)) {
2218 m->m_pkthdr.ether_vlantag =
2219 flags & JME_RD_VLAN_MASK;
2220 m->m_flags |= M_VLANTAG;
2225 if (ifp->if_capenable & IFCAP_RSS)
2226 pi = jme_pktinfo(&pi0, flags);
2231 (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
2232 m->m_flags |= M_HASH;
2233 m->m_pkthdr.hash = toeplitz_hash(hash);
2236 #ifdef JME_RSS_DEBUG
2238 JME_RSS_DPRINTF(rdata->jme_sc, 10,
2239 "isr %d flags %08x, l3 %d %s\n",
2240 pi->pi_netisr, pi->pi_flags,
2242 (m->m_flags & M_HASH) ? "hash" : "");
2247 ether_input_pkt(ifp, m, pi);
2249 /* Reset mbuf chains. */
2250 JME_RXCHAIN_RESET(rdata);
2251 #ifdef JME_RSS_DEBUG
2252 rdata->jme_rx_pkt++;
2257 rdata->jme_rx_cons += nsegs;
2258 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
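/*
 * Drain completed Rx descriptors, stopping when the hardware still owns
 * the next descriptor or (under polling) when the burst count is exhausted.
 */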
2262 jme_rxeof(struct jme_rxdata *rdata, int count)
2264 struct jme_desc *desc;
2268 #ifdef DEVICE_POLLING
2269 if (count >= 0 && count-- == 0)
2272 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2273 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2275 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2279 * Check the number of segments against the received bytes.
2280 * A non-matching value would indicate that the hardware
2281 * is still trying to update the Rx descriptors. I'm not
2282 * sure whether this check is needed.
2284 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2285 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2286 if (nsegs != howmany(pktlen, MCLBYTES)) {
2287 if_printf(&rdata->jme_sc->arpcom.ac_if,
2288 "RX fragment count(%d) and "
2289 "packet size(%d) mismach\n", nsegs, pktlen);
2293 /* Received a frame. */
2301 struct jme_softc *sc = xsc;
2302 struct ifnet *ifp = &sc->arpcom.ac_if;
2303 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2305 ifnet_serialize_all(ifp);
2308 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2310 ifnet_deserialize_all(ifp);
2314 jme_reset(struct jme_softc *sc)
2318 /* Make sure that TX and RX are stopped */
2323 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2327 * Hold the reset bit before stopping the reset
2330 /* Disable TXMAC and TXOFL clock sources */
2331 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2332 /* Disable RXMAC clock source */
2333 val = CSR_READ_4(sc, JME_GPREG1);
2334 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2336 CSR_READ_4(sc, JME_GHC);
2339 CSR_WRITE_4(sc, JME_GHC, 0);
2341 CSR_READ_4(sc, JME_GHC);
2344 * Clear the reset bit after stopping the reset
2347 /* Enable TXMAC and TXOFL clock sources */
2348 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2349 /* Enable RXMAC clock source */
2350 val = CSR_READ_4(sc, JME_GPREG1);
2351 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2353 CSR_READ_4(sc, JME_GHC);
2355 /* Disable TXMAC and TXOFL clock sources */
2356 CSR_WRITE_4(sc, JME_GHC, 0);
2357 /* Disable RXMAC clock source */
2358 val = CSR_READ_4(sc, JME_GPREG1);
2359 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2361 CSR_READ_4(sc, JME_GHC);
2363 /* Enable TX and RX */
2364 val = CSR_READ_4(sc, JME_TXCSR);
2365 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2366 val = CSR_READ_4(sc, JME_RXCSR);
2367 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2369 CSR_READ_4(sc, JME_TXCSR);
2370 CSR_READ_4(sc, JME_RXCSR);
2372 /* Enable TXMAC and TXOFL clock sources */
2373 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2374 /* Enable RXMAC clock source */
2375 val = CSR_READ_4(sc, JME_GPREG1);
2376 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2378 CSR_READ_4(sc, JME_GHC);
2380 /* Stop TX and RX */
2388 struct jme_softc *sc = xsc;
2389 struct ifnet *ifp = &sc->arpcom.ac_if;
2390 struct mii_data *mii;
2391 uint8_t eaddr[ETHER_ADDR_LEN];
2396 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2399 * Cancel any pending I/O.
2404 * Reset the chip to a known state.
2409 * Set up the MSI/MSI-X vector to interrupt mapping
2414 howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2415 KKASSERT(sc->jme_txd_spare >= 1);
2418 * If we use 64-bit address mode for transmitting, each Tx request
2419 * needs one more symbol descriptor.
2421 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2422 sc->jme_txd_spare += 1;
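/*
 * Illustrative example (not in the original code), assuming 2048-byte
 * clusters: with the standard 1500-byte MTU,
 * howmany(1500 + sizeof(struct ether_vlan_header), MCLBYTES) yields
 * 1 spare descriptor, plus 1 more when 64-bit DMA addressing requires
 * the extra symbol descriptor.
 */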
2424 if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
2427 jme_disable_rss(sc);
2429 /* Init RX descriptors */
2430 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2431 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2433 if_printf(ifp, "initialization failed: "
2434 "no memory for %dth RX ring.\n", r);
2440 /* Init TX descriptors */
2441 jme_init_tx_ring(sc);
2443 /* Initialize shadow status block. */
2446 /* Reprogram the station address. */
2447 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2448 CSR_WRITE_4(sc, JME_PAR0,
2449 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2450 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2453 * Configure Tx queue.
2454 * Tx priority queue weight value : 0
2455 * Tx FIFO threshold for processing next packet : 16QW
2456 * Maximum Tx DMA length : 512
2457 * Allow Tx DMA burst.
2459 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2460 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2461 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2462 sc->jme_txcsr |= sc->jme_tx_dma_size;
2463 sc->jme_txcsr |= TXCSR_DMA_BURST;
2464 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2466 /* Set Tx descriptor counter. */
2467 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
2469 /* Set Tx ring address to the hardware. */
2470 paddr = sc->jme_cdata.jme_tx_ring_paddr;
2471 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2472 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2474 /* Configure TxMAC parameters. */
2475 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2476 reg |= TXMAC_THRESH_1_PKT;
2477 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2478 CSR_WRITE_4(sc, JME_TXMAC, reg);
2481 * Configure Rx queue.
2482 * FIFO full threshold for transmitting Tx pause packet : 128T
2483 * FIFO threshold for processing next packet : 128QW
2485 * Max Rx DMA length : 128
2486 * Rx descriptor retry : 32
2487 * Rx descriptor retry time gap : 256ns
2488 * Don't receive runt/bad frame.
2490 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2493 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2494 * than 4K bytes will suffer from Rx FIFO overruns. So
2495 * decrease the FIFO threshold to reduce FIFO overruns for
2496 * frames larger than 4000 bytes.
2497 * For best performance with standard-MTU-sized frames, use the
2498 * maximum allowable FIFO threshold, 128QW.
2500 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2502 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2504 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2506 /* Improve PCI Express compatibility */
2507 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2509 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2510 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2511 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
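/*
 * Illustrative example (not in the original code): with the standard
 * 1500-byte MTU the largest frame is
 * 1500 + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN = 1522 bytes,
 * which fits in the 4K Rx FIFO, so the 128QW threshold is selected
 * for best performance.
 */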
2512 /* XXX TODO DROP_BAD */
2514 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2515 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2517 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2519 /* Set Rx descriptor counter. */
2520 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2522 /* Set Rx ring address to the hardware. */
2523 paddr = rdata->jme_rx_ring_paddr;
2524 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2525 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2528 /* Clear receive filter. */
2529 CSR_WRITE_4(sc, JME_RXMAC, 0);
2531 /* Set up the receive filter. */
2536 * Disable all WOL bits as WOL can interfere with normal Rx
2537 * operation. Also clear the WOL detection status bits.
2539 reg = CSR_READ_4(sc, JME_PMCS);
2540 reg &= ~PMCS_WOL_ENB_MASK;
2541 CSR_WRITE_4(sc, JME_PMCS, reg);
2544 * Pad 10 bytes right before the received frame. This greatly
2545 * helps Rx performance on strict-alignment architectures, as
2546 * the driver does not need to copy the frame to align the payload.
2548 reg = CSR_READ_4(sc, JME_RXMAC);
2549 reg |= RXMAC_PAD_10BYTES;
2551 if (ifp->if_capenable & IFCAP_RXCSUM)
2552 reg |= RXMAC_CSUM_ENB;
2553 CSR_WRITE_4(sc, JME_RXMAC, reg);
2555 /* Configure general purpose reg0 */
2556 reg = CSR_READ_4(sc, JME_GPREG0);
2557 reg &= ~GPREG0_PCC_UNIT_MASK;
2558 /* Set PCC timer resolution to micro-seconds unit. */
2559 reg |= GPREG0_PCC_UNIT_US;
2561 * Disable all shadow register posting, as we have to read the
2562 * JME_INTR_STATUS register in jme_intr. It also seems
2563 * hard to synchronize the interrupt status between
2564 * hardware and software with shadow posting due to the
2565 * requirements of bus_dmamap_sync(9).
2567 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2568 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2569 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2570 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2571 /* Disable posting of DW0. */
2572 reg &= ~GPREG0_POST_DW0_ENB;
2573 /* Clear PME message. */
2574 reg &= ~GPREG0_PME_ENB;
2575 /* Set PHY address. */
2576 reg &= ~GPREG0_PHY_ADDR_MASK;
2577 reg |= sc->jme_phyaddr;
2578 CSR_WRITE_4(sc, JME_GPREG0, reg);
2580 /* Configure Tx queue 0 packet completion coalescing. */
2581 jme_set_tx_coal(sc);
2583 /* Configure Rx queues packet completion coalescing. */
2584 jme_set_rx_coal(sc);
2586 /* Configure shadow status block but don't enable posting. */
2587 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2588 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2589 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2591 /* Disable Timer 1 and Timer 2. */
2592 CSR_WRITE_4(sc, JME_TIMER1, 0);
2593 CSR_WRITE_4(sc, JME_TIMER2, 0);
2595 /* Configure retry transmit period, retry limit value. */
2596 CSR_WRITE_4(sc, JME_TXTRHD,
2597 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2598 TXTRHD_RT_PERIOD_MASK) |
2599 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2600 TXTRHD_RT_LIMIT_MASK));
2602 #ifdef DEVICE_POLLING
2603 if (!(ifp->if_flags & IFF_POLLING))
2605 /* Initialize the interrupt mask. */
2606 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2607 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2610 * Enabling Tx/Rx DMA engines and Rx queue processing is
2611 * done after detection of valid link in jme_miibus_statchg.
2613 sc->jme_flags &= ~JME_FLAG_LINK;
2615 /* Set the current media. */
2616 mii = device_get_softc(sc->jme_miibus);
2619 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2621 ifp->if_flags |= IFF_RUNNING;
2622 ifp->if_flags &= ~IFF_OACTIVE;
2626 jme_stop(struct jme_softc *sc)
2628 struct ifnet *ifp = &sc->arpcom.ac_if;
2629 struct jme_txdesc *txd;
2630 struct jme_rxdesc *rxd;
2631 struct jme_rxdata *rdata;
2634 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2637 * Mark the interface down and cancel the watchdog timer.
2639 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2642 callout_stop(&sc->jme_tick_ch);
2643 sc->jme_flags &= ~JME_FLAG_LINK;
2646 * Disable interrupts.
2648 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2649 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2651 /* Disable updating shadow status block. */
2652 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2653 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2655 /* Stop receiver, transmitter. */
2660 * Free partially finished RX segments
2662 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2663 rdata = &sc->jme_cdata.jme_rx_data[r];
2664 if (rdata->jme_rxhead != NULL)
2665 m_freem(rdata->jme_rxhead);
2666 JME_RXCHAIN_RESET(rdata);
2670 * Free RX and TX mbufs still in the queues.
2672 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2673 rdata = &sc->jme_cdata.jme_rx_data[r];
2674 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2675 rxd = &rdata->jme_rxdesc[i];
2676 if (rxd->rx_m != NULL) {
2677 bus_dmamap_unload(rdata->jme_rx_tag,
2684 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2685 txd = &sc->jme_cdata.jme_txdesc[i];
2686 if (txd->tx_m != NULL) {
2687 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2697 jme_stop_tx(struct jme_softc *sc)
2702 reg = CSR_READ_4(sc, JME_TXCSR);
2703 if ((reg & TXCSR_TX_ENB) == 0)
2705 reg &= ~TXCSR_TX_ENB;
2706 CSR_WRITE_4(sc, JME_TXCSR, reg);
2707 for (i = JME_TIMEOUT; i > 0; i--) {
2709 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2713 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2717 jme_stop_rx(struct jme_softc *sc)
2722 reg = CSR_READ_4(sc, JME_RXCSR);
2723 if ((reg & RXCSR_RX_ENB) == 0)
2725 reg &= ~RXCSR_RX_ENB;
2726 CSR_WRITE_4(sc, JME_RXCSR, reg);
2727 for (i = JME_TIMEOUT; i > 0; i--) {
2729 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2733 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2737 jme_init_tx_ring(struct jme_softc *sc)
2739 struct jme_chain_data *cd;
2740 struct jme_txdesc *txd;
2743 sc->jme_cdata.jme_tx_prod = 0;
2744 sc->jme_cdata.jme_tx_cons = 0;
2745 sc->jme_cdata.jme_tx_cnt = 0;
2747 cd = &sc->jme_cdata;
2748 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2749 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2750 txd = &sc->jme_cdata.jme_txdesc[i];
2752 txd->tx_desc = &cd->jme_tx_ring[i];
2758 jme_init_ssb(struct jme_softc *sc)
2760 struct jme_chain_data *cd;
2762 cd = &sc->jme_cdata;
2763 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2767 jme_init_rx_ring(struct jme_rxdata *rdata)
2769 struct jme_rxdesc *rxd;
2772 KKASSERT(rdata->jme_rxhead == NULL &&
2773 rdata->jme_rxtail == NULL &&
2774 rdata->jme_rxlen == 0);
2775 rdata->jme_rx_cons = 0;
2777 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2778 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2781 rxd = &rdata->jme_rxdesc[i];
2783 rxd->rx_desc = &rdata->jme_rx_ring[i];
2784 error = jme_newbuf(rdata, rxd, 1);
2792 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
2795 bus_dma_segment_t segs;
2799 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2803 * The JMC250 has a 64-bit boundary alignment limitation, so jme(4)
2804 * takes advantage of the hardware's 10-byte padding feature
2805 * in order not to copy the entire frame to align the IP header on
2808 m->m_len = m->m_pkthdr.len = MCLBYTES;
2810 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2811 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2816 if_printf(&rdata->jme_sc->arpcom.ac_if,
2817 "can't load RX mbuf\n");
2822 if (rxd->rx_m != NULL) {
2823 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2824 BUS_DMASYNC_POSTREAD);
2825 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2827 map = rxd->rx_dmamap;
2828 rxd->rx_dmamap = rdata->jme_rx_sparemap;
2829 rdata->jme_rx_sparemap = map;
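/*
 * Note (descriptive, not in the original code): the mbuf is always
 * loaded into the spare DMA map first; only on success is the spare
 * map swapped with the descriptor's map just above, so a load failure
 * never leaves the ring entry without a usable map or mbuf.
 */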
2831 rxd->rx_paddr = segs.ds_addr;
2833 jme_setup_rxdesc(rxd);
2838 jme_set_vlan(struct jme_softc *sc)
2840 struct ifnet *ifp = &sc->arpcom.ac_if;
2843 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2845 reg = CSR_READ_4(sc, JME_RXMAC);
2846 reg &= ~RXMAC_VLAN_ENB;
2847 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2848 reg |= RXMAC_VLAN_ENB;
2849 CSR_WRITE_4(sc, JME_RXMAC, reg);
2853 jme_set_filter(struct jme_softc *sc)
2855 struct ifnet *ifp = &sc->arpcom.ac_if;
2856 struct ifmultiaddr *ifma;
2861 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2863 rxcfg = CSR_READ_4(sc, JME_RXMAC);
2864 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2868 * Always accept frames destined to our station address.
2869 * Always accept broadcast frames.
2871 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2873 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2874 if (ifp->if_flags & IFF_PROMISC)
2875 rxcfg |= RXMAC_PROMISC;
2876 if (ifp->if_flags & IFF_ALLMULTI)
2877 rxcfg |= RXMAC_ALLMULTI;
2878 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2879 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2880 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2885 * Set up the multicast address filter by passing all multicast
2886 * addresses through a CRC generator, and then using the low-order
2887 * 6 bits as an index into the 64-bit multicast hash table. The
2888 * high-order bits select the register, while the rest of the bits
2889 * select the bit within the register.
2891 rxcfg |= RXMAC_MULTICAST;
2892 bzero(mchash, sizeof(mchash));
2894 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2895 if (ifma->ifma_addr->sa_family != AF_LINK)
2897 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2898 ifma->ifma_addr), ETHER_ADDR_LEN);
2900 /* Just want the 6 least significant bits. */
2903 /* Set the corresponding bit in the hash table. */
2904 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2907 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2908 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2909 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
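/*
 * Illustrative example (not in the original code): after the CRC is
 * reduced to its 6 low-order bits, a value of e.g. 0x23 (35) sets
 * bit (35 & 0x1f) == 3 in mchash[35 >> 5] == mchash[1], i.e. bit 3
 * of the JME_MAR1 register.
 */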
2913 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2915 struct jme_softc *sc = arg1;
2916 struct ifnet *ifp = &sc->arpcom.ac_if;
2919 ifnet_serialize_all(ifp);
2921 v = sc->jme_tx_coal_to;
2922 error = sysctl_handle_int(oidp, &v, 0, req);
2923 if (error || req->newptr == NULL)
2926 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2931 if (v != sc->jme_tx_coal_to) {
2932 sc->jme_tx_coal_to = v;
2933 if (ifp->if_flags & IFF_RUNNING)
2934 jme_set_tx_coal(sc);
2937 ifnet_deserialize_all(ifp);
2942 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2944 struct jme_softc *sc = arg1;
2945 struct ifnet *ifp = &sc->arpcom.ac_if;
2948 ifnet_serialize_all(ifp);
2950 v = sc->jme_tx_coal_pkt;
2951 error = sysctl_handle_int(oidp, &v, 0, req);
2952 if (error || req->newptr == NULL)
2955 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2960 if (v != sc->jme_tx_coal_pkt) {
2961 sc->jme_tx_coal_pkt = v;
2962 if (ifp->if_flags & IFF_RUNNING)
2963 jme_set_tx_coal(sc);
2966 ifnet_deserialize_all(ifp);
2971 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2973 struct jme_softc *sc = arg1;
2974 struct ifnet *ifp = &sc->arpcom.ac_if;
2977 ifnet_serialize_all(ifp);
2979 v = sc->jme_rx_coal_to;
2980 error = sysctl_handle_int(oidp, &v, 0, req);
2981 if (error || req->newptr == NULL)
2984 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2989 if (v != sc->jme_rx_coal_to) {
2990 sc->jme_rx_coal_to = v;
2991 if (ifp->if_flags & IFF_RUNNING)
2992 jme_set_rx_coal(sc);
2995 ifnet_deserialize_all(ifp);
3000 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3002 struct jme_softc *sc = arg1;
3003 struct ifnet *ifp = &sc->arpcom.ac_if;
3006 ifnet_serialize_all(ifp);
3008 v = sc->jme_rx_coal_pkt;
3009 error = sysctl_handle_int(oidp, &v, 0, req);
3010 if (error || req->newptr == NULL)
3013 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3018 if (v != sc->jme_rx_coal_pkt) {
3019 sc->jme_rx_coal_pkt = v;
3020 if (ifp->if_flags & IFF_RUNNING)
3021 jme_set_rx_coal(sc);
3024 ifnet_deserialize_all(ifp);
3029 jme_set_tx_coal(struct jme_softc *sc)
3033 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3035 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3036 PCCTX_COAL_PKT_MASK;
3037 reg |= PCCTX_COAL_TXQ0;
3038 CSR_WRITE_4(sc, JME_PCCTX, reg);
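/*
 * Illustrative example (not in the original code): with the PCC timer
 * unit set to microseconds in GPREG0, a hypothetical timeout of 100
 * and packet count of 8 would be encoded as
 *   reg = (100 << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK;
 *   reg |= (8 << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK;
 * so the Tx completion interrupt fires after 8 packets or 100us,
 * whichever comes first.
 */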
3042 jme_set_rx_coal(struct jme_softc *sc)
3047 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3049 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3050 PCCRX_COAL_PKT_MASK;
3051 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3052 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3055 #ifdef DEVICE_POLLING
3058 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3060 struct jme_softc *sc = ifp->if_softc;
3064 ASSERT_SERIALIZED(&sc->jme_serialize);
3068 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3071 case POLL_DEREGISTER:
3072 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3075 case POLL_AND_CHECK_STATUS:
3077 status = CSR_READ_4(sc, JME_INTR_STATUS);
3079 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3080 struct jme_rxdata *rdata =
3081 &sc->jme_cdata.jme_rx_data[r];
3083 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3084 jme_rxeof(rdata, count);
3085 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3088 if (status & INTR_RXQ_DESC_EMPTY) {
3089 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3090 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3091 RXCSR_RX_ENB | RXCSR_RXQ_START);
3094 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3096 if (!ifq_is_empty(&ifp->if_snd))
3098 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3103 #endif /* DEVICE_POLLING */
3106 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3111 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3112 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3113 JME_RX_RING_ALIGN, 0,
3114 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3115 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3117 device_printf(rdata->jme_sc->jme_dev,
3118 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3121 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3122 rdata->jme_rx_ring_map = dmem.dmem_map;
3123 rdata->jme_rx_ring = dmem.dmem_addr;
3124 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3130 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3134 /* Create tag for Rx buffers. */
3135 error = bus_dma_tag_create(
3136 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3137 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3138 BUS_SPACE_MAXADDR, /* lowaddr */
3139 BUS_SPACE_MAXADDR, /* highaddr */
3140 NULL, NULL, /* filter, filterarg */
3141 MCLBYTES, /* maxsize */
3143 MCLBYTES, /* maxsegsize */
3144 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3145 &rdata->jme_rx_tag);
3147 device_printf(rdata->jme_sc->jme_dev,
3148 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3152 /* Create DMA maps for Rx buffers. */
3153 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3154 &rdata->jme_rx_sparemap);
3156 device_printf(rdata->jme_sc->jme_dev,
3157 "could not create %dth spare Rx dmamap.\n",
3159 bus_dma_tag_destroy(rdata->jme_rx_tag);
3160 rdata->jme_rx_tag = NULL;
3163 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3164 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3166 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3171 device_printf(rdata->jme_sc->jme_dev,
3172 "could not create %dth Rx dmamap "
3173 "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3175 for (j = 0; j < i; ++j) {
3176 rxd = &rdata->jme_rxdesc[j];
3177 bus_dmamap_destroy(rdata->jme_rx_tag,
3180 bus_dmamap_destroy(rdata->jme_rx_tag,
3181 rdata->jme_rx_sparemap);
3182 bus_dma_tag_destroy(rdata->jme_rx_tag);
3183 rdata->jme_rx_tag = NULL;
3191 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3195 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3196 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3198 if (status & rdata->jme_rx_coal) {
3199 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3200 jme_rxeof(rdata, -1);
3201 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3207 jme_enable_rss(struct jme_softc *sc)
3210 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3213 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3214 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3215 ("%s: invalid # of RX rings (%d)",
3216 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3218 rssc = RSSC_HASH_64_ENTRY;
3219 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3220 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3221 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3222 CSR_WRITE_4(sc, JME_RSSC, rssc);
3224 toeplitz_get_key(key, sizeof(key));
3225 for (i = 0; i < RSSKEY_NREGS; ++i) {
3228 keyreg = RSSKEY_REGVAL(key, i);
3229 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3231 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3235 * Create the redirect table in the following fashion:
3236 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3239 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3242 q = i % sc->jme_cdata.jme_rx_ring_cnt;
3243 ind |= q << (i * 8);
3245 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3247 for (i = 0; i < RSSTBL_NREGS; ++i)
3248 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
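/*
 * Illustrative example (not in the original code), assuming
 * RSSTBL_REGSIZE == 4 (one byte per table entry): with 4 Rx rings the
 * packed value is ind == 0x03020100 and with 2 rings
 * ind == 0x01000100, i.e. the ring index simply repeats modulo the
 * ring count across every redirect table register.
 */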
3252 jme_disable_rss(struct jme_softc *sc)
3254 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3258 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3260 struct jme_softc *sc = ifp->if_softc;
3263 case IFNET_SERIALIZE_ALL:
3264 lwkt_serialize_array_enter(sc->jme_serialize_arr,
3265 sc->jme_serialize_cnt, 0);
3268 case IFNET_SERIALIZE_MAIN:
3269 lwkt_serialize_enter(&sc->jme_serialize);
3272 case IFNET_SERIALIZE_TX:
3273 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3276 case IFNET_SERIALIZE_RX(0):
3277 lwkt_serialize_enter(
3278 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3281 case IFNET_SERIALIZE_RX(1):
3282 lwkt_serialize_enter(
3283 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3286 case IFNET_SERIALIZE_RX(2):
3287 lwkt_serialize_enter(
3288 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3291 case IFNET_SERIALIZE_RX(3):
3292 lwkt_serialize_enter(
3293 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3297 panic("%s unsupported serialize type", ifp->if_xname);
3302 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3304 struct jme_softc *sc = ifp->if_softc;
3307 case IFNET_SERIALIZE_ALL:
3308 lwkt_serialize_array_exit(sc->jme_serialize_arr,
3309 sc->jme_serialize_cnt, 0);
3312 case IFNET_SERIALIZE_MAIN:
3313 lwkt_serialize_exit(&sc->jme_serialize);
3316 case IFNET_SERIALIZE_TX:
3317 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3320 case IFNET_SERIALIZE_RX(0):
3321 lwkt_serialize_exit(
3322 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3325 case IFNET_SERIALIZE_RX(1):
3326 lwkt_serialize_exit(
3327 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3330 case IFNET_SERIALIZE_RX(2):
3331 lwkt_serialize_exit(
3332 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3335 case IFNET_SERIALIZE_RX(3):
3336 lwkt_serialize_exit(
3337 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3341 panic("%s unsupported serialize type", ifp->if_xname);
3346 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3348 struct jme_softc *sc = ifp->if_softc;
3351 case IFNET_SERIALIZE_ALL:
3352 return lwkt_serialize_array_try(sc->jme_serialize_arr,
3353 sc->jme_serialize_cnt, 0);
3355 case IFNET_SERIALIZE_MAIN:
3356 return lwkt_serialize_try(&sc->jme_serialize);
3358 case IFNET_SERIALIZE_TX:
3359 return lwkt_serialize_try(&sc->jme_cdata.jme_tx_serialize);
3361 case IFNET_SERIALIZE_RX(0):
3362 return lwkt_serialize_try(
3363 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3365 case IFNET_SERIALIZE_RX(1):
3366 return lwkt_serialize_try(
3367 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3369 case IFNET_SERIALIZE_RX(2):
3370 return lwkt_serialize_try(
3371 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3373 case IFNET_SERIALIZE_RX(3):
3374 return lwkt_serialize_try(
3375 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3378 panic("%s unsupported serialize type", ifp->if_xname);
3385 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3386 boolean_t serialized)
3388 struct jme_softc *sc = ifp->if_softc;
3389 struct jme_rxdata *rdata;
3393 case IFNET_SERIALIZE_ALL:
3395 for (i = 0; i < sc->jme_serialize_cnt; ++i)
3396 ASSERT_SERIALIZED(sc->jme_serialize_arr[i]);
3398 for (i = 0; i < sc->jme_serialize_cnt; ++i)
3399 ASSERT_NOT_SERIALIZED(sc->jme_serialize_arr[i]);
3403 case IFNET_SERIALIZE_MAIN:
3405 ASSERT_SERIALIZED(&sc->jme_serialize);
3407 ASSERT_NOT_SERIALIZED(&sc->jme_serialize);
3410 case IFNET_SERIALIZE_TX:
3412 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3414 ASSERT_NOT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3417 case IFNET_SERIALIZE_RX(0):
3418 rdata = &sc->jme_cdata.jme_rx_data[0];
3420 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3422 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3425 case IFNET_SERIALIZE_RX(1):
3426 rdata = &sc->jme_cdata.jme_rx_data[1];
3428 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3430 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3433 case IFNET_SERIALIZE_RX(2):
3434 rdata = &sc->jme_cdata.jme_rx_data[2];
3436 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3438 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3441 case IFNET_SERIALIZE_RX(3):
3442 rdata = &sc->jme_cdata.jme_rx_data[3];
3444 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3446 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3450 panic("%s unsupported serialize type", ifp->if_xname);
3454 #endif /* INVARIANTS */
3457 jme_msix_try_alloc(device_t dev)
3459 struct jme_softc *sc = device_get_softc(dev);
3460 struct jme_msix_data *msix;
3461 int error, i, r, msix_enable, msix_count;
3463 msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt;
3464 KKASSERT(msix_count <= JME_NMSIX);
3466 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3469 * We leave the 1st MSI-X vector unused, so we
3470 * actually need msix_count + 1 MSI-X vectors.
3472 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3475 for (i = 0; i < msix_count; ++i)
3476 sc->jme_msix[i].jme_msix_rid = -1;
3480 msix = &sc->jme_msix[i++];
3481 msix->jme_msix_cpuid = 0; /* XXX Put TX to cpu0 */
3482 msix->jme_msix_arg = &sc->jme_cdata;
3483 msix->jme_msix_func = jme_msix_tx;
3484 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3485 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
3486 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3487 device_get_nameunit(dev));
3489 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3490 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3492 msix = &sc->jme_msix[i++];
3493 msix->jme_msix_cpuid = r; /* XXX Put RX to cpuX */
3494 msix->jme_msix_arg = rdata;
3495 msix->jme_msix_func = jme_msix_rx;
3496 msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
3497 msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3498 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3499 "%s rx%d", device_get_nameunit(dev), r);
3502 KKASSERT(i == msix_count);
3504 error = pci_setup_msix(dev);
3508 /* Setup jme_msix_cnt early, so we could cleanup */
3509 sc->jme_msix_cnt = msix_count;
3511 for (i = 0; i < msix_count; ++i) {
3512 msix = &sc->jme_msix[i];
3514 msix->jme_msix_vector = i + 1;
3515 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3516 &msix->jme_msix_rid, msix->jme_msix_cpuid);
3520 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3521 &msix->jme_msix_rid, RF_ACTIVE);
3522 if (msix->jme_msix_res == NULL) {
3528 for (i = 0; i < JME_INTR_CNT; ++i) {
3529 uint32_t intr_mask = (1 << i);
3532 if ((JME_INTRS & intr_mask) == 0)
3535 for (x = 0; x < msix_count; ++x) {
3536 msix = &sc->jme_msix[x];
3537 if (msix->jme_msix_intrs & intr_mask) {
3540 reg = i / JME_MSINUM_FACTOR;
3541 KKASSERT(reg < JME_MSINUM_CNT);
3543 shift = (i % JME_MSINUM_FACTOR) * 4;
3545 sc->jme_msinum[reg] |=
3546 (msix->jme_msix_vector << shift);
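/*
 * Illustrative example (not in the original code), assuming
 * JME_MSINUM_FACTOR == 8 (eight 4-bit vector fields per 32-bit
 * MSINUM register): interrupt source i == 10 is programmed into
 * register 10 / 8 == 1 at bit offset (10 % 8) * 4 == 8.
 */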
3554 for (i = 0; i < JME_MSINUM_CNT; ++i) {
3555 device_printf(dev, "MSINUM%d: %#x\n", i,
3560 pci_enable_msix(dev);
3561 sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3569 jme_intr_alloc(device_t dev)
3571 struct jme_softc *sc = device_get_softc(dev);
3574 jme_msix_try_alloc(dev);
3576 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3577 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3578 &sc->jme_irq_rid, &irq_flags);
3580 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3581 &sc->jme_irq_rid, irq_flags);
3582 if (sc->jme_irq_res == NULL) {
3583 device_printf(dev, "can't allocate irq\n");
3591 jme_msix_free(device_t dev)
3593 struct jme_softc *sc = device_get_softc(dev);
3596 KKASSERT(sc->jme_msix_cnt > 1);
3598 for (i = 0; i < sc->jme_msix_cnt; ++i) {
3599 struct jme_msix_data *msix = &sc->jme_msix[i];
3601 if (msix->jme_msix_res != NULL) {
3602 bus_release_resource(dev, SYS_RES_IRQ,
3603 msix->jme_msix_rid, msix->jme_msix_res);
3604 msix->jme_msix_res = NULL;
3606 if (msix->jme_msix_rid >= 0) {
3607 pci_release_msix_vector(dev, msix->jme_msix_rid);
3608 msix->jme_msix_rid = -1;
3611 pci_teardown_msix(dev);
3615 jme_intr_free(device_t dev)
3617 struct jme_softc *sc = device_get_softc(dev);
3619 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3620 if (sc->jme_irq_res != NULL) {
3621 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3624 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3625 pci_release_msi(dev);
3632 jme_msix_tx(void *xcd)
3634 struct jme_chain_data *cd = xcd;
3635 struct jme_softc *sc = cd->jme_sc;
3636 struct ifnet *ifp = &sc->arpcom.ac_if;
3638 ASSERT_SERIALIZED(&cd->jme_tx_serialize);
3640 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3642 CSR_WRITE_4(sc, JME_INTR_STATUS,
3643 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3645 if (ifp->if_flags & IFF_RUNNING) {
3647 if (!ifq_is_empty(&ifp->if_snd))
3651 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3655 jme_msix_rx(void *xrdata)
3657 struct jme_rxdata *rdata = xrdata;
3658 struct jme_softc *sc = rdata->jme_sc;
3659 struct ifnet *ifp = &sc->arpcom.ac_if;
3662 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3664 CSR_WRITE_4(sc, JME_INTR_MASK_CLR,
3665 (rdata->jme_rx_coal | rdata->jme_rx_empty));
3667 status = CSR_READ_4(sc, JME_INTR_STATUS);
3668 status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);
3670 if (status & rdata->jme_rx_coal)
3671 status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
3672 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3674 if (ifp->if_flags & IFF_RUNNING) {
3675 if (status & rdata->jme_rx_coal)
3676 jme_rxeof(rdata, -1);
3678 if (status & rdata->jme_rx_empty) {
3679 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3680 RXCSR_RX_ENB | RXCSR_RXQ_START);
3684 CSR_WRITE_4(sc, JME_INTR_MASK_SET,
3685 (rdata->jme_rx_coal | rdata->jme_rx_empty));
3689 jme_set_msinum(struct jme_softc *sc)
3693 for (i = 0; i < JME_MSINUM_CNT; ++i)
3694 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3698 jme_intr_setup(device_t dev)
3700 struct jme_softc *sc = device_get_softc(dev);
3701 struct ifnet *ifp = &sc->arpcom.ac_if;
3704 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3705 return jme_msix_setup(dev);
3707 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3708 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3710 device_printf(dev, "could not set up interrupt handler.\n");
3714 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3715 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3720 jme_intr_teardown(device_t dev)
3722 struct jme_softc *sc = device_get_softc(dev);
3724 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3725 jme_msix_teardown(dev, sc->jme_msix_cnt);
3727 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3731 jme_msix_setup(device_t dev)
3733 struct jme_softc *sc = device_get_softc(dev);
3734 struct ifnet *ifp = &sc->arpcom.ac_if;
3737 for (x = 0; x < sc->jme_msix_cnt; ++x) {
3738 struct jme_msix_data *msix = &sc->jme_msix[x];
3741 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3742 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3743 &msix->jme_msix_handle, msix->jme_msix_serialize,
3744 msix->jme_msix_desc);
3746 device_printf(dev, "could not set up %s "
3747 "interrupt handler.\n", msix->jme_msix_desc);
3748 jme_msix_teardown(dev, x);
3752 ifp->if_cpuid = 0; /* XXX */
3757 jme_msix_teardown(device_t dev, int msix_count)
3759 struct jme_softc *sc = device_get_softc(dev);
3762 for (x = 0; x < msix_count; ++x) {
3763 struct jme_msix_data *msix = &sc->jme_msix[x];
3765 bus_teardown_intr(dev, msix->jme_msix_res,
3766 msix->jme_msix_handle);