2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
30 #include "opt_polling.h"
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
47 #include <net/ethernet.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/toeplitz.h>
55 #include <net/toeplitz2.h>
56 #include <net/vlan/if_vlan_var.h>
57 #include <net/vlan/if_vlan_ether.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
62 #include <dev/netif/mii_layer/miivar.h>
63 #include <dev/netif/mii_layer/jmphyreg.h>
65 #include <bus/pci/pcireg.h>
66 #include <bus/pci/pcivar.h>
67 #include <bus/pci/pcidevs.h>
69 #include <dev/netif/jme/if_jmereg.h>
70 #include <dev/netif/jme/if_jmevar.h>
72 #include "miibus_if.h"
74 #define JME_TX_SERIALIZE 1
75 #define JME_RX_SERIALIZE 2
77 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
80 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
82 if ((sc)->jme_rss_debug >= (lvl)) \
83 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
85 #else /* !JME_RSS_DEBUG */
86 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
87 #endif /* JME_RSS_DEBUG */
89 static int jme_probe(device_t);
90 static int jme_attach(device_t);
91 static int jme_detach(device_t);
92 static int jme_shutdown(device_t);
93 static int jme_suspend(device_t);
94 static int jme_resume(device_t);
96 static int jme_miibus_readreg(device_t, int, int);
97 static int jme_miibus_writereg(device_t, int, int, int);
98 static void jme_miibus_statchg(device_t);
100 static void jme_init(void *);
101 static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
102 static void jme_start(struct ifnet *);
103 static void jme_watchdog(struct ifnet *);
104 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
105 static int jme_mediachange(struct ifnet *);
106 #ifdef DEVICE_POLLING
107 static void jme_poll(struct ifnet *, enum poll_cmd, int);
109 static void jme_serialize(struct ifnet *, enum ifnet_serialize);
110 static void jme_deserialize(struct ifnet *, enum ifnet_serialize);
111 static int jme_tryserialize(struct ifnet *, enum ifnet_serialize);
113 static void jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
117 static void jme_intr(void *);
118 static void jme_msix_tx(void *);
119 static void jme_msix_rx(void *);
120 static void jme_txeof(struct jme_softc *);
121 static void jme_rxeof(struct jme_rxdata *, int);
122 static void jme_rx_intr(struct jme_softc *, uint32_t);
124 static int jme_msix_setup(device_t);
125 static void jme_msix_teardown(device_t, int);
126 static int jme_intr_setup(device_t);
127 static void jme_intr_teardown(device_t);
128 static void jme_msix_try_alloc(device_t);
129 static void jme_msix_free(device_t);
130 static int jme_intr_alloc(device_t);
131 static void jme_intr_free(device_t);
132 static int jme_dma_alloc(struct jme_softc *);
133 static void jme_dma_free(struct jme_softc *);
134 static int jme_init_rx_ring(struct jme_rxdata *);
135 static void jme_init_tx_ring(struct jme_softc *);
136 static void jme_init_ssb(struct jme_softc *);
137 static int jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
138 static int jme_encap(struct jme_softc *, struct mbuf **);
139 static void jme_rxpkt(struct jme_rxdata *);
140 static int jme_rxring_dma_alloc(struct jme_rxdata *);
141 static int jme_rxbuf_dma_alloc(struct jme_rxdata *);
142 static int jme_rxbuf_dma_filter(void *, bus_addr_t);
144 static void jme_tick(void *);
145 static void jme_stop(struct jme_softc *);
146 static void jme_reset(struct jme_softc *);
147 static void jme_set_msinum(struct jme_softc *);
148 static void jme_set_vlan(struct jme_softc *);
149 static void jme_set_filter(struct jme_softc *);
150 static void jme_stop_tx(struct jme_softc *);
151 static void jme_stop_rx(struct jme_softc *);
152 static void jme_mac_config(struct jme_softc *);
153 static void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
154 static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
155 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
157 static void jme_setwol(struct jme_softc *);
158 static void jme_setlinkspeed(struct jme_softc *);
160 static void jme_set_tx_coal(struct jme_softc *);
161 static void jme_set_rx_coal(struct jme_softc *);
162 static void jme_enable_rss(struct jme_softc *);
163 static void jme_disable_rss(struct jme_softc *);
164 static void jme_serialize_skipmain(struct jme_softc *);
165 static void jme_deserialize_skipmain(struct jme_softc *);
167 static void jme_sysctl_node(struct jme_softc *);
168 static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
169 static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
170 static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
171 static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
174 * Devices supported by this driver.
176 static const struct jme_dev {
177 uint16_t jme_vendorid;
178 uint16_t jme_deviceid;
180 const char *jme_name;
182 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
184 "JMicron Inc, JMC250 Gigabit Ethernet" },
185 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
187 "JMicron Inc, JMC260 Fast Ethernet" },
191 static device_method_t jme_methods[] = {
192 /* Device interface. */
193 DEVMETHOD(device_probe, jme_probe),
194 DEVMETHOD(device_attach, jme_attach),
195 DEVMETHOD(device_detach, jme_detach),
196 DEVMETHOD(device_shutdown, jme_shutdown),
197 DEVMETHOD(device_suspend, jme_suspend),
198 DEVMETHOD(device_resume, jme_resume),
201 DEVMETHOD(bus_print_child, bus_generic_print_child),
202 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
205 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
206 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
207 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
212 static driver_t jme_driver = {
215 sizeof(struct jme_softc)
218 static devclass_t jme_devclass;
220 DECLARE_DUMMY_MODULE(if_jme);
221 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
222 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
223 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
225 static const struct {
229 } jme_rx_status[JME_NRXRING_MAX] = {
230 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
231 INTR_RXQ0_DESC_EMPTY },
232 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
233 INTR_RXQ1_DESC_EMPTY },
234 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
235 INTR_RXQ2_DESC_EMPTY },
236 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
237 INTR_RXQ3_DESC_EMPTY }
240 static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
241 static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
242 static int jme_rx_ring_count = 0;
243 static int jme_msi_enable = 1;
244 static int jme_msix_enable = 1;
246 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
247 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
248 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
249 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
250 TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
253 jme_setup_rxdesc(struct jme_rxdesc *rxd)
255 struct jme_desc *desc;
258 desc->buflen = htole32(MCLBYTES);
259 desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
260 desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
261 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
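/*
 * Note: setting JME_RD_OWN above hands this descriptor and its
 * freshly loaded cluster to the hardware; the chip clears OWN
 * again once it has filled the buffer.
 */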
265 * Read a PHY register on the MII of the JMC250.
268 jme_miibus_readreg(device_t dev, int phy, int reg)
270 struct jme_softc *sc = device_get_softc(dev);
274 /* For FPGA version, PHY address 0 should be ignored. */
275 if (sc->jme_caps & JME_CAP_FPGA) {
279 if (sc->jme_phyaddr != phy)
283 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
284 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
286 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
288 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
292 device_printf(sc->jme_dev, "phy read timeout: "
293 "phy %d, reg %d\n", phy, reg);
297 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
301 * Write a PHY register on the MII of the JMC250.
304 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
306 struct jme_softc *sc = device_get_softc(dev);
309 /* For FPGA version, PHY address 0 should be ignored. */
310 if (sc->jme_caps & JME_CAP_FPGA) {
314 if (sc->jme_phyaddr != phy)
318 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
319 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
320 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
322 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
324 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
328 device_printf(sc->jme_dev, "phy write timeout: "
329 "phy %d, reg %d\n", phy, reg);
336 * Callback from MII layer when media changes.
339 jme_miibus_statchg(device_t dev)
341 struct jme_softc *sc = device_get_softc(dev);
342 struct ifnet *ifp = &sc->arpcom.ac_if;
343 struct mii_data *mii;
344 struct jme_txdesc *txd;
349 jme_serialize_skipmain(sc);
350 ASSERT_IFNET_SERIALIZED_ALL(ifp);
352 if ((ifp->if_flags & IFF_RUNNING) == 0)
355 mii = device_get_softc(sc->jme_miibus);
357 sc->jme_has_link = FALSE;
358 if ((mii->mii_media_status & IFM_AVALID) != 0) {
359 switch (IFM_SUBTYPE(mii->mii_media_active)) {
362 sc->jme_has_link = TRUE;
365 if (sc->jme_caps & JME_CAP_FASTETH)
367 sc->jme_has_link = TRUE;
* Disabling the Rx/Tx MACs has a side-effect of resetting
* the JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
* descriptor address. So the driver should reset its
* internal producer/consumer pointers and reclaim any
* allocated resources. Note, just saving the value of the
* JME_TXNDA and JME_RXNDA registers before stopping the MAC
* and restoring them afterwards is not sufficient to
* guarantee a correct MAC state, because stopping the MAC
* can take a while and the hardware might have updated
* JME_TXNDA/JME_RXNDA during the stop operation.
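*
* An illustrative sketch of that insufficient approach (for
* exposition only, not driver code):
*
*	txnda = CSR_READ_4(sc, JME_TXNDA);
*	jme_stop_tx(sc);	 (MAC may still advance TXNDA here)
*	CSR_WRITE_4(sc, JME_TXNDA, txnda); (may restore a stale value)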
388 /* Disable interrupts */
389 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
392 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
394 callout_stop(&sc->jme_tick_ch);
396 /* Stop receiver/transmitter. */
400 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
401 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
403 jme_rxeof(rdata, -1);
404 if (rdata->jme_rxhead != NULL)
405 m_freem(rdata->jme_rxhead);
406 JME_RXCHAIN_RESET(rdata);
* Reuse configured Rx descriptors and reset the
* producer/consumer index.
412 rdata->jme_rx_cons = 0;
414 if (JME_ENABLE_HWRSS(sc))
420 if (sc->jme_cdata.jme_tx_cnt != 0) {
421 /* Remove queued packets for transmit. */
422 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
423 txd = &sc->jme_cdata.jme_txdesc[i];
424 if (txd->tx_m != NULL) {
426 sc->jme_cdata.jme_tx_tag,
435 jme_init_tx_ring(sc);
437 /* Initialize shadow status block. */
440 /* Program MAC with resolved speed/duplex/flow-control. */
441 if (sc->jme_has_link) {
444 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
446 /* Set Tx ring address to the hardware. */
447 paddr = sc->jme_cdata.jme_tx_ring_paddr;
448 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
449 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
451 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
452 CSR_WRITE_4(sc, JME_RXCSR,
453 sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
455 /* Set Rx ring address to the hardware. */
456 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
457 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
458 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
461 /* Restart receiver/transmitter. */
462 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
464 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
467 ifp->if_flags |= IFF_RUNNING;
468 ifp->if_flags &= ~IFF_OACTIVE;
469 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
471 #ifdef DEVICE_POLLING
472 if (!(ifp->if_flags & IFF_POLLING))
474 /* Reenable interrupts. */
475 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
479 jme_deserialize_skipmain(sc);
483 * Get the current interface media status.
486 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
488 struct jme_softc *sc = ifp->if_softc;
489 struct mii_data *mii = device_get_softc(sc->jme_miibus);
491 ASSERT_IFNET_SERIALIZED_ALL(ifp);
494 ifmr->ifm_status = mii->mii_media_status;
495 ifmr->ifm_active = mii->mii_media_active;
499 * Set hardware to newly-selected media.
502 jme_mediachange(struct ifnet *ifp)
504 struct jme_softc *sc = ifp->if_softc;
505 struct mii_data *mii = device_get_softc(sc->jme_miibus);
508 ASSERT_IFNET_SERIALIZED_ALL(ifp);
510 if (mii->mii_instance != 0) {
511 struct mii_softc *miisc;
513 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
514 mii_phy_reset(miisc);
516 error = mii_mediachg(mii);
522 jme_probe(device_t dev)
524 const struct jme_dev *sp;
527 vid = pci_get_vendor(dev);
528 did = pci_get_device(dev);
529 for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
530 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
531 struct jme_softc *sc = device_get_softc(dev);
533 sc->jme_caps = sp->jme_caps;
534 device_set_desc(dev, sp->jme_name);
542 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
548 for (i = JME_TIMEOUT; i > 0; i--) {
549 reg = CSR_READ_4(sc, JME_SMBCSR);
550 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
556 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
560 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
561 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
562 for (i = JME_TIMEOUT; i > 0; i--) {
564 reg = CSR_READ_4(sc, JME_SMBINTF);
565 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
570 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
574 reg = CSR_READ_4(sc, JME_SMBINTF);
575 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
581 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
583 uint8_t fup, reg, val;
588 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
589 fup != JME_EEPROM_SIG0)
591 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
592 fup != JME_EEPROM_SIG1)
596 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
598 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
599 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
602 if (reg >= JME_PAR0 &&
603 reg < JME_PAR0 + ETHER_ADDR_LEN) {
604 if (jme_eeprom_read_byte(sc, offset + 2,
607 eaddr[reg - JME_PAR0] = val;
611 /* Check for the end of EEPROM descriptor. */
612 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
614 /* Try next eeprom descriptor. */
615 offset += JME_EEPROM_DESC_BYTES;
616 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
618 if (match == ETHER_ADDR_LEN)
625 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
629 /* Read station address. */
630 par0 = CSR_READ_4(sc, JME_PAR0);
631 par1 = CSR_READ_4(sc, JME_PAR1);
633 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
634 device_printf(sc->jme_dev,
635 "generating fake ethernet address.\n");
636 par0 = karc4random();
637 /* Set OUI to JMicron. */
641 eaddr[3] = (par0 >> 16) & 0xff;
642 eaddr[4] = (par0 >> 8) & 0xff;
643 eaddr[5] = par0 & 0xff;
645 eaddr[0] = (par0 >> 0) & 0xFF;
646 eaddr[1] = (par0 >> 8) & 0xFF;
647 eaddr[2] = (par0 >> 16) & 0xFF;
648 eaddr[3] = (par0 >> 24) & 0xFF;
649 eaddr[4] = (par1 >> 0) & 0xFF;
650 eaddr[5] = (par1 >> 8) & 0xFF;
655 jme_attach(device_t dev)
657 struct jme_softc *sc = device_get_softc(dev);
658 struct ifnet *ifp = &sc->arpcom.ac_if;
661 uint8_t pcie_ptr, rev;
662 int error = 0, i, j, rx_desc_cnt;
663 uint8_t eaddr[ETHER_ADDR_LEN];
665 device_printf(dev, "rxdata %zu, chain_data %zu\n",
666 sizeof(struct jme_rxdata), sizeof(struct jme_chain_data));
668 lwkt_serialize_init(&sc->jme_serialize);
669 lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
670 for (i = 0; i < JME_NRXRING_MAX; ++i) {
672 &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
675 rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
677 rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
678 if (rx_desc_cnt > JME_NDESC_MAX)
679 rx_desc_cnt = JME_NDESC_MAX;
681 sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
683 sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
685 if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
686 sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
691 sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
693 sc->jme_cdata.jme_rx_ring_cnt =
694 if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
697 sc->jme_serialize_arr[i++] = &sc->jme_serialize;
699 KKASSERT(i == JME_TX_SERIALIZE);
700 sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
702 KKASSERT(i == JME_RX_SERIALIZE);
703 for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
704 sc->jme_serialize_arr[i++] =
705 &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
707 KKASSERT(i <= JME_NSERIALIZE);
708 sc->jme_serialize_cnt = i;
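/*
 * The array layout is a fixed convention: the main serializer
 * first, the TX serializer at index JME_TX_SERIALIZE, then one
 * entry per RX ring starting at JME_RX_SERIALIZE; the KKASSERTs
 * above guard this layout.
 */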
710 sc->jme_cdata.jme_sc = sc;
711 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
712 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
715 rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
716 rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
717 rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
718 rdata->jme_rx_idx = i;
719 rdata->jme_rx_desc_cnt = rx_desc_cnt;
723 sc->jme_lowaddr = BUS_SPACE_MAXADDR;
725 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
727 callout_init(&sc->jme_tick_ch);
730 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
733 irq = pci_read_config(dev, PCIR_INTLINE, 4);
734 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
736 device_printf(dev, "chip is in D%d power mode "
737 "-- setting to D0\n", pci_get_powerstate(dev));
739 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
741 pci_write_config(dev, PCIR_INTLINE, irq, 4);
742 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
744 #endif /* !BURN_BRIDGE */
746 /* Enable bus mastering */
747 pci_enable_busmaster(dev);
* JMC250 supports both memory mapped and I/O register space
* access. Because I/O register access would need different
* BARs, it's a waste of time to use I/O register space
* access. JMC250 uses 16K to map the entire memory
758 sc->jme_mem_rid = JME_PCIR_BAR;
759 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
760 &sc->jme_mem_rid, RF_ACTIVE);
761 if (sc->jme_mem_res == NULL) {
762 device_printf(dev, "can't allocate IO memory\n");
765 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
766 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
771 error = jme_intr_alloc(dev);
778 reg = CSR_READ_4(sc, JME_CHIPMODE);
779 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
781 sc->jme_caps |= JME_CAP_FPGA;
783 device_printf(dev, "FPGA revision: 0x%04x\n",
784 (reg & CHIPMODE_FPGA_REV_MASK) >>
785 CHIPMODE_FPGA_REV_SHIFT);
789 /* NOTE: FM revision is put in the upper 4 bits */
790 rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
791 rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
793 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
795 did = pci_get_device(dev);
797 case PCI_PRODUCT_JMICRON_JMC250:
798 if (rev == JME_REV1_A2)
799 sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
802 case PCI_PRODUCT_JMICRON_JMC260:
804 sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
808 panic("unknown device id 0x%04x", did);
810 if (rev >= JME_REV2) {
811 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
812 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
813 GHC_TXMAC_CLKSRC_1000;
816 /* Reset the ethernet controller. */
819 /* Map MSI/MSI-X vectors */
822 /* Get station address. */
823 reg = CSR_READ_4(sc, JME_SMBCSR);
824 if (reg & SMBCSR_EEPROM_PRESENT)
825 error = jme_eeprom_macaddr(sc, eaddr);
826 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
827 if (error != 0 && (bootverbose)) {
828 device_printf(dev, "ethernet hardware address "
829 "not found in EEPROM.\n");
831 jme_reg_macaddr(sc, eaddr);
* The integrated JR0211 has a fixed PHY address, whereas the FPGA
* version requires PHY probing to get the correct PHY address.
839 if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
840 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
841 GPREG0_PHY_ADDR_MASK;
843 device_printf(dev, "PHY is at address %d.\n",
850 /* Set max allowable DMA size. */
851 pcie_ptr = pci_get_pciecap_ptr(dev);
855 sc->jme_caps |= JME_CAP_PCIE;
856 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
858 device_printf(dev, "Read request size : %d bytes.\n",
859 128 << ((ctrl >> 12) & 0x07));
860 device_printf(dev, "TLP payload size : %d bytes.\n",
861 128 << ((ctrl >> 5) & 0x07));
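/*
 * Illustrative decode: DEVCTRL bits [14:12] hold the max read
 * request size and bits [7:5] the max payload size, each encoded
 * as 128 << field; e.g. a field value of 2 means 512 bytes.
 */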
863 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
864 case PCIEM_DEVCTL_MAX_READRQ_128:
865 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
867 case PCIEM_DEVCTL_MAX_READRQ_256:
868 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
871 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
874 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
876 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
877 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
881 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
882 sc->jme_caps |= JME_CAP_PMCAP;
/* Allocate DMA stuff */
891 error = jme_dma_alloc(sc);
896 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
897 ifp->if_init = jme_init;
898 ifp->if_ioctl = jme_ioctl;
899 ifp->if_start = jme_start;
900 #ifdef DEVICE_POLLING
901 ifp->if_poll = jme_poll;
903 ifp->if_watchdog = jme_watchdog;
904 ifp->if_serialize = jme_serialize;
905 ifp->if_deserialize = jme_deserialize;
906 ifp->if_tryserialize = jme_tryserialize;
908 ifp->if_serialize_assert = jme_serialize_assert;
910 ifq_set_maxlen(&ifp->if_snd,
911 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
912 ifq_set_ready(&ifp->if_snd);
914 /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
915 ifp->if_capabilities = IFCAP_HWCSUM |
918 IFCAP_VLAN_HWTAGGING;
919 if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
920 ifp->if_capabilities |= IFCAP_RSS;
921 ifp->if_capenable = ifp->if_capabilities;
* Disable TXCSUM by default to improve bulk data
* transmit performance (about +20Mbps).
927 ifp->if_capenable &= ~IFCAP_TXCSUM;
929 if (ifp->if_capenable & IFCAP_TXCSUM)
930 ifp->if_hwassist |= JME_CSUM_FEATURES;
931 ifp->if_hwassist |= CSUM_TSO;
933 /* Set up MII bus. */
934 error = mii_phy_probe(dev, &sc->jme_miibus,
935 jme_mediachange, jme_mediastatus);
937 device_printf(dev, "no PHY found!\n");
942 * Save PHYADDR for FPGA mode PHY.
944 if (sc->jme_caps & JME_CAP_FPGA) {
945 struct mii_data *mii = device_get_softc(sc->jme_miibus);
947 if (mii->mii_instance != 0) {
948 struct mii_softc *miisc;
950 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
951 if (miisc->mii_phy != 0) {
952 sc->jme_phyaddr = miisc->mii_phy;
956 if (sc->jme_phyaddr != 0) {
957 device_printf(sc->jme_dev,
958 "FPGA PHY is at %d\n", sc->jme_phyaddr);
960 jme_miibus_writereg(dev, sc->jme_phyaddr,
961 JMPHY_CONF, JMPHY_CONF_DEFFIFO);
963 /* XXX should we clear JME_WA_EXTFIFO */
968 ether_ifattach(ifp, eaddr, NULL);
970 /* Tell the upper layer(s) we support long frames. */
971 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
973 error = jme_intr_setup(dev);
986 jme_detach(device_t dev)
988 struct jme_softc *sc = device_get_softc(dev);
990 if (device_is_attached(dev)) {
991 struct ifnet *ifp = &sc->arpcom.ac_if;
993 ifnet_serialize_all(ifp);
995 jme_intr_teardown(dev);
996 ifnet_deserialize_all(ifp);
1001 if (sc->jme_sysctl_tree != NULL)
1002 sysctl_ctx_free(&sc->jme_sysctl_ctx);
1004 if (sc->jme_miibus != NULL)
1005 device_delete_child(dev, sc->jme_miibus);
1006 bus_generic_detach(dev);
1010 if (sc->jme_mem_res != NULL) {
1011 bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
1021 jme_sysctl_node(struct jme_softc *sc)
1024 #ifdef JME_RSS_DEBUG
1028 sysctl_ctx_init(&sc->jme_sysctl_ctx);
1029 sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
1030 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
1031 device_get_nameunit(sc->jme_dev),
1033 if (sc->jme_sysctl_tree == NULL) {
1034 device_printf(sc->jme_dev, "can't add sysctl node\n");
1038 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1039 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1040 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1041 sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
1043 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1044 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1045 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1046 sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
1048 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1049 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1050 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1051 sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
1053 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1054 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1055 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1056 sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
1058 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1059 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1060 "rx_desc_count", CTLFLAG_RD,
1061 &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
1062 0, "RX desc count");
1063 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1064 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1065 "tx_desc_count", CTLFLAG_RD,
1066 &sc->jme_cdata.jme_tx_desc_cnt,
1067 0, "TX desc count");
1068 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1069 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1070 "rx_ring_count", CTLFLAG_RD,
1071 &sc->jme_cdata.jme_rx_ring_cnt,
1072 0, "RX ring count");
1073 #ifdef JME_RSS_DEBUG
1074 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1075 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1076 "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
1077 0, "RSS debug level");
1078 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1079 char rx_ring_pkt[32];
1081 ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
1082 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
1083 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1084 rx_ring_pkt, CTLFLAG_RW,
1085 &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
* Set default coalesce values
1092 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1093 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1094 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1095 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
* Adjust coalesce values, in case the number of TX/RX
* descriptors is set to a small value by the user.
* NOTE: coal_max will not be zero, since the number of
* descriptors must be aligned to JME_NDESC_ALIGN (16 currently)
1104 coal_max = sc->jme_cdata.jme_tx_desc_cnt / 6;
1105 if (coal_max < sc->jme_tx_coal_pkt)
1106 sc->jme_tx_coal_pkt = coal_max;
1108 coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 4;
1109 if (coal_max < sc->jme_rx_coal_pkt)
1110 sc->jme_rx_coal_pkt = coal_max;
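/*
 * For example (illustrative numbers): with 384 TX descriptors,
 * coal_max = 384 / 6 = 64, so a tuned jme_tx_coal_pkt larger
 * than 64 is clamped to 64 here.
 */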
1114 jme_dma_alloc(struct jme_softc *sc)
1116 struct jme_txdesc *txd;
1118 int error, i, asize;
1120 sc->jme_cdata.jme_txdesc =
1121 kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1122 M_DEVBUF, M_WAITOK | M_ZERO);
1123 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1124 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
1127 kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1128 M_DEVBUF, M_WAITOK | M_ZERO);
1131 /* Create parent ring tag. */
1132 error = bus_dma_tag_create(NULL,/* parent */
1133 1, JME_RING_BOUNDARY, /* algnmnt, boundary */
1134 sc->jme_lowaddr, /* lowaddr */
1135 BUS_SPACE_MAXADDR, /* highaddr */
1136 NULL, NULL, /* filter, filterarg */
1137 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1139 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1141 &sc->jme_cdata.jme_ring_tag);
1143 device_printf(sc->jme_dev,
1144 "could not create parent ring DMA tag.\n");
* Create DMA stuff for TX ring
1151 asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
1152 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1153 JME_TX_RING_ALIGN, 0,
1154 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1155 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1157 device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1160 sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1161 sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1162 sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1163 sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
* Create DMA stuff for RX rings
1168 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1169 error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
1174 /* Create parent buffer tag. */
1175 error = bus_dma_tag_create(NULL,/* parent */
1176 1, 0, /* algnmnt, boundary */
1177 sc->jme_lowaddr, /* lowaddr */
1178 BUS_SPACE_MAXADDR, /* highaddr */
1179 NULL, NULL, /* filter, filterarg */
1180 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1182 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1184 &sc->jme_cdata.jme_buffer_tag);
1186 device_printf(sc->jme_dev,
1187 "could not create parent buffer DMA tag.\n");
* Create DMA stuff for the shadow status block
1194 asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
1195 error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1196 JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1197 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1199 device_printf(sc->jme_dev,
1200 "could not create shadow status block.\n");
1203 sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1204 sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1205 sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1206 sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
* Create DMA stuff for TX buffers
1212 /* Create tag for Tx buffers. */
1213 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1214 1, 0, /* algnmnt, boundary */
1215 BUS_SPACE_MAXADDR, /* lowaddr */
1216 BUS_SPACE_MAXADDR, /* highaddr */
1217 NULL, NULL, /* filter, filterarg */
1218 JME_TSO_MAXSIZE, /* maxsize */
1219 JME_MAXTXSEGS, /* nsegments */
1220 JME_MAXSEGSIZE, /* maxsegsize */
1221 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1222 &sc->jme_cdata.jme_tx_tag);
1224 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1228 /* Create DMA maps for Tx buffers. */
1229 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
1230 txd = &sc->jme_cdata.jme_txdesc[i];
1231 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1232 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1237 device_printf(sc->jme_dev,
1238 "could not create %dth Tx dmamap.\n", i);
1240 for (j = 0; j < i; ++j) {
1241 txd = &sc->jme_cdata.jme_txdesc[j];
1242 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1245 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1246 sc->jme_cdata.jme_tx_tag = NULL;
* Create DMA stuff for RX buffers
1254 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1255 error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
1263 jme_dma_free(struct jme_softc *sc)
1265 struct jme_txdesc *txd;
1266 struct jme_rxdesc *rxd;
1267 struct jme_rxdata *rdata;
1271 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1272 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1273 sc->jme_cdata.jme_tx_ring_map);
1274 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1275 sc->jme_cdata.jme_tx_ring,
1276 sc->jme_cdata.jme_tx_ring_map);
1277 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1278 sc->jme_cdata.jme_tx_ring_tag = NULL;
1282 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1283 rdata = &sc->jme_cdata.jme_rx_data[r];
1284 if (rdata->jme_rx_ring_tag != NULL) {
1285 bus_dmamap_unload(rdata->jme_rx_ring_tag,
1286 rdata->jme_rx_ring_map);
1287 bus_dmamem_free(rdata->jme_rx_ring_tag,
1289 rdata->jme_rx_ring_map);
1290 bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1291 rdata->jme_rx_ring_tag = NULL;
1296 if (sc->jme_cdata.jme_tx_tag != NULL) {
1297 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
1298 txd = &sc->jme_cdata.jme_txdesc[i];
1299 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1302 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1303 sc->jme_cdata.jme_tx_tag = NULL;
1307 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1308 rdata = &sc->jme_cdata.jme_rx_data[r];
1309 if (rdata->jme_rx_tag != NULL) {
1310 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
1311 rxd = &rdata->jme_rxdesc[i];
1312 bus_dmamap_destroy(rdata->jme_rx_tag,
1315 bus_dmamap_destroy(rdata->jme_rx_tag,
1316 rdata->jme_rx_sparemap);
1317 bus_dma_tag_destroy(rdata->jme_rx_tag);
1318 rdata->jme_rx_tag = NULL;
1322 /* Shadow status block. */
1323 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1324 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1325 sc->jme_cdata.jme_ssb_map);
1326 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1327 sc->jme_cdata.jme_ssb_block,
1328 sc->jme_cdata.jme_ssb_map);
1329 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1330 sc->jme_cdata.jme_ssb_tag = NULL;
1333 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1334 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1335 sc->jme_cdata.jme_buffer_tag = NULL;
1337 if (sc->jme_cdata.jme_ring_tag != NULL) {
1338 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1339 sc->jme_cdata.jme_ring_tag = NULL;
1342 if (sc->jme_cdata.jme_txdesc != NULL) {
1343 kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1344 sc->jme_cdata.jme_txdesc = NULL;
1346 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1347 rdata = &sc->jme_cdata.jme_rx_data[r];
1348 if (rdata->jme_rxdesc != NULL) {
1349 kfree(rdata->jme_rxdesc, M_DEVBUF);
1350 rdata->jme_rxdesc = NULL;
1356 * Make sure the interface is stopped at reboot time.
1359 jme_shutdown(device_t dev)
1361 return jme_suspend(dev);
* Unlike other ethernet controllers, the JMC250 requires
* explicitly resetting the link speed to 10/100Mbps, as a
* gigabit link consumes more than 375mA.
* Note, we reset the link speed to 10/100Mbps with
* auto-negotiation, but we don't know whether that operation
* will succeed, as we have no control after powering
* off. If the renegotiation fails, WOL may not work. Running
* at 1Gbps draws more power than the 375mA at 3.3V that is
* specified in the PCI specification, and that could result in
* power to the ethernet controller being shut down completely.
* Save the current negotiated media speed/duplex/flow-control
* in the softc and restore the same link again after resuming.
* PHY handling, such as powering down or resetting to 100Mbps,
* may be better handled in the PHY driver's suspend method.
1384 jme_setlinkspeed(struct jme_softc *sc)
1386 struct mii_data *mii;
1389 JME_LOCK_ASSERT(sc);
1391 mii = device_get_softc(sc->jme_miibus);
1394 if ((mii->mii_media_status & IFM_AVALID) != 0) {
switch (IFM_SUBTYPE(mii->mii_media_active)) {
1405 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1406 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1407 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1408 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1409 BMCR_AUTOEN | BMCR_STARTNEG);
/* Poll link state until jme(4) gets a 10/100 link. */
1413 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1415 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1416 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1426 pause("jmelnk", hz);
if (i == MII_ANEGTICKS_GIGE)
	device_printf(sc->jme_dev, "establishing link failed, "
	    "WOL may not work!\n");
* No link; force the MAC to a 100Mbps, full-duplex link.
* This is the last resort and may or may not work.
1437 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1438 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1443 jme_setwol(struct jme_softc *sc)
1445 struct ifnet *ifp = &sc->arpcom.ac_if;
1450 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1451 /* No PME capability, PHY power down. */
1452 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1453 MII_BMCR, BMCR_PDOWN);
1457 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1458 pmcs = CSR_READ_4(sc, JME_PMCS);
1459 pmcs &= ~PMCS_WOL_ENB_MASK;
1460 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1461 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1462 /* Enable PME message. */
1463 gpr |= GPREG0_PME_ENB;
1464 /* For gigabit controllers, reset link speed to 10/100. */
1465 if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1466 jme_setlinkspeed(sc);
1469 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1470 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1473 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1474 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1475 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1476 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1477 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1478 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1479 /* No WOL, PHY power down. */
1480 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1481 MII_BMCR, BMCR_PDOWN);
1487 jme_suspend(device_t dev)
1489 struct jme_softc *sc = device_get_softc(dev);
1490 struct ifnet *ifp = &sc->arpcom.ac_if;
1492 ifnet_serialize_all(ifp);
1497 ifnet_deserialize_all(ifp);
1503 jme_resume(device_t dev)
1505 struct jme_softc *sc = device_get_softc(dev);
1506 struct ifnet *ifp = &sc->arpcom.ac_if;
1511 ifnet_serialize_all(ifp);
1514 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1517 pmstat = pci_read_config(sc->jme_dev,
1518 pmc + PCIR_POWER_STATUS, 2);
/* Disable PME and clear PME status. */
1520 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1521 pci_write_config(sc->jme_dev,
1522 pmc + PCIR_POWER_STATUS, pmstat, 2);
1526 if (ifp->if_flags & IFF_UP)
1529 ifnet_deserialize_all(ifp);
1535 jme_tso_pullup(struct mbuf **mp)
1537 int hoff, iphlen, thoff;
1541 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
1543 iphlen = m->m_pkthdr.csum_iphlen;
1544 thoff = m->m_pkthdr.csum_thlen;
1545 hoff = m->m_pkthdr.csum_lhlen;
1547 KASSERT(iphlen > 0, ("invalid ip hlen"));
1548 KASSERT(thoff > 0, ("invalid tcp hlen"));
1549 KASSERT(hoff > 0, ("invalid ether hlen"));
1551 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
1552 m = m_pullup(m, hoff + iphlen + thoff);
1563 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1565 struct jme_txdesc *txd;
1566 struct jme_desc *desc;
1568 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1570 int error, i, prod, symbol_desc;
1571 uint32_t cflags, flag64, mss;
1573 M_ASSERTPKTHDR((*m_head));
1575 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
1576 /* XXX Is this necessary? */
1577 error = jme_tso_pullup(m_head);
1582 prod = sc->jme_cdata.jme_tx_prod;
1583 txd = &sc->jme_cdata.jme_txdesc[prod];
1585 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1590 maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1591 (JME_TXD_RSVD + symbol_desc);
1592 if (maxsegs > JME_MAXTXSEGS)
1593 maxsegs = JME_MAXTXSEGS;
1594 KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
1595 ("not enough segments %d", maxsegs));
1597 error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1598 txd->tx_dmamap, m_head,
1599 txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1603 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1604 BUS_DMASYNC_PREWRITE);
1610 /* Configure checksum offload. */
1611 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1612 mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
1613 cflags |= JME_TD_TSO;
1614 } else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
1615 if (m->m_pkthdr.csum_flags & CSUM_IP)
1616 cflags |= JME_TD_IPCSUM;
1617 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1618 cflags |= JME_TD_TCPCSUM;
1619 if (m->m_pkthdr.csum_flags & CSUM_UDP)
1620 cflags |= JME_TD_UDPCSUM;
1623 /* Configure VLAN. */
1624 if (m->m_flags & M_VLANTAG) {
1625 cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1626 cflags |= JME_TD_VLAN_TAG;
1629 desc = &sc->jme_cdata.jme_tx_ring[prod];
1630 desc->flags = htole32(cflags);
1631 desc->addr_hi = htole32(m->m_pkthdr.len);
1632 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
* Use the 64bit TX desc chain format.
* The first TX desc of the chain, which is set up here,
* is just a symbol TX desc carrying no payload.
1639 flag64 = JME_TD_64BIT;
1640 desc->buflen = htole32(mss);
1643 /* No effective TX desc is consumed */
* Use the 32bit TX desc chain format.
* The first TX desc of the chain, which is set up here,
* is an effective TX desc carrying the first segment of
1654 desc->buflen = htole32(mss | txsegs[0].ds_len);
1655 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1657 /* One effective TX desc is consumed */
1660 sc->jme_cdata.jme_tx_cnt++;
1661 KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1662 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
1663 JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
1665 txd->tx_ndesc = 1 - i;
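/*
 * At this point tx_ndesc counts only the symbol descriptor
 * (64bit mode, i == 0); in 32bit mode (i == 1) it starts at
 * zero because the head descriptor already carries the first
 * payload segment. The remaining payload descriptors are
 * accounted for below (tx_ndesc += nsegs).
 */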
1666 for (; i < nsegs; i++) {
1667 desc = &sc->jme_cdata.jme_tx_ring[prod];
1668 desc->buflen = htole32(txsegs[i].ds_len);
1669 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1670 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1671 desc->flags = htole32(JME_TD_OWN | flag64);
1673 sc->jme_cdata.jme_tx_cnt++;
1674 KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1675 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
1676 JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
1679 /* Update producer index. */
1680 sc->jme_cdata.jme_tx_prod = prod;
* Finally, request an interrupt and give ownership of the
* first descriptor to the hardware.
1685 desc = txd->tx_desc;
1686 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1689 txd->tx_ndesc += nsegs;
1699 jme_start(struct ifnet *ifp)
1701 struct jme_softc *sc = ifp->if_softc;
1702 struct mbuf *m_head;
1705 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
1707 if (!sc->jme_has_link) {
1708 ifq_purge(&ifp->if_snd);
1712 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1715 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1718 while (!ifq_is_empty(&ifp->if_snd)) {
* Check the number of available TX descs; always
* leave JME_TXD_RSVD TX descs free.
1723 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE >
1724 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
1725 ifp->if_flags |= IFF_OACTIVE;
1729 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1734 * Pack the data into the transmit ring. If we
1735 * don't have room, set the OACTIVE flag and wait
1736 * for the NIC to drain the ring.
1738 if (jme_encap(sc, &m_head)) {
1739 KKASSERT(m_head == NULL);
1741 ifp->if_flags |= IFF_OACTIVE;
1747 * If there's a BPF listener, bounce a copy of this frame
1750 ETHER_BPF_MTAP(ifp, m_head);
* Reading TXCSR takes a very long time under heavy load,
* so cache the TXCSR value and write the ORed value with
* the kick command to TXCSR. This saves one register
1760 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1761 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1762 /* Set a timeout in case the chip goes out to lunch. */
1763 ifp->if_timer = JME_TX_TIMEOUT;
1768 jme_watchdog(struct ifnet *ifp)
1770 struct jme_softc *sc = ifp->if_softc;
1772 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1774 if (!sc->jme_has_link) {
1775 if_printf(ifp, "watchdog timeout (missed link)\n");
1782 if (sc->jme_cdata.jme_tx_cnt == 0) {
1783 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1785 if (!ifq_is_empty(&ifp->if_snd))
1790 if_printf(ifp, "watchdog timeout\n");
1793 if (!ifq_is_empty(&ifp->if_snd))
1798 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1800 struct jme_softc *sc = ifp->if_softc;
1801 struct mii_data *mii = device_get_softc(sc->jme_miibus);
1802 struct ifreq *ifr = (struct ifreq *)data;
1803 int error = 0, mask;
1805 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1809 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1810 (!(sc->jme_caps & JME_CAP_JUMBO) &&
1811 ifr->ifr_mtu > JME_MAX_MTU)) {
1816 if (ifp->if_mtu != ifr->ifr_mtu) {
* No special configuration is required when the interface
* MTU is changed, but the availability of Tx checksum
* offload should be checked against the new MTU size, as
* the FIFO size is just 2K.
1823 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1824 ifp->if_capenable &=
1825 ~(IFCAP_TXCSUM | IFCAP_TSO);
1827 ~(JME_CSUM_FEATURES | CSUM_TSO);
1829 ifp->if_mtu = ifr->ifr_mtu;
1830 if (ifp->if_flags & IFF_RUNNING)
1836 if (ifp->if_flags & IFF_UP) {
1837 if (ifp->if_flags & IFF_RUNNING) {
1838 if ((ifp->if_flags ^ sc->jme_if_flags) &
1839 (IFF_PROMISC | IFF_ALLMULTI))
1845 if (ifp->if_flags & IFF_RUNNING)
1848 sc->jme_if_flags = ifp->if_flags;
1853 if (ifp->if_flags & IFF_RUNNING)
1859 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1863 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1865 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1866 ifp->if_capenable ^= IFCAP_TXCSUM;
1867 if (ifp->if_capenable & IFCAP_TXCSUM)
1868 ifp->if_hwassist |= JME_CSUM_FEATURES;
1870 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1872 if (mask & IFCAP_RXCSUM) {
1875 ifp->if_capenable ^= IFCAP_RXCSUM;
1876 reg = CSR_READ_4(sc, JME_RXMAC);
1877 reg &= ~RXMAC_CSUM_ENB;
1878 if (ifp->if_capenable & IFCAP_RXCSUM)
1879 reg |= RXMAC_CSUM_ENB;
1880 CSR_WRITE_4(sc, JME_RXMAC, reg);
1883 if (mask & IFCAP_VLAN_HWTAGGING) {
1884 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1888 if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1889 ifp->if_capenable ^= IFCAP_TSO;
1890 if (ifp->if_capenable & IFCAP_TSO)
1891 ifp->if_hwassist |= CSUM_TSO;
1893 ifp->if_hwassist &= ~CSUM_TSO;
1896 if (mask & IFCAP_RSS)
1897 ifp->if_capenable ^= IFCAP_RSS;
1901 error = ether_ioctl(ifp, cmd, data);
1908 jme_mac_config(struct jme_softc *sc)
1910 struct mii_data *mii;
1911 uint32_t ghc, rxmac, txmac, txpause, gp1;
1912 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1914 mii = device_get_softc(sc->jme_miibus);
1916 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1918 CSR_WRITE_4(sc, JME_GHC, 0);
1920 rxmac = CSR_READ_4(sc, JME_RXMAC);
1921 rxmac &= ~RXMAC_FC_ENB;
1922 txmac = CSR_READ_4(sc, JME_TXMAC);
1923 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1924 txpause = CSR_READ_4(sc, JME_TXPFC);
1925 txpause &= ~TXPFC_PAUSE_ENB;
1926 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1927 ghc |= GHC_FULL_DUPLEX;
1928 rxmac &= ~RXMAC_COLL_DET_ENB;
1929 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1930 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1933 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1934 txpause |= TXPFC_PAUSE_ENB;
1935 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1936 rxmac |= RXMAC_FC_ENB;
1938 /* Disable retry transmit timer/retry limit. */
1939 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1940 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1942 rxmac |= RXMAC_COLL_DET_ENB;
1943 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1944 /* Enable retry transmit timer/retry limit. */
1945 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1946 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1950 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1952 gp1 = CSR_READ_4(sc, JME_GPREG1);
1953 gp1 &= ~GPREG1_WA_HDX;
1955 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1958 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1960 ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1962 gp1 |= GPREG1_WA_HDX;
1966 ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1968 gp1 |= GPREG1_WA_HDX;
* Use extended FIFO depth to work around CRC errors
* emitted by chips before JMC250B
1974 phyconf = JMPHY_CONF_EXTFIFO;
1978 if (sc->jme_caps & JME_CAP_FASTETH)
1981 ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1983 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1989 CSR_WRITE_4(sc, JME_GHC, ghc);
1990 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1991 CSR_WRITE_4(sc, JME_TXMAC, txmac);
1992 CSR_WRITE_4(sc, JME_TXPFC, txpause);
1994 if (sc->jme_workaround & JME_WA_EXTFIFO) {
1995 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1996 JMPHY_CONF, phyconf);
1998 if (sc->jme_workaround & JME_WA_HDX)
1999 CSR_WRITE_4(sc, JME_GPREG1, gp1);
2005 struct jme_softc *sc = xsc;
2006 struct ifnet *ifp = &sc->arpcom.ac_if;
2010 ASSERT_SERIALIZED(&sc->jme_serialize);
2012 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2013 if (status == 0 || status == 0xFFFFFFFF)
2016 /* Disable interrupts. */
2017 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2019 status = CSR_READ_4(sc, JME_INTR_STATUS);
2020 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2023 /* Reset PCC counter/timer and Ack interrupts. */
2024 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2026 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
2027 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2029 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2030 if (status & jme_rx_status[r].jme_coal) {
2031 status |= jme_rx_status[r].jme_coal |
2032 jme_rx_status[r].jme_comp;
2036 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2038 if (ifp->if_flags & IFF_RUNNING) {
2039 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
2040 jme_rx_intr(sc, status);
2042 if (status & INTR_RXQ_DESC_EMPTY) {
* Notify the hardware of the availability of new Rx buffers.
* Reading RXCSR takes a very long time under heavy
* load, so cache the RXCSR value and write the ORed
* value with the kick command to RXCSR. This
* saves one register access cycle.
2050 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2051 RXCSR_RX_ENB | RXCSR_RXQ_START);
2054 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
2055 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
2057 if (!ifq_is_empty(&ifp->if_snd))
2059 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
2063 /* Reenable interrupts. */
2064 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2068 jme_txeof(struct jme_softc *sc)
2070 struct ifnet *ifp = &sc->arpcom.ac_if;
2073 cons = sc->jme_cdata.jme_tx_cons;
2074 if (cons == sc->jme_cdata.jme_tx_prod)
2078 * Go through our Tx list and free mbufs for those
2079 * frames which have been transmitted.
2081 while (cons != sc->jme_cdata.jme_tx_prod) {
2082 struct jme_txdesc *txd, *next_txd;
2083 uint32_t status, next_status;
2084 int next_cons, nsegs;
2086 txd = &sc->jme_cdata.jme_txdesc[cons];
2087 KASSERT(txd->tx_m != NULL,
2088 ("%s: freeing NULL mbuf!", __func__));
2090 status = le32toh(txd->tx_desc->flags);
2091 if ((status & JME_TD_OWN) == JME_TD_OWN)
* This chip will always update the TX descriptor's
* buflen field, and this update always happens
* after clearing the OWN bit, so even if the OWN
* bit has been cleared by the chip, we still can't
* be sure whether the buflen field has been updated
* yet. To avoid this race, we wait
* for the next TX descriptor's OWN bit to be cleared
* by the chip before reusing this TX descriptor.
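*
* Illustrative timeline of the race being avoided:
*
*	chip:  clear OWN ......... update buflen
*	host:         read OWN == 0, read buflen (possibly stale)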
2106 JME_DESC_ADD(next_cons, txd->tx_ndesc,
2107 sc->jme_cdata.jme_tx_desc_cnt);
2108 next_txd = &sc->jme_cdata.jme_txdesc[next_cons];
2109 if (next_txd->tx_m == NULL)
2111 next_status = le32toh(next_txd->tx_desc->flags);
2112 if ((next_status & JME_TD_OWN) == JME_TD_OWN)
2115 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2119 if (status & JME_TD_COLLISION) {
2120 ifp->if_collisions +=
2121 le32toh(txd->tx_desc->buflen) &
2122 JME_TD_BUF_LEN_MASK;
* Only the first descriptor of a multi-descriptor
* transmission is updated, so the driver has to skip the
* entire buffer chain for the transmitted frame. In other
* words, the JME_TD_OWN bit is valid only in the first
* descriptor of a multi-descriptor transmission.
2133 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2134 sc->jme_cdata.jme_tx_ring[cons].flags = 0;
2135 JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
2138 /* Reclaim transferred mbufs. */
2139 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2142 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2143 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2144 ("%s: Active Tx desc counter was garbled", __func__));
2147 sc->jme_cdata.jme_tx_cons = cons;
2149 /* 1 for symbol TX descriptor */
2150 if (sc->jme_cdata.jme_tx_cnt <= JME_MAXTXSEGS + 1)
2153 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE <=
2154 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
2155 ifp->if_flags &= ~IFF_OACTIVE;
2158 static __inline void
2159 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2163 for (i = 0; i < count; ++i) {
2164 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2165 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2169 static __inline struct pktinfo *
2170 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2172 if (flags & JME_RD_IPV4)
2173 pi->pi_netisr = NETISR_IP;
2174 else if (flags & JME_RD_IPV6)
2175 pi->pi_netisr = NETISR_IPV6;
2180 pi->pi_l3proto = IPPROTO_UNKNOWN;
2182 if (flags & JME_RD_MORE_FRAG)
2183 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2184 else if (flags & JME_RD_TCP)
2185 pi->pi_l3proto = IPPROTO_TCP;
2186 else if (flags & JME_RD_UDP)
2187 pi->pi_l3proto = IPPROTO_UDP;
2193 /* Receive a frame. */
2195 jme_rxpkt(struct jme_rxdata *rdata)
2197 struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2198 struct jme_desc *desc;
2199 struct jme_rxdesc *rxd;
2200 struct mbuf *mp, *m;
2201 uint32_t flags, status, hash, hashinfo;
2202 int cons, count, nsegs;
2204 cons = rdata->jme_rx_cons;
2205 desc = &rdata->jme_rx_ring[cons];
2207 flags = le32toh(desc->flags);
2208 status = le32toh(desc->buflen);
2209 hash = le32toh(desc->addr_hi);
2210 hashinfo = le32toh(desc->addr_lo);
2211 nsegs = JME_RX_NSEGS(status);
2214 /* Skip the first descriptor. */
2215 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
* Clear the OWN bit of the following RX descriptors;
* hardware will not clear the OWN bit except in the first
* Since the first RX descriptor is set up, i.e. OWN bit
* on, before its following RX descriptors, leaving the
* OWN bit on the following RX descriptors would trick
* the hardware into thinking that the following RX
* descriptors are ready to be used too.
2228 for (count = 1; count < nsegs; count++,
2229 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2230 rdata->jme_rx_ring[cons].flags = 0;
2232 cons = rdata->jme_rx_cons;
2235 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2236 "hash 0x%08x, hash info 0x%08x\n",
2237 rdata->jme_rx_idx, flags, hash, hashinfo);
2239 if (status & JME_RX_ERR_STAT) {
2241 jme_discard_rxbufs(rdata, cons, nsegs);
2242 #ifdef JME_SHOW_ERRORS
2243 if_printf(ifp, "%s : receive error = 0x%b\n",
2244 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2246 rdata->jme_rx_cons += nsegs;
2247 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2251 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2252 for (count = 0; count < nsegs; count++,
2253 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2254 rxd = &rdata->jme_rxdesc[cons];
2257 /* Add a new receive buffer to the ring. */
2258 if (jme_newbuf(rdata, rxd, 0) != 0) {
2261 jme_discard_rxbufs(rdata, cons, nsegs - count);
2262 if (rdata->jme_rxhead != NULL) {
2263 m_freem(rdata->jme_rxhead);
2264 JME_RXCHAIN_RESET(rdata);
* Assume we've received a full sized frame.
* The actual size is fixed up when we encounter the end of
* a multi-segmented frame.
2274 mp->m_len = MCLBYTES;
2276 /* Chain received mbufs. */
2277 if (rdata->jme_rxhead == NULL) {
2278 rdata->jme_rxhead = mp;
2279 rdata->jme_rxtail = mp;
2282 * Receive processor can receive a maximum frame
2283 * size of 65535 bytes.
2285 rdata->jme_rxtail->m_next = mp;
2286 rdata->jme_rxtail = mp;
2289 if (count == nsegs - 1) {
2290 struct pktinfo pi0, *pi;
2292 /* Last desc. for this frame. */
2293 m = rdata->jme_rxhead;
2294 m->m_pkthdr.len = rdata->jme_rxlen;
2296 /* Set first mbuf size. */
2297 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2298 /* Set last mbuf size. */
2299 mp->m_len = rdata->jme_rxlen -
2300 ((MCLBYTES - JME_RX_PAD_BYTES) +
2301 (MCLBYTES * (nsegs - 2)));
2303 m->m_len = rdata->jme_rxlen;
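/*
 * Worked example for the multi-segment case (illustrative,
 * assuming MCLBYTES == 2048 and JME_RX_PAD_BYTES == 10): a
 * 3000 byte frame arriving as nsegs == 2 gives the first mbuf
 * 2048 - 10 = 2038 bytes and the last mbuf 3000 - 2038 = 962
 * bytes.
 */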
2305 m->m_pkthdr.rcvif = ifp;
2308 * Account for the 10 bytes of auto padding that align the IP
2309 * header on a 32bit boundary. Also note that the CRC bytes
2310 * are automatically removed by the hardware.
2313 m->m_data += JME_RX_PAD_BYTES;
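/*
 * Worked example of the segment length math above (a sketch,
 * assuming MCLBYTES is 2048 and JME_RX_PAD_BYTES is 10): a frame
 * occupying 3010 bytes in the ring (pad included) arrives in
 * nsegs = 2 clusters, so jme_rxlen = 3000.  The first mbuf then
 * holds 2048 - 10 = 2038 bytes and the last mbuf holds
 * 3000 - (2038 + 2048 * (2 - 2)) = 962 bytes, totalling 3000.
 */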
2315 /* Set checksum information. */
2316 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2317 (flags & JME_RD_IPV4)) {
2318 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2319 if (flags & JME_RD_IPCSUM)
2320 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2321 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2322 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2323 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2324 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2325 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2326 m->m_pkthdr.csum_flags |=
2327 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2328 m->m_pkthdr.csum_data = 0xffff;
2332 /* Check for VLAN tagged packets. */
2333 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2334 (flags & JME_RD_VLAN_TAG)) {
2335 m->m_pkthdr.ether_vlantag =
2336 flags & JME_RD_VLAN_MASK;
2337 m->m_flags |= M_VLANTAG;
2342 if (ifp->if_capenable & IFCAP_RSS)
2343 pi = jme_pktinfo(&pi0, flags);
2348 (hashinfo & JME_RD_HASH_FN_MASK) ==
2349 JME_RD_HASH_FN_TOEPLITZ) {
2350 m->m_flags |= (M_HASH | M_CKHASH);
2351 m->m_pkthdr.hash = toeplitz_hash(hash);
2354 #ifdef JME_RSS_DEBUG
2356 JME_RSS_DPRINTF(rdata->jme_sc, 10,
2357 "isr %d flags %08x, l3 %d %s\n",
2358 pi->pi_netisr, pi->pi_flags,
2360 (m->m_flags & M_HASH) ? "hash" : "");
2365 ether_input_pkt(ifp, m, pi);
2367 /* Reset mbuf chains. */
2368 JME_RXCHAIN_RESET(rdata);
2369 #ifdef JME_RSS_DEBUG
2370 rdata->jme_rx_pkt++;
2375 rdata->jme_rx_cons += nsegs;
2376 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2380 jme_rxeof(struct jme_rxdata *rdata, int count)
2382 struct jme_desc *desc;
2386 #ifdef DEVICE_POLLING
2387 if (count >= 0 && count-- == 0)
2390 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2391 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2393 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2397 * Check number of segments against received bytes.
2398 * A non-matching value would indicate that the hardware
2399 * is still trying to update the Rx descriptors. I'm not
2400 * sure whether this check is needed.
2402 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2403 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2404 if (nsegs != howmany(pktlen, MCLBYTES)) {
2405 if_printf(&rdata->jme_sc->arpcom.ac_if,
2406 "RX fragment count(%d) and "
2407 "packet size(%d) mismach\n", nsegs, pktlen);
2413 * RSS hash and hash information may _not_ be set by the
2414 * hardware even if the OWN bit is cleared and the VALID bit
2417 * If the RSS information has not been delivered by the
2418 * hardware yet, we MUST NOT accept this packet, let alone
2419 * reuse its RX descriptor. If this packet were accepted and
2420 * its RX descriptor reused before the hardware delivered the
2421 * RSS information, the RX buffer's address would be trashed
2422 * by the RSS information written back by the hardware.
2424 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2425 struct jme_rxdesc *rxd;
2428 hashinfo = le32toh(desc->addr_lo);
2429 rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2432 * This test should be enough to detect the pending
2433 * RSS information delivery, given:
2434 * - If RSS hash is not calculated, the hashinfo
2435 * will be 0. However, the lower 32bits of RX
2436 * buffers' physical address will never be 0.
2437 * (see jme_rxbuf_dma_filter)
2438 * - If RSS hash is calculated, the lowest 4 bits
2439 * of hashinfo will be set, while the RX buffers
2440 * are at least 2K aligned.
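/*
 * Example of why this test works (a sketch): a 2K aligned buffer at
 * physical address 0x12345800 has its low 11 bits clear, while a
 * written-back hashinfo always has a non-zero hash function field in
 * its lowest 4 bits (or is plain 0, which the DMA filter forbids for
 * buffer addresses).  Equality below therefore means the descriptor
 * still holds the buffer address, i.e. the RSS write-back is pending.
 */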
2442 if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2443 #ifdef JME_SHOW_RSSWB
2444 if_printf(&rdata->jme_sc->arpcom.ac_if,
2445 "RSS is not written back yet\n");
2451 /* Received a frame. */
2459 struct jme_softc *sc = xsc;
2460 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2462 lwkt_serialize_enter(&sc->jme_serialize);
2464 sc->jme_in_tick = TRUE;
2466 sc->jme_in_tick = FALSE;
2468 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2470 lwkt_serialize_exit(&sc->jme_serialize);
2474 jme_reset(struct jme_softc *sc)
2478 /* Make sure that TX and RX are stopped */
2483 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2487 * Keep the reset bit asserted while disabling the clock sources
2490 /* Disable TXMAC and TXOFL clock sources */
2491 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2492 /* Disable RXMAC clock source */
2493 val = CSR_READ_4(sc, JME_GPREG1);
2494 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2496 CSR_READ_4(sc, JME_GHC);
2499 CSR_WRITE_4(sc, JME_GHC, 0);
2501 CSR_READ_4(sc, JME_GHC);
2504 * With the reset bit cleared, re-enable the clock sources
2507 /* Enable TXMAC and TXOFL clock sources */
2508 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2509 /* Enable RXMAC clock source */
2510 val = CSR_READ_4(sc, JME_GPREG1);
2511 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2513 CSR_READ_4(sc, JME_GHC);
2515 /* Disable TXMAC and TXOFL clock sources */
2516 CSR_WRITE_4(sc, JME_GHC, 0);
2517 /* Disable RXMAC clock source */
2518 val = CSR_READ_4(sc, JME_GPREG1);
2519 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2521 CSR_READ_4(sc, JME_GHC);
2523 /* Enable TX and RX */
2524 val = CSR_READ_4(sc, JME_TXCSR);
2525 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2526 val = CSR_READ_4(sc, JME_RXCSR);
2527 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2529 CSR_READ_4(sc, JME_TXCSR);
2530 CSR_READ_4(sc, JME_RXCSR);
2532 /* Enable TXMAC and TXOFL clock sources */
2533 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2534 /* Enable RXMAC clock source */
2535 val = CSR_READ_4(sc, JME_GPREG1);
2536 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2538 CSR_READ_4(sc, JME_GHC);
2540 /* Stop TX and RX */
2548 struct jme_softc *sc = xsc;
2549 struct ifnet *ifp = &sc->arpcom.ac_if;
2550 struct mii_data *mii;
2551 uint8_t eaddr[ETHER_ADDR_LEN];
2556 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2559 * Cancel any pending I/O.
2564 * Reset the chip to a known state.
2569 * Set up the MSI/MSI-X vector to interrupt source mapping
2573 if (JME_ENABLE_HWRSS(sc))
2576 jme_disable_rss(sc);
2578 /* Init RX descriptors */
2579 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2580 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2582 if_printf(ifp, "initialization failed: "
2583 "no memory for %dth RX ring.\n", r);
2589 /* Init TX descriptors */
2590 jme_init_tx_ring(sc);
2592 /* Initialize shadow status block. */
2595 /* Reprogram the station address. */
2596 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2597 CSR_WRITE_4(sc, JME_PAR0,
2598 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2599 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
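/*
 * Byte order sketch (hypothetical address): for the station address
 * 01:23:45:67:89:ab, eaddr[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab },
 * so the two writes above program
 *
 *	JME_PAR0 = 0x67452301
 *	JME_PAR1 = 0x0000ab89
 *
 * i.e. the address is laid out little-endian across the registers.
 */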
2602 * Configure Tx queue.
2603 * Tx priority queue weight value : 0
2604 * Tx FIFO threshold for processing next packet : 16QW
2605 * Maximum Tx DMA length : 512
2606 * Allow Tx DMA burst.
2608 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2609 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2610 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2611 sc->jme_txcsr |= sc->jme_tx_dma_size;
2612 sc->jme_txcsr |= TXCSR_DMA_BURST;
2613 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2615 /* Set Tx descriptor counter. */
2616 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
2618 /* Set Tx ring address to the hardware. */
2619 paddr = sc->jme_cdata.jme_tx_ring_paddr;
2620 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2621 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2623 /* Configure TxMAC parameters. */
2624 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2625 reg |= TXMAC_THRESH_1_PKT;
2626 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2627 CSR_WRITE_4(sc, JME_TXMAC, reg);
2630 * Configure Rx queue.
2631 * FIFO full threshold for transmitting Tx pause packet : 128T
2632 * FIFO threshold for processing next packet : 128QW
2634 * Max Rx DMA length : 128
2635 * Rx descriptor retry : 32
2636 * Rx descriptor retry time gap : 256ns
2637 * Don't receive runt/bad frame.
2639 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2642 * Since the Rx FIFO size is 4K bytes, frames larger than
2643 * 4K bytes will suffer from Rx FIFO overruns. So decrease
2644 * the FIFO threshold to reduce FIFO overruns for frames
2645 * larger than 4000 bytes.
2646 * For best performance with standard MTU sized frames, use
2647 * the maximum allowable FIFO threshold, 128QW.
2649 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2651 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2653 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2655 /* Improve PCI Express compatibility */
2656 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2658 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2659 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2660 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2661 /* XXX TODO DROP_BAD */
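/*
 * Threshold sketch for the MTU test above (assuming the elided bound
 * is derived from the 4K Rx FIFO mentioned in the comment): with the
 * standard 1500 byte MTU the largest on-wire frame is
 * 1500 + ETHER_HDR_LEN (14) + EVL_ENCAPLEN (4) + ETHER_CRC_LEN (4) =
 * 1522 bytes, comfortably under 4000, so the 128QW threshold is
 * used; a jumbo MTU such as 9000 would select 16QW instead.
 */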
2663 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2664 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2666 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2668 /* Set Rx descriptor counter. */
2669 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2671 /* Set Rx ring address to the hardware. */
2672 paddr = rdata->jme_rx_ring_paddr;
2673 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2674 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2677 /* Clear receive filter. */
2678 CSR_WRITE_4(sc, JME_RXMAC, 0);
2680 /* Set up the receive filter. */
2685 * Disable all WOL bits as WOL can interfere with normal Rx
2686 * operation. Also clear WOL detection status bits.
2688 reg = CSR_READ_4(sc, JME_PMCS);
2689 reg &= ~PMCS_WOL_ENB_MASK;
2690 CSR_WRITE_4(sc, JME_PMCS, reg);
2693 * Pad 10 bytes right before the received frame. This greatly
2694 * helps Rx performance on strict-alignment architectures as
2695 * the driver need not copy the frame to align the payload.
2697 reg = CSR_READ_4(sc, JME_RXMAC);
2698 reg |= RXMAC_PAD_10BYTES;
2700 if (ifp->if_capenable & IFCAP_RXCSUM)
2701 reg |= RXMAC_CSUM_ENB;
2702 CSR_WRITE_4(sc, JME_RXMAC, reg);
2704 /* Configure general purpose reg0 */
2705 reg = CSR_READ_4(sc, JME_GPREG0);
2706 reg &= ~GPREG0_PCC_UNIT_MASK;
2707 /* Set PCC timer resolution to micro-seconds unit. */
2708 reg |= GPREG0_PCC_UNIT_US;
2710 * Disable all shadow register posting as we have to read
2711 * the JME_INTR_STATUS register in jme_intr. Also it seems
2712 * that it's hard to synchronize interrupt status between
2713 * hardware and software with shadow posting due to
2714 * requirements of bus_dmamap_sync(9).
2716 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2717 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2718 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2719 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2720 /* Disable posting of DW0. */
2721 reg &= ~GPREG0_POST_DW0_ENB;
2722 /* Clear PME message. */
2723 reg &= ~GPREG0_PME_ENB;
2724 /* Set PHY address. */
2725 reg &= ~GPREG0_PHY_ADDR_MASK;
2726 reg |= sc->jme_phyaddr;
2727 CSR_WRITE_4(sc, JME_GPREG0, reg);
2729 /* Configure Tx queue 0 packet completion coalescing. */
2730 jme_set_tx_coal(sc);
2732 /* Configure Rx queues packet completion coalescing. */
2733 jme_set_rx_coal(sc);
2735 /* Configure shadow status block but don't enable posting. */
2736 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2737 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2738 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2740 /* Disable Timer 1 and Timer 2. */
2741 CSR_WRITE_4(sc, JME_TIMER1, 0);
2742 CSR_WRITE_4(sc, JME_TIMER2, 0);
2744 /* Configure retry transmit period, retry limit value. */
2745 CSR_WRITE_4(sc, JME_TXTRHD,
2746 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2747 TXTRHD_RT_PERIOD_MASK) |
2748 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2749 TXTRHD_RT_LIMIT_MASK));
2751 #ifdef DEVICE_POLLING
2752 if (!(ifp->if_flags & IFF_POLLING))
2754 /* Initialize the interrupt mask. */
2755 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2756 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2759 * Enabling Tx/Rx DMA engines and Rx queue processing is
2760 * done after detection of valid link in jme_miibus_statchg.
2762 sc->jme_has_link = FALSE;
2764 /* Set the current media. */
2765 mii = device_get_softc(sc->jme_miibus);
2768 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2770 ifp->if_flags |= IFF_RUNNING;
2771 ifp->if_flags &= ~IFF_OACTIVE;
2775 jme_stop(struct jme_softc *sc)
2777 struct ifnet *ifp = &sc->arpcom.ac_if;
2778 struct jme_txdesc *txd;
2779 struct jme_rxdesc *rxd;
2780 struct jme_rxdata *rdata;
2783 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2786 * Mark the interface down and cancel the watchdog timer.
2788 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2791 callout_stop(&sc->jme_tick_ch);
2792 sc->jme_has_link = FALSE;
2795 * Disable interrupts.
2797 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2798 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2800 /* Disable updating shadow status block. */
2801 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2802 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2804 /* Stop receiver, transmitter. */
2809 * Free partially finished RX segments
2811 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2812 rdata = &sc->jme_cdata.jme_rx_data[r];
2813 if (rdata->jme_rxhead != NULL)
2814 m_freem(rdata->jme_rxhead);
2815 JME_RXCHAIN_RESET(rdata);
2819 * Free RX and TX mbufs still in the queues.
2821 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2822 rdata = &sc->jme_cdata.jme_rx_data[r];
2823 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2824 rxd = &rdata->jme_rxdesc[i];
2825 if (rxd->rx_m != NULL) {
2826 bus_dmamap_unload(rdata->jme_rx_tag,
2833 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2834 txd = &sc->jme_cdata.jme_txdesc[i];
2835 if (txd->tx_m != NULL) {
2836 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2846 jme_stop_tx(struct jme_softc *sc)
2851 reg = CSR_READ_4(sc, JME_TXCSR);
2852 if ((reg & TXCSR_TX_ENB) == 0)
2854 reg &= ~TXCSR_TX_ENB;
2855 CSR_WRITE_4(sc, JME_TXCSR, reg);
2856 for (i = JME_TIMEOUT; i > 0; i--) {
2858 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2862 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2866 jme_stop_rx(struct jme_softc *sc)
2871 reg = CSR_READ_4(sc, JME_RXCSR);
2872 if ((reg & RXCSR_RX_ENB) == 0)
2874 reg &= ~RXCSR_RX_ENB;
2875 CSR_WRITE_4(sc, JME_RXCSR, reg);
2876 for (i = JME_TIMEOUT; i > 0; i--) {
2878 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2882 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2886 jme_init_tx_ring(struct jme_softc *sc)
2888 struct jme_chain_data *cd;
2889 struct jme_txdesc *txd;
2892 sc->jme_cdata.jme_tx_prod = 0;
2893 sc->jme_cdata.jme_tx_cons = 0;
2894 sc->jme_cdata.jme_tx_cnt = 0;
2896 cd = &sc->jme_cdata;
2897 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2898 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2899 txd = &sc->jme_cdata.jme_txdesc[i];
2901 txd->tx_desc = &cd->jme_tx_ring[i];
2907 jme_init_ssb(struct jme_softc *sc)
2909 struct jme_chain_data *cd;
2911 cd = &sc->jme_cdata;
2912 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2916 jme_init_rx_ring(struct jme_rxdata *rdata)
2918 struct jme_rxdesc *rxd;
2921 KKASSERT(rdata->jme_rxhead == NULL &&
2922 rdata->jme_rxtail == NULL &&
2923 rdata->jme_rxlen == 0);
2924 rdata->jme_rx_cons = 0;
2926 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2927 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2930 rxd = &rdata->jme_rxdesc[i];
2932 rxd->rx_desc = &rdata->jme_rx_ring[i];
2933 error = jme_newbuf(rdata, rxd, 1);
2941 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
2944 bus_dma_segment_t segs;
2948 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2952 * The JMC250 has a 64bit boundary alignment limitation, so
2953 * jme(4) takes advantage of the hardware's 10 byte padding feature
2954 * in order not to copy the entire frame to align the IP header on a 32bit boundary.
2957 m->m_len = m->m_pkthdr.len = MCLBYTES;
2959 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2960 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2965 if_printf(&rdata->jme_sc->arpcom.ac_if,
2966 "can't load RX mbuf\n");
2971 if (rxd->rx_m != NULL) {
2972 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2973 BUS_DMASYNC_POSTREAD);
2974 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2976 map = rxd->rx_dmamap;
2977 rxd->rx_dmamap = rdata->jme_rx_sparemap;
2978 rdata->jme_rx_sparemap = map;
2980 rxd->rx_paddr = segs.ds_addr;
2982 jme_setup_rxdesc(rxd);
2987 jme_set_vlan(struct jme_softc *sc)
2989 struct ifnet *ifp = &sc->arpcom.ac_if;
2992 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2994 reg = CSR_READ_4(sc, JME_RXMAC);
2995 reg &= ~RXMAC_VLAN_ENB;
2996 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2997 reg |= RXMAC_VLAN_ENB;
2998 CSR_WRITE_4(sc, JME_RXMAC, reg);
3002 jme_set_filter(struct jme_softc *sc)
3004 struct ifnet *ifp = &sc->arpcom.ac_if;
3005 struct ifmultiaddr *ifma;
3010 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3012 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3013 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3017 * Always accept frames destined to our station address.
3018 * Always accept broadcast frames.
3020 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3022 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3023 if (ifp->if_flags & IFF_PROMISC)
3024 rxcfg |= RXMAC_PROMISC;
3025 if (ifp->if_flags & IFF_ALLMULTI)
3026 rxcfg |= RXMAC_ALLMULTI;
3027 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3028 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3029 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3034 * Set up the multicast address filter by passing all multicast
3035 * addresses through a CRC generator, and then using the low-order
3036 * 6 bits as an index into the 64 bit multicast hash table. The
3037 * high order bits select the register, while the rest of the bits
3038 * select the bit within the register.
3040 rxcfg |= RXMAC_MULTICAST;
3041 bzero(mchash, sizeof(mchash));
3043 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3044 if (ifma->ifma_addr->sa_family != AF_LINK)
3046 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3047 ifma->ifma_addr), ETHER_ADDR_LEN);
3049 /* Just want the 6 least significant bits. */
3052 /* Set the corresponding bit in the hash table. */
3053 mchash[crc >> 5] |= 1 << (crc & 0x1f);
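/*
 * Worked example (a sketch): if the big-endian CRC of a multicast
 * address has 101010 (decimal 42) as its 6 least significant bits,
 * then crc >> 5 = 1 and crc & 0x1f = 10, so bit 10 of JME_MAR1 is
 * set; any incoming multicast address hashing to the same 6 bit
 * value will pass the filter.
 */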
3056 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3057 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3058 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3062 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3064 struct jme_softc *sc = arg1;
3065 struct ifnet *ifp = &sc->arpcom.ac_if;
3068 ifnet_serialize_all(ifp);
3070 v = sc->jme_tx_coal_to;
3071 error = sysctl_handle_int(oidp, &v, 0, req);
3072 if (error || req->newptr == NULL)
3075 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3080 if (v != sc->jme_tx_coal_to) {
3081 sc->jme_tx_coal_to = v;
3082 if (ifp->if_flags & IFF_RUNNING)
3083 jme_set_tx_coal(sc);
3086 ifnet_deserialize_all(ifp);
3091 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3093 struct jme_softc *sc = arg1;
3094 struct ifnet *ifp = &sc->arpcom.ac_if;
3097 ifnet_serialize_all(ifp);
3099 v = sc->jme_tx_coal_pkt;
3100 error = sysctl_handle_int(oidp, &v, 0, req);
3101 if (error || req->newptr == NULL)
3104 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3109 if (v != sc->jme_tx_coal_pkt) {
3110 sc->jme_tx_coal_pkt = v;
3111 if (ifp->if_flags & IFF_RUNNING)
3112 jme_set_tx_coal(sc);
3115 ifnet_deserialize_all(ifp);
3120 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3122 struct jme_softc *sc = arg1;
3123 struct ifnet *ifp = &sc->arpcom.ac_if;
3126 ifnet_serialize_all(ifp);
3128 v = sc->jme_rx_coal_to;
3129 error = sysctl_handle_int(oidp, &v, 0, req);
3130 if (error || req->newptr == NULL)
3133 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3138 if (v != sc->jme_rx_coal_to) {
3139 sc->jme_rx_coal_to = v;
3140 if (ifp->if_flags & IFF_RUNNING)
3141 jme_set_rx_coal(sc);
3144 ifnet_deserialize_all(ifp);
3149 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3151 struct jme_softc *sc = arg1;
3152 struct ifnet *ifp = &sc->arpcom.ac_if;
3155 ifnet_serialize_all(ifp);
3157 v = sc->jme_rx_coal_pkt;
3158 error = sysctl_handle_int(oidp, &v, 0, req);
3159 if (error || req->newptr == NULL)
3162 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3167 if (v != sc->jme_rx_coal_pkt) {
3168 sc->jme_rx_coal_pkt = v;
3169 if (ifp->if_flags & IFF_RUNNING)
3170 jme_set_rx_coal(sc);
3173 ifnet_deserialize_all(ifp);
3178 jme_set_tx_coal(struct jme_softc *sc)
3182 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3184 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3185 PCCTX_COAL_PKT_MASK;
3186 reg |= PCCTX_COAL_TXQ0;
3187 CSR_WRITE_4(sc, JME_PCCTX, reg);
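/*
 * Packing sketch (hypothetical values; mask/shift names as used
 * above): with jme_tx_coal_to = 100 and jme_tx_coal_pkt = 8 the
 * register becomes
 *
 *	reg = ((100 << PCCTX_COAL_TO_SHIFT) & PCCTX_COAL_TO_MASK) |
 *	    ((8 << PCCTX_COAL_PKT_SHIFT) & PCCTX_COAL_PKT_MASK) |
 *	    PCCTX_COAL_TXQ0;
 *
 * so TXQ0 interrupts after 8 completed packets or after the timeout,
 * whichever comes first (the PCC timer unit is set to microseconds
 * in jme_init).
 */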
3191 jme_set_rx_coal(struct jme_softc *sc)
3196 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3198 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3199 PCCRX_COAL_PKT_MASK;
3200 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3201 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3204 #ifdef DEVICE_POLLING
3207 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3209 struct jme_softc *sc = ifp->if_softc;
3213 ASSERT_SERIALIZED(&sc->jme_serialize);
3217 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3220 case POLL_DEREGISTER:
3221 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3224 case POLL_AND_CHECK_STATUS:
3226 status = CSR_READ_4(sc, JME_INTR_STATUS);
3228 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3229 struct jme_rxdata *rdata =
3230 &sc->jme_cdata.jme_rx_data[r];
3232 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3233 jme_rxeof(rdata, count);
3234 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3237 if (status & INTR_RXQ_DESC_EMPTY) {
3238 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3239 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3240 RXCSR_RX_ENB | RXCSR_RXQ_START);
3243 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3245 if (!ifq_is_empty(&ifp->if_snd))
3247 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3252 #endif /* DEVICE_POLLING */
3255 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3260 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3261 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3262 JME_RX_RING_ALIGN, 0,
3263 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3264 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3266 device_printf(rdata->jme_sc->jme_dev,
3267 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3270 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3271 rdata->jme_rx_ring_map = dmem.dmem_map;
3272 rdata->jme_rx_ring = dmem.dmem_addr;
3273 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3279 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3281 if ((paddr & 0xffffffff) == 0) {
3283 * Don't allow the lower 32bits of the RX buffer's
3284 * physical address to be 0; otherwise it would break
3285 * the hardware pending RSS information delivery
3286 * detection on the RX path.
3294 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3299 lowaddr = BUS_SPACE_MAXADDR;
3300 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
3301 /* jme_rxbuf_dma_filter will be called */
3302 lowaddr = BUS_SPACE_MAXADDR_32BIT;
3305 /* Create tag for Rx buffers. */
3306 error = bus_dma_tag_create(
3307 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3308 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3309 lowaddr, /* lowaddr */
3310 BUS_SPACE_MAXADDR, /* highaddr */
3311 jme_rxbuf_dma_filter, NULL, /* filter, filterarg */
3312 MCLBYTES, /* maxsize */
3314 MCLBYTES, /* maxsegsize */
3315 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3316 &rdata->jme_rx_tag);
3318 device_printf(rdata->jme_sc->jme_dev,
3319 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3323 /* Create DMA maps for Rx buffers. */
3324 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3325 &rdata->jme_rx_sparemap);
3327 device_printf(rdata->jme_sc->jme_dev,
3328 "could not create %dth spare Rx dmamap.\n",
3330 bus_dma_tag_destroy(rdata->jme_rx_tag);
3331 rdata->jme_rx_tag = NULL;
3334 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3335 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3337 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3342 device_printf(rdata->jme_sc->jme_dev,
3343 "could not create %dth Rx dmamap "
3344 "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3346 for (j = 0; j < i; ++j) {
3347 rxd = &rdata->jme_rxdesc[j];
3348 bus_dmamap_destroy(rdata->jme_rx_tag,
3351 bus_dmamap_destroy(rdata->jme_rx_tag,
3352 rdata->jme_rx_sparemap);
3353 bus_dma_tag_destroy(rdata->jme_rx_tag);
3354 rdata->jme_rx_tag = NULL;
3362 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3366 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3367 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3369 if (status & rdata->jme_rx_coal) {
3370 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3371 jme_rxeof(rdata, -1);
3372 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3378 jme_enable_rss(struct jme_softc *sc)
3381 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3384 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3385 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3386 ("%s: invalid # of RX rings (%d)",
3387 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3389 rssc = RSSC_HASH_64_ENTRY;
3390 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3391 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3392 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3393 CSR_WRITE_4(sc, JME_RSSC, rssc);
3395 toeplitz_get_key(key, sizeof(key));
3396 for (i = 0; i < RSSKEY_NREGS; ++i) {
3399 keyreg = RSSKEY_REGVAL(key, i);
3400 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3402 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3406 * Create the redirect table in the following fashion:
3407 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3410 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3413 q = i % sc->jme_cdata.jme_rx_ring_cnt;
3414 ind |= q << (i * 8);
3416 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3418 for (i = 0; i < RSSTBL_NREGS; ++i)
3419 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
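/*
 * Resulting table sketch (assuming RSSTBL_REGSIZE is 4, one redirect
 * entry per byte): with 2 RX rings the loop above packs
 * ind = 0x01000100, with 4 rings ind = 0x03020100, and every RSSTBL
 * register is filled with the same repeating pattern.  Since the
 * ring count is a power of two, rdr_table[hash & rdr_table_mask] ==
 * (hash & ring_cnt_mask), as the comment above requires.
 */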
3423 jme_disable_rss(struct jme_softc *sc)
3425 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3429 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3431 struct jme_softc *sc = ifp->if_softc;
3433 ifnet_serialize_array_enter(sc->jme_serialize_arr,
3434 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3438 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3440 struct jme_softc *sc = ifp->if_softc;
3442 ifnet_serialize_array_exit(sc->jme_serialize_arr,
3443 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3447 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3449 struct jme_softc *sc = ifp->if_softc;
3451 return ifnet_serialize_array_try(sc->jme_serialize_arr,
3452 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3458 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3459 boolean_t serialized)
3461 struct jme_softc *sc = ifp->if_softc;
3463 ifnet_serialize_array_assert(sc->jme_serialize_arr,
3464 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
3468 #endif /* INVARIANTS */
3471 jme_msix_try_alloc(device_t dev)
3473 struct jme_softc *sc = device_get_softc(dev);
3474 struct jme_msix_data *msix;
3475 int error, i, r, msix_enable, msix_count;
3477 msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt;
3478 KKASSERT(msix_count <= JME_NMSIX);
3480 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3483 * We leave the 1st MSI-X vector unused, so we
3484 * actually need msix_count + 1 MSI-X vectors.
3486 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3489 for (i = 0; i < msix_count; ++i)
3490 sc->jme_msix[i].jme_msix_rid = -1;
3494 msix = &sc->jme_msix[i++];
3495 msix->jme_msix_cpuid = 0; /* XXX Put TX to cpu0 */
3496 msix->jme_msix_arg = &sc->jme_cdata;
3497 msix->jme_msix_func = jme_msix_tx;
3498 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3499 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
3500 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3501 device_get_nameunit(dev));
3503 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3504 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3506 msix = &sc->jme_msix[i++];
3507 msix->jme_msix_cpuid = r; /* XXX Put RX to cpuX */
3508 msix->jme_msix_arg = rdata;
3509 msix->jme_msix_func = jme_msix_rx;
3510 msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
3511 msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3512 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3513 "%s rx%d", device_get_nameunit(dev), r);
3516 KKASSERT(i == msix_count);
3518 error = pci_setup_msix(dev);
3522 /* Set up jme_msix_cnt early so that we can clean up on failure */
3523 sc->jme_msix_cnt = msix_count;
3525 for (i = 0; i < msix_count; ++i) {
3526 msix = &sc->jme_msix[i];
3528 msix->jme_msix_vector = i + 1;
3529 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3530 &msix->jme_msix_rid, msix->jme_msix_cpuid);
3534 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3535 &msix->jme_msix_rid, RF_ACTIVE);
3536 if (msix->jme_msix_res == NULL) {
3542 for (i = 0; i < JME_INTR_CNT; ++i) {
3543 uint32_t intr_mask = (1 << i);
3546 if ((JME_INTRS & intr_mask) == 0)
3549 for (x = 0; x < msix_count; ++x) {
3550 msix = &sc->jme_msix[x];
3551 if (msix->jme_msix_intrs & intr_mask) {
3554 reg = i / JME_MSINUM_FACTOR;
3555 KKASSERT(reg < JME_MSINUM_CNT);
3557 shift = (i % JME_MSINUM_FACTOR) * 4;
3559 sc->jme_msinum[reg] |=
3560 (msix->jme_msix_vector << shift);
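/*
 * Mapping sketch (assuming JME_MSINUM_FACTOR is 8, i.e. one 4 bit
 * nibble per interrupt source): interrupt source bit i = 9 handled
 * by MSI-X vector 2 lands in register 9 / 8 = 1 at nibble 9 % 8 = 1,
 * i.e. jme_msinum[1] |= 2 << 4.
 */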
3568 for (i = 0; i < JME_MSINUM_CNT; ++i) {
3569 device_printf(dev, "MSINUM%d: %#x\n", i,
3574 pci_enable_msix(dev);
3575 sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3583 jme_intr_alloc(device_t dev)
3585 struct jme_softc *sc = device_get_softc(dev);
3588 jme_msix_try_alloc(dev);
3590 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3591 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3592 &sc->jme_irq_rid, &irq_flags);
3594 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3595 &sc->jme_irq_rid, irq_flags);
3596 if (sc->jme_irq_res == NULL) {
3597 device_printf(dev, "can't allocate irq\n");
3605 jme_msix_free(device_t dev)
3607 struct jme_softc *sc = device_get_softc(dev);
3610 KKASSERT(sc->jme_msix_cnt > 1);
3612 for (i = 0; i < sc->jme_msix_cnt; ++i) {
3613 struct jme_msix_data *msix = &sc->jme_msix[i];
3615 if (msix->jme_msix_res != NULL) {
3616 bus_release_resource(dev, SYS_RES_IRQ,
3617 msix->jme_msix_rid, msix->jme_msix_res);
3618 msix->jme_msix_res = NULL;
3620 if (msix->jme_msix_rid >= 0) {
3621 pci_release_msix_vector(dev, msix->jme_msix_rid);
3622 msix->jme_msix_rid = -1;
3625 pci_teardown_msix(dev);
3629 jme_intr_free(device_t dev)
3631 struct jme_softc *sc = device_get_softc(dev);
3633 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3634 if (sc->jme_irq_res != NULL) {
3635 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3638 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3639 pci_release_msi(dev);
3646 jme_msix_tx(void *xcd)
3648 struct jme_chain_data *cd = xcd;
3649 struct jme_softc *sc = cd->jme_sc;
3650 struct ifnet *ifp = &sc->arpcom.ac_if;
3652 ASSERT_SERIALIZED(&cd->jme_tx_serialize);
3654 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3656 CSR_WRITE_4(sc, JME_INTR_STATUS,
3657 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3659 if (ifp->if_flags & IFF_RUNNING) {
3661 if (!ifq_is_empty(&ifp->if_snd))
3665 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3669 jme_msix_rx(void *xrdata)
3671 struct jme_rxdata *rdata = xrdata;
3672 struct jme_softc *sc = rdata->jme_sc;
3673 struct ifnet *ifp = &sc->arpcom.ac_if;
3676 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3678 CSR_WRITE_4(sc, JME_INTR_MASK_CLR,
3679 (rdata->jme_rx_coal | rdata->jme_rx_empty));
3681 status = CSR_READ_4(sc, JME_INTR_STATUS);
3682 status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);
3684 if (status & rdata->jme_rx_coal)
3685 status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
3686 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3688 if (ifp->if_flags & IFF_RUNNING) {
3689 if (status & rdata->jme_rx_coal)
3690 jme_rxeof(rdata, -1);
3692 if (status & rdata->jme_rx_empty) {
3693 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3694 RXCSR_RX_ENB | RXCSR_RXQ_START);
3698 CSR_WRITE_4(sc, JME_INTR_MASK_SET,
3699 (rdata->jme_rx_coal | rdata->jme_rx_empty));
3703 jme_set_msinum(struct jme_softc *sc)
3707 for (i = 0; i < JME_MSINUM_CNT; ++i)
3708 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3712 jme_intr_setup(device_t dev)
3714 struct jme_softc *sc = device_get_softc(dev);
3715 struct ifnet *ifp = &sc->arpcom.ac_if;
3718 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3719 return jme_msix_setup(dev);
3721 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3722 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3724 device_printf(dev, "could not set up interrupt handler.\n");
3728 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3729 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3734 jme_intr_teardown(device_t dev)
3736 struct jme_softc *sc = device_get_softc(dev);
3738 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3739 jme_msix_teardown(dev, sc->jme_msix_cnt);
3741 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3745 jme_msix_setup(device_t dev)
3747 struct jme_softc *sc = device_get_softc(dev);
3748 struct ifnet *ifp = &sc->arpcom.ac_if;
3751 for (x = 0; x < sc->jme_msix_cnt; ++x) {
3752 struct jme_msix_data *msix = &sc->jme_msix[x];
3755 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3756 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3757 &msix->jme_msix_handle, msix->jme_msix_serialize,
3758 msix->jme_msix_desc);
3760 device_printf(dev, "could not set up %s "
3761 "interrupt handler.\n", msix->jme_msix_desc);
3762 jme_msix_teardown(dev, x);
3766 ifp->if_cpuid = 0; /* XXX */
3771 jme_msix_teardown(device_t dev, int msix_count)
3773 struct jme_softc *sc = device_get_softc(dev);
3776 for (x = 0; x < msix_count; ++x) {
3777 struct jme_msix_data *msix = &sc->jme_msix[x];
3779 bus_teardown_intr(dev, msix->jme_msix_res,
3780 msix->jme_msix_handle);
3785 jme_serialize_skipmain(struct jme_softc *sc)
3787 lwkt_serialize_array_enter(sc->jme_serialize_arr,
3788 sc->jme_serialize_cnt, 1);
3792 jme_deserialize_skipmain(struct jme_softc *sc)
3794 lwkt_serialize_array_exit(sc->jme_serialize_arr,
3795 sc->jme_serialize_cnt, 1);