/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
/* Define the following to enable printing of Rx errors. */
#undef JME_SHOW_ERRORS

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef DEVICE_POLLING
static void	jme_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif
static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *, int);
static int	jme_rxeof_chain(struct jme_softc *, int,
		    struct mbuf_chain *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_softc *, int);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
static int	jme_rxring_dma_alloc(struct jme_softc *, int);
static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};
static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = JME_NRXRING_DEF;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
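
	/*
	 * The SMI unit clears SMI_OP_EXECUTE when the shuttle operation
	 * completes; busy-wait (bounded by JME_PHY_TIMEOUT iterations)
	 * for the bit to drop before picking up the read data below.
	 */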
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
			      "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
			      "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the values of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring them afterward is not sufficient to ensure
	 * correct MAC state, because stopping the MAC can take a
	 * while and the hardware may have updated JME_TXNDA/JME_RXNDA
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(sc, r);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}

	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}
static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t did, vid;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
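
/*
 * The station address is kept in the EEPROM behind two signature bytes
 * as a chain of descriptors; each descriptor carries a function/page
 * id, a register offset and one data byte.  The walk below collects the
 * bytes destined for JME_PAR0..JME_PAR0 + 5 until all six bytes of the
 * ethernet address have been matched.
 */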
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);

	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, pmc;
	uint8_t eaddr[ETHER_ADDR_LEN];

	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Calculate the number of Rx rings based on ncpus2
	 */
	sc->jme_rx_ring_cnt = jme_rx_ring_count;
	if (sc->jme_rx_ring_cnt <= 0)
		sc->jme_rx_ring_cnt = JME_NRXRING_1;
	if (sc->jme_rx_ring_cnt > ncpus2)
		sc->jme_rx_ring_cnt = ncpus2;

	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
		sc->jme_rx_ring_cnt = JME_NRXRING_4;
	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
		sc->jme_rx_ring_cnt = JME_NRXRING_2;
	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;

	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;
	sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
	for (j = 0; j < sc->jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}
	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	sc->jme_cdata.jme_sc = sc;
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers, it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return (ENXIO);
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 *
	 * Integrated JR0211 has a fixed PHY address, whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;

	/* Create sysctl tree. */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);
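
	/*
	 * Note that the send queue is sized to the Tx ring minus
	 * JME_TXD_RSVD, so the reserved descriptors are never handed to
	 * the hardware; jme_start() and jme_encap() enforce the same
	 * reservation when filling the ring.
	 */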

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_HWTAGGING;
	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = JME_CSUM_FEATURES;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* Vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);
fail:
	jme_detach(dev);
	return (error);
}
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	char rx_ring_pkt[32];
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
	    0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
	    0, "RX ring in use");
#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_pkt, CTLFLAG_RW,
		    &sc->jme_rx_ring_pkt[r],
		    0, "RXed packets");
	}
#endif

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descriptors is set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned by JME_NDESC_ALIGN (16 currently).
	 */
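	/*
	 * For example, if the user configures only 96 Tx descriptors,
	 * the Tx coalescing packet count is capped at 96 / 6 = 16; with
	 * 128 Rx descriptors the Rx cap would be 128 / 4 = 32.
	 */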
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, j;

	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    JME_TX_RING_SIZE(sc),
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return (error);
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(sc, i);
		if (error)
			return (error);
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return (error);
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return (error);
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return (error);
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(sc, i);
		if (error)
			return (error);
	}
	return (0);
}
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return (jme_suspend(dev));
}
/*
 * Unlike other ethernet controllers, JMC250 requires explicit
 * resetting of the link speed to 10/100Mbps, as a gigabit link
 * will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as
 * we have no control after powering off.  If the renegotiation fails,
 * WOL may not work.  Running at 1Gbps draws more power than the 375mA
 * at 3.3V specified in the PCI specification, and that would result
 * in power to the ethernet controller being shut down completely.
 *
 * TODO:
 * Save the current negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the phy driver's suspend method.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}

	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);

	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			pause("jmelnk", hz);
		}
		if (i == MII_ANEGTICKS_GIGE) {
			device_printf(sc->jme_dev,
			    "establishing link failed, WOL may not work!\n");
		}
	}

	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
	jme_setwol(sc);
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int pmc;

	ifnet_serialize_all(ifp);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));
	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
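
	/*
	 * tx_ndesc was primed to (1 - i) above: on a 64bit chain (i == 0)
	 * it accounts for the symbol descriptor, while on a 32bit chain
	 * (i == 1) the first effective descriptor is already included in
	 * nsegs, so the addition below yields the exact descriptor usage.
	 */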
	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return (0);
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return (error);
}
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to the TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but availability of
			 * Tx checksum offload should be checked against
			 * the new MTU size, as the FIFO is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if (mask & IFCAP_RSS) {
			ifp->if_capenable ^= IFCAP_RSS;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;

		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify the hardware of the availability of new
			 * Rx buffers.  Reading RXCSR takes a very long
			 * time under heavy load, so cache the RXCSR value
			 * and write the ORed value with the kick command
			 * to the RXCSR.  This saves one register access
			 * cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;

		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
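
/*
 * Re-arm a run of Rx descriptors in place: hand them back to the
 * hardware (JME_RD_OWN) with a full cluster length, so the frame that
 * occupied them is dropped without reallocating any buffers.
 */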
static __inline void
jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &rdata->jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
	}
}
static __inline struct pktinfo *
jme_pktinfo(struct pktinfo *pi, uint32_t flags)
{
	if (flags & JME_RD_IPV4)
		pi->pi_netisr = NETISR_IP;
	else if (flags & JME_RD_IPV6)
		pi->pi_netisr = NETISR_IPV6;
	else
		return (NULL);

	pi->pi_flags = 0;
	pi->pi_l3proto = IPPROTO_UNKNOWN;

	if (flags & JME_RD_MORE_FRAG)
		pi->pi_flags |= PKTINFO_FLAG_FRAG;
	else if (flags & JME_RD_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else if (flags & JME_RD_UDP)
		pi->pi_l3proto = IPPROTO_UDP;
	else
		pi = NULL;
	return (pi);
}
/* Receive a frame. */
static void
jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
			"hash 0x%08x, hash info 0x%08x\n",
			ring, flags, hash, hashinfo);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, ring, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(sc, ring);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * The actual size is fixed up when we encounter the
		 * end of a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * The receive processor can receive a maximum
			 * frame size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
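				/*
				 * E.g. with jme_rxlen = 5000 bytes in
				 * 2048 byte clusters (nsegs = 3): the
				 * first mbuf holds 2048 - 10 bytes, the
				 * middle one 2048, and the last one
				 * 5000 - (2038 + 2048) = 914 bytes.
				 */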
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding
			 * which are used to align the IP header on a
			 * 32bit boundary.  Also note, the CRC bytes
			 * are automatically removed by the chip.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
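					/*
					 * The hardware verified the full
					 * TCP/UDP checksum, so present a
					 * fixed pseudo-header value of
					 * 0xffff and let the stack skip
					 * its own verification.
					 */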
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			ether_input_chain(ifp, m, pi, chain);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc, ring);
#ifdef JME_RSS_DEBUG
			sc->jme_rx_ring_pkt[ring]++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
}
static int
jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
		int count)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	prog = 0;
	for (;;) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif
		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check the number of segments against received bytes.
		 * A non-matching value would indicate that the hardware
		 * is still trying to update Rx descriptors.  I'm not
		 * sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
				  "and packet size(%d) mismatch\n",
				  nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc, ring, chain);
		prog++;
	}
	return (prog);
}
static void
jme_rxeof(struct jme_softc *sc, int ring)
{
	struct mbuf_chain chain[MAXCPU];

	ether_input_chain_init(chain);
	if (jme_rxeof_chain(sc, ring, chain, -1))
		ether_input_dispatch(chain);
}
static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ifnet_serialize_all(ifp);

	mii_tick(mii);
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	ifnet_deserialize_all(ifp);
}
2322 jme_reset(struct jme_softc *sc)
2326 /* Make sure that TX and RX are stopped */
2331 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2335 * Hold reset bit before stop reset
2338 /* Disable TXMAC and TXOFL clock sources */
2339 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2340 /* Disable RXMAC clock source */
2341 val = CSR_READ_4(sc, JME_GPREG1);
2342 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2344 CSR_READ_4(sc, JME_GHC);
2347 CSR_WRITE_4(sc, JME_GHC, 0);
2349 CSR_READ_4(sc, JME_GHC);
2352 * After the reset bit has been cleared, restart the clock sources:
2355 /* Enable TXMAC and TXOFL clock sources */
2356 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2357 /* Enable RXMAC clock source */
2358 val = CSR_READ_4(sc, JME_GPREG1);
2359 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2361 CSR_READ_4(sc, JME_GHC);
2363 /* Disable TXMAC and TXOFL clock sources */
2364 CSR_WRITE_4(sc, JME_GHC, 0);
2365 /* Disable RXMAC clock source */
2366 val = CSR_READ_4(sc, JME_GPREG1);
2367 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2369 CSR_READ_4(sc, JME_GHC);
2371 /* Enable TX and RX */
2372 val = CSR_READ_4(sc, JME_TXCSR);
2373 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2374 val = CSR_READ_4(sc, JME_RXCSR);
2375 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2377 CSR_READ_4(sc, JME_TXCSR);
2378 CSR_READ_4(sc, JME_RXCSR);
2380 /* Enable TXMAC and TXOFL clock sources */
2381 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2382 /* Enable RXMAC clock source */
2383 val = CSR_READ_4(sc, JME_GPREG1);
2384 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2386 CSR_READ_4(sc, JME_GHC);
2388 /* Stop TX and RX */
2396 struct jme_softc *sc = xsc;
2397 struct ifnet *ifp = &sc->arpcom.ac_if;
2398 struct mii_data *mii;
2399 uint8_t eaddr[ETHER_ADDR_LEN];
2404 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2407 * Cancel any pending I/O.
2412 * Reset the chip to a known state.
2417 * Set up the MSI/MSI-X vector to interrupt mapping.
2422 howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2423 KKASSERT(sc->jme_txd_spare >= 1);
2426 * If we use 64bit address mode for transmitting, each Tx request
2427 * needs one more symbol descriptor.
2429 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2430 sc->jme_txd_spare += 1;
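/*
 * Example: with a 1500 byte MTU and standard 2K clusters
 * (MCLBYTES == 2048), howmany(1500 + 18, 2048) reserves one spare
 * descriptor; 64bit address mode (handled above) adds one more
 * for the symbol descriptor.
 */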
2432 if (ifp->if_capenable & IFCAP_RSS)
2435 jme_disable_rss(sc);
2437 /* Init RX descriptors */
2438 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2439 error = jme_init_rx_ring(sc, r);
2441 if_printf(ifp, "initialization failed: "
2442 "no memory for %dth RX ring.\n", r);
2448 /* Init TX descriptors */
2449 jme_init_tx_ring(sc);
2451 /* Initialize shadow status block. */
2454 /* Reprogram the station address. */
2455 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2456 CSR_WRITE_4(sc, JME_PAR0,
2457 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2458 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2461 * Configure Tx queue.
2462 * Tx priority queue weight value : 0
2463 * Tx FIFO threshold for processing next packet : 16QW
2464 * Maximum Tx DMA length : 512
2465 * Allow Tx DMA burst.
2467 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2468 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2469 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2470 sc->jme_txcsr |= sc->jme_tx_dma_size;
2471 sc->jme_txcsr |= TXCSR_DMA_BURST;
2472 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2474 /* Set Tx descriptor counter. */
2475 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2477 /* Set Tx ring address to the hardware. */
2478 paddr = sc->jme_cdata.jme_tx_ring_paddr;
2479 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2480 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2482 /* Configure TxMAC parameters. */
2483 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2484 reg |= TXMAC_THRESH_1_PKT;
2485 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2486 CSR_WRITE_4(sc, JME_TXMAC, reg);
2489 * Configure Rx queue.
2490 * FIFO full threshold for transmitting Tx pause packet : 128T
2491 * FIFO threshold for processing next packet : 128QW
2493 * Max Rx DMA length : 128
2494 * Rx descriptor retry : 32
2495 * Rx descriptor retry time gap : 256ns
2496 * Don't receive runt/bad frames.
2498 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2501 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2502 * than 4K bytes will cause Rx FIFO overruns; decrease the FIFO
2503 * threshold to reduce FIFO overruns for frames larger
2504 * than 4000 bytes.
2505 * For best performance with standard MTU sized frames, use the
2506 * maximum allowable FIFO threshold, 128QW.
2508 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2510 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2512 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
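/*
 * With the standard 1500 byte MTU the largest frame is
 * 1500 + 14 (header) + 4 (VLAN) + 4 (CRC) = 1522 bytes, well
 * under the 4K Rx FIFO, so the fast 128QW threshold applies.
 */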
2514 /* Improve PCI Express compatibility */
2515 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2517 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2518 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2519 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2520 /* XXX TODO DROP_BAD */
2522 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2523 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2525 /* Set Rx descriptor counter. */
2526 CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2528 /* Set Rx ring address to the hardware. */
2529 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2530 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2531 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2534 /* Clear receive filter. */
2535 CSR_WRITE_4(sc, JME_RXMAC, 0);
2537 /* Set up the receive filter. */
2542 * Disable all WOL bits as WOL can interfere with normal Rx
2543 * operation. Also clear the WOL detection status bits.
2545 reg = CSR_READ_4(sc, JME_PMCS);
2546 reg &= ~PMCS_WOL_ENB_MASK;
2547 CSR_WRITE_4(sc, JME_PMCS, reg);
2550 * Pad 10 bytes right before the received frame. This greatly
2551 * helps Rx performance on strict-alignment architectures, as
2552 * the frame does not need to be copied to align the payload.
2554 reg = CSR_READ_4(sc, JME_RXMAC);
2555 reg |= RXMAC_PAD_10BYTES;
2557 if (ifp->if_capenable & IFCAP_RXCSUM)
2558 reg |= RXMAC_CSUM_ENB;
2559 CSR_WRITE_4(sc, JME_RXMAC, reg);
2561 /* Configure general purpose reg0 */
2562 reg = CSR_READ_4(sc, JME_GPREG0);
2563 reg &= ~GPREG0_PCC_UNIT_MASK;
2564 /* Set PCC timer resolution to micro-seconds unit. */
2565 reg |= GPREG0_PCC_UNIT_US;
2567 * Disable all shadow register posting as we have to read the
2568 * JME_INTR_STATUS register in jme_intr. It also seems
2569 * hard to synchronize the interrupt status between
2570 * hardware and software with shadow posting, due to the
2571 * requirements of bus_dmamap_sync(9).
2573 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2574 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2575 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2576 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2577 /* Disable posting of DW0. */
2578 reg &= ~GPREG0_POST_DW0_ENB;
2579 /* Clear PME message. */
2580 reg &= ~GPREG0_PME_ENB;
2581 /* Set PHY address. */
2582 reg &= ~GPREG0_PHY_ADDR_MASK;
2583 reg |= sc->jme_phyaddr;
2584 CSR_WRITE_4(sc, JME_GPREG0, reg);
2586 /* Configure Tx queue 0 packet completion coalescing. */
2587 jme_set_tx_coal(sc);
2589 /* Configure Rx queue 0 packet completion coalescing. */
2590 jme_set_rx_coal(sc);
2592 /* Configure shadow status block but don't enable posting. */
2593 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2594 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2595 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2597 /* Disable Timer 1 and Timer 2. */
2598 CSR_WRITE_4(sc, JME_TIMER1, 0);
2599 CSR_WRITE_4(sc, JME_TIMER2, 0);
2601 /* Configure retry transmit period, retry limit value. */
2602 CSR_WRITE_4(sc, JME_TXTRHD,
2603 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2604 TXTRHD_RT_PERIOD_MASK) |
2605 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2606 TXTRHD_RT_LIMIT_MASK));
2608 #ifdef DEVICE_POLLING
2609 if (!(ifp->if_flags & IFF_POLLING))
2611 /* Initialize the interrupt mask. */
2612 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2613 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2616 * Enabling Tx/Rx DMA engines and Rx queue processing is
2617 * done after detection of valid link in jme_miibus_statchg.
2619 sc->jme_flags &= ~JME_FLAG_LINK;
2621 /* Set the current media. */
2622 mii = device_get_softc(sc->jme_miibus);
2625 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2627 ifp->if_flags |= IFF_RUNNING;
2628 ifp->if_flags &= ~IFF_OACTIVE;
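/*
 * jme_stop: mark the interface down, disable interrupts and the
 * receiver/transmitter, then free any mbufs still held by the Rx
 * and Tx rings.
 */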
2632 jme_stop(struct jme_softc *sc)
2634 struct ifnet *ifp = &sc->arpcom.ac_if;
2635 struct jme_txdesc *txd;
2636 struct jme_rxdesc *rxd;
2637 struct jme_rxdata *rdata;
2640 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2643 * Mark the interface down and cancel the watchdog timer.
2645 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2648 callout_stop(&sc->jme_tick_ch);
2649 sc->jme_flags &= ~JME_FLAG_LINK;
2652 * Disable interrupts.
2654 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2655 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2657 /* Disable updating shadow status block. */
2658 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2659 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2661 /* Stop receiver, transmitter. */
2666 * Free partially finished RX segments
2668 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2669 rdata = &sc->jme_cdata.jme_rx_data[r];
2670 if (rdata->jme_rxhead != NULL)
2671 m_freem(rdata->jme_rxhead);
2672 JME_RXCHAIN_RESET(sc, r);
2676 * Free RX and TX mbufs still in the queues.
2678 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2679 rdata = &sc->jme_cdata.jme_rx_data[r];
2680 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2681 rxd = &rdata->jme_rxdesc[i];
2682 if (rxd->rx_m != NULL) {
2683 bus_dmamap_unload(rdata->jme_rx_tag,
2690 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2691 txd = &sc->jme_cdata.jme_txdesc[i];
2692 if (txd->tx_m != NULL) {
2693 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2703 jme_stop_tx(struct jme_softc *sc)
2708 reg = CSR_READ_4(sc, JME_TXCSR);
2709 if ((reg & TXCSR_TX_ENB) == 0)
2711 reg &= ~TXCSR_TX_ENB;
2712 CSR_WRITE_4(sc, JME_TXCSR, reg);
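/* Poll up to JME_TIMEOUT times for the transmitter to stop. */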
2713 for (i = JME_TIMEOUT; i > 0; i--) {
2715 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2719 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2723 jme_stop_rx(struct jme_softc *sc)
2728 reg = CSR_READ_4(sc, JME_RXCSR);
2729 if ((reg & RXCSR_RX_ENB) == 0)
2731 reg &= ~RXCSR_RX_ENB;
2732 CSR_WRITE_4(sc, JME_RXCSR, reg);
2733 for (i = JME_TIMEOUT; i > 0; i--) {
2735 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2739 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2743 jme_init_tx_ring(struct jme_softc *sc)
2745 struct jme_chain_data *cd;
2746 struct jme_txdesc *txd;
2749 sc->jme_cdata.jme_tx_prod = 0;
2750 sc->jme_cdata.jme_tx_cons = 0;
2751 sc->jme_cdata.jme_tx_cnt = 0;
2753 cd = &sc->jme_cdata;
2754 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2755 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2756 txd = &sc->jme_cdata.jme_txdesc[i];
2758 txd->tx_desc = &cd->jme_tx_ring[i];
2764 jme_init_ssb(struct jme_softc *sc)
2766 struct jme_chain_data *cd;
2768 cd = &sc->jme_cdata;
2769 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2773 jme_init_rx_ring(struct jme_softc *sc, int ring)
2775 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2776 struct jme_rxdesc *rxd;
2779 KKASSERT(rdata->jme_rxhead == NULL &&
2780 rdata->jme_rxtail == NULL &&
2781 rdata->jme_rxlen == 0);
2782 rdata->jme_rx_cons = 0;
2784 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2785 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2788 rxd = &rdata->jme_rxdesc[i];
2790 rxd->rx_desc = &rdata->jme_rx_ring[i];
2791 error = jme_newbuf(sc, ring, rxd, 1);
2799 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2801 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2802 struct jme_desc *desc;
2804 bus_dma_segment_t segs;
2808 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2812 * The JMC250 has a 64bit boundary alignment limitation, so jme(4)
2813 * takes advantage of the hardware's 10 byte padding feature in
2814 * order not to copy the entire frame to align the IP header on a 32bit boundary.
2817 m->m_len = m->m_pkthdr.len = MCLBYTES;
2819 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2820 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2825 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2829 if (rxd->rx_m != NULL) {
2830 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2831 BUS_DMASYNC_POSTREAD);
2832 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
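/*
 * The spare map now holds the new mbuf; swap it with the
 * descriptor's map so the old map becomes the next spare.
 */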
2834 map = rxd->rx_dmamap;
2835 rxd->rx_dmamap = rdata->jme_rx_sparemap;
2836 rdata->jme_rx_sparemap = map;
2839 desc = rxd->rx_desc;
2840 desc->buflen = htole32(segs.ds_len);
2841 desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2842 desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2843 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2849 jme_set_vlan(struct jme_softc *sc)
2851 struct ifnet *ifp = &sc->arpcom.ac_if;
2854 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2856 reg = CSR_READ_4(sc, JME_RXMAC);
2857 reg &= ~RXMAC_VLAN_ENB;
2858 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2859 reg |= RXMAC_VLAN_ENB;
2860 CSR_WRITE_4(sc, JME_RXMAC, reg);
2864 jme_set_filter(struct jme_softc *sc)
2866 struct ifnet *ifp = &sc->arpcom.ac_if;
2867 struct ifmultiaddr *ifma;
2872 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2874 rxcfg = CSR_READ_4(sc, JME_RXMAC);
2875 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2879 * Always accept frames destined for our station address.
2880 * Always accept broadcast frames.
2882 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2884 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2885 if (ifp->if_flags & IFF_PROMISC)
2886 rxcfg |= RXMAC_PROMISC;
2887 if (ifp->if_flags & IFF_ALLMULTI)
2888 rxcfg |= RXMAC_ALLMULTI;
2889 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2890 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2891 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2896 * Set up the multicast address filter by passing all multicast
2897 * addresses through a CRC generator, and then using the low-order
2898 * 6 bits as an index into the 64 bit multicast hash table. The
2899 * high order bits select the register, while the rest of the bits
2900 * select the bit within the register.
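* For example, a CRC whose low 6 bits are 0x2a sets bit 10 of
* MAR1: 0x2a >> 5 == 1 selects mchash[1], and 0x2a & 0x1f == 10.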
2902 rxcfg |= RXMAC_MULTICAST;
2903 bzero(mchash, sizeof(mchash));
2905 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2906 if (ifma->ifma_addr->sa_family != AF_LINK)
2908 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2909 ifma->ifma_addr), ETHER_ADDR_LEN);
2911 /* Just want the 6 least significant bits. */
2912 crc &= 0x3f;
2914 /* Set the corresponding bit in the hash table. */
2915 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2918 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2919 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2920 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2924 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2926 struct jme_softc *sc = arg1;
2927 struct ifnet *ifp = &sc->arpcom.ac_if;
2930 ifnet_serialize_all(ifp);
2932 v = sc->jme_tx_coal_to;
2933 error = sysctl_handle_int(oidp, &v, 0, req);
2934 if (error || req->newptr == NULL)
2937 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2942 if (v != sc->jme_tx_coal_to) {
2943 sc->jme_tx_coal_to = v;
2944 if (ifp->if_flags & IFF_RUNNING)
2945 jme_set_tx_coal(sc);
2948 ifnet_deserialize_all(ifp);
2953 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2955 struct jme_softc *sc = arg1;
2956 struct ifnet *ifp = &sc->arpcom.ac_if;
2959 ifnet_serialize_all(ifp);
2961 v = sc->jme_tx_coal_pkt;
2962 error = sysctl_handle_int(oidp, &v, 0, req);
2963 if (error || req->newptr == NULL)
2966 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2971 if (v != sc->jme_tx_coal_pkt) {
2972 sc->jme_tx_coal_pkt = v;
2973 if (ifp->if_flags & IFF_RUNNING)
2974 jme_set_tx_coal(sc);
2977 ifnet_deserialize_all(ifp);
2982 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2984 struct jme_softc *sc = arg1;
2985 struct ifnet *ifp = &sc->arpcom.ac_if;
2988 ifnet_serialize_all(ifp);
2990 v = sc->jme_rx_coal_to;
2991 error = sysctl_handle_int(oidp, &v, 0, req);
2992 if (error || req->newptr == NULL)
2995 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3000 if (v != sc->jme_rx_coal_to) {
3001 sc->jme_rx_coal_to = v;
3002 if (ifp->if_flags & IFF_RUNNING)
3003 jme_set_rx_coal(sc);
3006 ifnet_deserialize_all(ifp);
3011 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3013 struct jme_softc *sc = arg1;
3014 struct ifnet *ifp = &sc->arpcom.ac_if;
3017 ifnet_serialize_all(ifp);
3019 v = sc->jme_rx_coal_pkt;
3020 error = sysctl_handle_int(oidp, &v, 0, req);
3021 if (error || req->newptr == NULL)
3024 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3029 if (v != sc->jme_rx_coal_pkt) {
3030 sc->jme_rx_coal_pkt = v;
3031 if (ifp->if_flags & IFF_RUNNING)
3032 jme_set_rx_coal(sc);
3035 ifnet_deserialize_all(ifp);
3040 jme_set_tx_coal(struct jme_softc *sc)
3044 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3046 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3047 PCCTX_COAL_PKT_MASK;
3048 reg |= PCCTX_COAL_TXQ0;
3049 CSR_WRITE_4(sc, JME_PCCTX, reg);
3053 jme_set_rx_coal(struct jme_softc *sc)
3058 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3060 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3061 PCCRX_COAL_PKT_MASK;
3062 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3063 if (r < sc->jme_rx_ring_inuse)
3064 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3066 CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3070 #ifdef DEVICE_POLLING
3073 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3075 struct jme_softc *sc = ifp->if_softc;
3076 struct mbuf_chain chain[MAXCPU];
3080 ASSERT_SERIALIZED(&sc->jme_serialize);
3084 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3087 case POLL_DEREGISTER:
3088 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3091 case POLL_AND_CHECK_STATUS:
3093 status = CSR_READ_4(sc, JME_INTR_STATUS);
3095 ether_input_chain_init(chain);
3096 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3097 struct jme_rxdata *rdata =
3098 &sc->jme_cdata.jme_rx_data[r];
3100 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3101 prog += jme_rxeof_chain(sc, r, chain, count);
3102 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3105 ether_input_dispatch(chain);
3107 if (status & INTR_RXQ_DESC_EMPTY) {
3108 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3109 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3110 RXCSR_RX_ENB | RXCSR_RXQ_START);
3113 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3115 if (!ifq_is_empty(&ifp->if_snd))
3117 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3122 #endif /* DEVICE_POLLING */
3125 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3127 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3131 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3132 JME_RX_RING_ALIGN, 0,
3133 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3134 JME_RX_RING_SIZE(sc),
3135 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3137 device_printf(sc->jme_dev,
3138 "could not allocate %dth Rx ring.\n", ring);
3141 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3142 rdata->jme_rx_ring_map = dmem.dmem_map;
3143 rdata->jme_rx_ring = dmem.dmem_addr;
3144 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3150 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3152 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3155 /* Create tag for Rx buffers. */
3156 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3157 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3158 BUS_SPACE_MAXADDR, /* lowaddr */
3159 BUS_SPACE_MAXADDR, /* highaddr */
3160 NULL, NULL, /* filter, filterarg */
3161 MCLBYTES, /* maxsize */
3163 MCLBYTES, /* maxsegsize */
3164 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3165 &rdata->jme_rx_tag);
3167 device_printf(sc->jme_dev,
3168 "could not create %dth Rx DMA tag.\n", ring);
3172 /* Create DMA maps for Rx buffers. */
3173 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3174 &rdata->jme_rx_sparemap);
3176 device_printf(sc->jme_dev,
3177 "could not create %dth spare Rx dmamap.\n", ring);
3178 bus_dma_tag_destroy(rdata->jme_rx_tag);
3179 rdata->jme_rx_tag = NULL;
3182 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3183 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3185 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3190 device_printf(sc->jme_dev,
3191 "could not create %dth Rx dmamap "
3192 "for %dth RX ring.\n", i, ring);
3194 for (j = 0; j < i; ++j) {
3195 rxd = &rdata->jme_rxdesc[j];
3196 bus_dmamap_destroy(rdata->jme_rx_tag,
3199 bus_dmamap_destroy(rdata->jme_rx_tag,
3200 rdata->jme_rx_sparemap);
3201 bus_dma_tag_destroy(rdata->jme_rx_tag);
3202 rdata->jme_rx_tag = NULL;
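/*
 * jme_rx_intr: service every Rx ring whose coalescing status bit is
 * set, collecting frames into per-CPU chains under the ring's
 * serializer and dispatching them in one batch.
 */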
3210 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3212 struct mbuf_chain chain[MAXCPU];
3215 ether_input_chain_init(chain);
3216 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3217 if (status & jme_rx_status[r].jme_coal) {
3218 struct jme_rxdata *rdata =
3219 &sc->jme_cdata.jme_rx_data[r];
3221 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3222 prog += jme_rxeof_chain(sc, r, chain, -1);
3223 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3227 ether_input_dispatch(chain);
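/*
 * jme_enable_rss: program the RSS hash configuration, the Toeplitz
 * key registers and the redirect table, bringing all Rx rings into
 * use.
 */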
3231 jme_enable_rss(struct jme_softc *sc)
3234 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3237 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3239 KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3240 sc->jme_rx_ring_inuse == JME_NRXRING_4,
3241 ("%s: invalid # of RX rings (%d)\n",
3242 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3244 rssc = RSSC_HASH_64_ENTRY;
3245 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3246 rssc |= sc->jme_rx_ring_inuse >> 1;
3247 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3248 CSR_WRITE_4(sc, JME_RSSC, rssc);
3250 toeplitz_get_key(key, sizeof(key));
3251 for (i = 0; i < RSSKEY_NREGS; ++i) {
3254 keyreg = RSSKEY_REGVAL(key, i);
3255 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3257 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3261 * Create the redirect table in the following fashion:
3262 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
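* For example, with 2 rings in use each 32bit table register gets
* the byte pattern 00 01 00 01 (ind == 0x01000100); with 4 rings,
* 0x03020100 (assuming RSSTBL_REGSIZE is 4 entries per register).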
3265 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3268 q = i % sc->jme_rx_ring_inuse;
3269 ind |= q << (i * 8);
3271 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3273 for (i = 0; i < RSSTBL_NREGS; ++i)
3274 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3278 jme_disable_rss(struct jme_softc *sc)
3280 sc->jme_rx_ring_inuse = JME_NRXRING_1;
3281 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
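/*
 * Map the ifnet serialization levels onto the driver's serializers:
 * ALL takes the whole serializer array in order, MAIN the main
 * serializer, TX the Tx ring's, and RX(n) the matching Rx ring's.
 */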
3285 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3287 struct jme_softc *sc = ifp->if_softc;
3290 case IFNET_SERIALIZE_ALL:
3291 lwkt_serialize_array_enter(sc->jme_serialize_arr,
3292 sc->jme_serialize_cnt, 0);
3295 case IFNET_SERIALIZE_MAIN:
3296 lwkt_serialize_enter(&sc->jme_serialize);
3299 case IFNET_SERIALIZE_TX:
3300 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3303 case IFNET_SERIALIZE_RX(0):
3304 lwkt_serialize_enter(
3305 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3308 case IFNET_SERIALIZE_RX(1):
3309 lwkt_serialize_enter(
3310 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3313 case IFNET_SERIALIZE_RX(2):
3314 lwkt_serialize_enter(
3315 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3318 case IFNET_SERIALIZE_RX(3):
3319 lwkt_serialize_enter(
3320 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3324 panic("%s unsupported serialize type\n", ifp->if_xname);
3329 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3331 struct jme_softc *sc = ifp->if_softc;
3334 case IFNET_SERIALIZE_ALL:
3335 lwkt_serialize_array_exit(sc->jme_serialize_arr,
3336 sc->jme_serialize_cnt, 0);
3339 case IFNET_SERIALIZE_MAIN:
3340 lwkt_serialize_exit(&sc->jme_serialize);
3343 case IFNET_SERIALIZE_TX:
3344 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3347 case IFNET_SERIALIZE_RX(0):
3348 lwkt_serialize_exit(
3349 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3352 case IFNET_SERIALIZE_RX(1):
3353 lwkt_serialize_exit(
3354 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3357 case IFNET_SERIALIZE_RX(2):
3358 lwkt_serialize_exit(
3359 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3362 case IFNET_SERIALIZE_RX(3):
3363 lwkt_serialize_exit(
3364 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3368 panic("%s unsupported serialize type\n", ifp->if_xname);
3373 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3375 struct jme_softc *sc = ifp->if_softc;
3378 case IFNET_SERIALIZE_ALL:
3379 return lwkt_serialize_array_try(sc->jme_serialize_arr,
3380 sc->jme_serialize_cnt, 0);
3382 case IFNET_SERIALIZE_MAIN:
3383 return lwkt_serialize_try(&sc->jme_serialize);
3385 case IFNET_SERIALIZE_TX:
3386 return lwkt_serialize_try(&sc->jme_cdata.jme_tx_serialize);
3388 case IFNET_SERIALIZE_RX(0):
3389 return lwkt_serialize_try(
3390 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3392 case IFNET_SERIALIZE_RX(1):
3393 return lwkt_serialize_try(
3394 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3396 case IFNET_SERIALIZE_RX(2):
3397 return lwkt_serialize_try(
3398 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3400 case IFNET_SERIALIZE_RX(3):
3401 return lwkt_serialize_try(
3402 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3405 panic("%s unsupported serialize type\n", ifp->if_xname);
3412 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3413 boolean_t serialized)
3415 struct jme_softc *sc = ifp->if_softc;
3416 struct jme_rxdata *rdata;
3420 case IFNET_SERIALIZE_ALL:
3422 for (i = 0; i < sc->jme_serialize_cnt; ++i)
3423 ASSERT_SERIALIZED(sc->jme_serialize_arr[i]);
3425 for (i = 0; i < sc->jme_serialize_cnt; ++i)
3426 ASSERT_NOT_SERIALIZED(sc->jme_serialize_arr[i]);
3430 case IFNET_SERIALIZE_MAIN:
3432 ASSERT_SERIALIZED(&sc->jme_serialize);
3434 ASSERT_NOT_SERIALIZED(&sc->jme_serialize);
3437 case IFNET_SERIALIZE_TX:
3439 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3441 ASSERT_NOT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3444 case IFNET_SERIALIZE_RX(0):
3445 rdata = &sc->jme_cdata.jme_rx_data[0];
3447 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3449 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3452 case IFNET_SERIALIZE_RX(1):
3453 rdata = &sc->jme_cdata.jme_rx_data[1];
3455 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3457 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3460 case IFNET_SERIALIZE_RX(2):
3461 rdata = &sc->jme_cdata.jme_rx_data[2];
3463 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3465 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3468 case IFNET_SERIALIZE_RX(3):
3469 rdata = &sc->jme_cdata.jme_rx_data[3];
3471 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3473 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3477 panic("%s unsupported serialize type\n", ifp->if_xname);
3481 #endif /* INVARIANTS */
3484 jme_msix_try_alloc(device_t dev)
3486 struct jme_softc *sc = device_get_softc(dev);
3487 struct jme_msix_data *msix;
3488 int error, i, r, msix_enable, msix_count;
3491 msix_count = 1 + sc->jme_rx_ring_cnt;
3492 KKASSERT(msix_count <= JME_NMSIX);
3494 msix_enable = jme_msix_enable;
3495 ksnprintf(env, sizeof(env), "hw.%s.msix.enable",
3496 device_get_nameunit(dev));
3497 kgetenv_int(env, &msix_enable);
3500 * We leave the 1st MSI-X vector unused, so we
3501 * actually need msix_count + 1 MSI-X vectors.
3503 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3506 for (i = 0; i < msix_count; ++i)
3507 sc->jme_msix[i].jme_msix_rid = -1;
3511 msix = &sc->jme_msix[i++];
3512 msix->jme_msix_cpuid = 0; /* XXX Put TX to cpu0 */
3513 msix->jme_msix_arg = &sc->jme_cdata;
3514 msix->jme_msix_func = jme_msix_tx;
3515 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3516 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
3517 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3518 device_get_nameunit(dev));
3520 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3521 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3523 msix = &sc->jme_msix[i++];
3524 msix->jme_msix_cpuid = r; /* XXX Put RX to cpuX */
3525 msix->jme_msix_arg = rdata;
3526 msix->jme_msix_func = jme_msix_rx;
3527 msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
3528 msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3529 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3530 "%s rx%d", device_get_nameunit(dev), r);
3533 KKASSERT(i == msix_count);
3535 error = pci_setup_msix(dev);
3539 /* Set up jme_msix_cnt early, so we can clean up on failure */
3540 sc->jme_msix_cnt = msix_count;
3542 for (i = 0; i < msix_count; ++i) {
3543 msix = &sc->jme_msix[i];
3545 msix->jme_msix_vector = i + 1;
3546 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3547 &msix->jme_msix_rid, msix->jme_msix_cpuid);
3551 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3552 &msix->jme_msix_rid, RF_ACTIVE);
3553 if (msix->jme_msix_res == NULL) {
3559 for (i = 0; i < JME_INTR_CNT; ++i) {
3560 uint32_t intr_mask = (1 << i);
3563 if ((JME_INTRS & intr_mask) == 0)
3566 for (x = 0; x < msix_count; ++x) {
3567 msix = &sc->jme_msix[x];
3568 if (msix->jme_msix_intrs & intr_mask) {
3571 reg = i / JME_MSINUM_FACTOR;
3572 KKASSERT(reg < JME_MSINUM_CNT);
3574 shift = (i % JME_MSINUM_FACTOR) * 4;
3576 sc->jme_msinum[reg] |=
3577 (msix->jme_msix_vector << shift);
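/*
 * Each interrupt source occupies a 4 bit nibble in the MSINUM
 * registers; assuming JME_MSINUM_FACTOR is 8 sources per register,
 * interrupt source bit 10 would land in nibble 2 of MSINUM1.
 */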
3585 for (i = 0; i < JME_MSINUM_CNT; ++i) {
3586 device_printf(dev, "MSINUM%d: %#x\n", i,
3591 pci_enable_msix(dev);
3592 sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3600 jme_intr_alloc(device_t dev)
3602 struct jme_softc *sc = device_get_softc(dev);
3605 jme_msix_try_alloc(dev);
3607 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3608 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3609 &sc->jme_irq_rid, &irq_flags);
3611 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3612 &sc->jme_irq_rid, irq_flags);
3613 if (sc->jme_irq_res == NULL) {
3614 device_printf(dev, "can't allocate irq\n");
3622 jme_msix_free(device_t dev)
3624 struct jme_softc *sc = device_get_softc(dev);
3627 KKASSERT(sc->jme_msix_cnt > 1);
3629 for (i = 0; i < sc->jme_msix_cnt; ++i) {
3630 struct jme_msix_data *msix = &sc->jme_msix[i];
3632 if (msix->jme_msix_res != NULL) {
3633 bus_release_resource(dev, SYS_RES_IRQ,
3634 msix->jme_msix_rid, msix->jme_msix_res);
3635 msix->jme_msix_res = NULL;
3637 if (msix->jme_msix_rid >= 0) {
3638 pci_release_msix_vector(dev, msix->jme_msix_rid);
3639 msix->jme_msix_rid = -1;
3642 pci_teardown_msix(dev);
3646 jme_intr_free(device_t dev)
3648 struct jme_softc *sc = device_get_softc(dev);
3650 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3651 if (sc->jme_irq_res != NULL) {
3652 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3655 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3656 pci_release_msi(dev);
3663 jme_msix_tx(void *xcd)
3665 struct jme_chain_data *cd = xcd;
3666 struct jme_softc *sc = cd->jme_sc;
3667 struct ifnet *ifp = &sc->arpcom.ac_if;
3669 ASSERT_SERIALIZED(&cd->jme_tx_serialize);
3671 CSR_WRITE_4(sc, JME_INTR_STATUS,
3672 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3674 if (ifp->if_flags & IFF_RUNNING) {
3676 if (!ifq_is_empty(&ifp->if_snd))
3682 jme_msix_rx(void *xrdata)
3684 struct jme_rxdata *rdata = xrdata;
3685 struct jme_softc *sc = rdata->jme_sc;
3686 struct ifnet *ifp = &sc->arpcom.ac_if;
3689 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3691 status = CSR_READ_4(sc, JME_INTR_STATUS);
3692 status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);
3694 if (status & rdata->jme_rx_coal) {
3695 status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
3696 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3699 if (ifp->if_flags & IFF_RUNNING) {
3700 if (status & rdata->jme_rx_coal) {
3701 struct mbuf_chain chain[MAXCPU];
3704 ether_input_chain_init(chain);
3706 prog = jme_rxeof_chain(sc, rdata->jme_rx_idx,
3709 ether_input_dispatch(chain);
3712 if (status & rdata->jme_rx_empty) {
3713 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3714 RXCSR_RX_ENB | RXCSR_RXQ_START);
3720 jme_set_msinum(struct jme_softc *sc)
3724 for (i = 0; i < JME_MSINUM_CNT; ++i)
3725 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3729 jme_intr_setup(device_t dev)
3731 struct jme_softc *sc = device_get_softc(dev);
3732 struct ifnet *ifp = &sc->arpcom.ac_if;
3735 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3736 return jme_msix_setup(dev);
3738 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3739 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3741 device_printf(dev, "could not set up interrupt handler.\n");
3745 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3746 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3751 jme_intr_teardown(device_t dev)
3753 struct jme_softc *sc = device_get_softc(dev);
3755 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3756 jme_msix_teardown(dev, sc->jme_msix_cnt);
3758 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3762 jme_msix_setup(device_t dev)
3764 struct jme_softc *sc = device_get_softc(dev);
3765 struct ifnet *ifp = &sc->arpcom.ac_if;
3768 for (x = 0; x < sc->jme_msix_cnt; ++x) {
3769 struct jme_msix_data *msix = &sc->jme_msix[x];
3772 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3773 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3774 &msix->jme_msix_handle, msix->jme_msix_serialize,
3775 msix->jme_msix_desc);
3777 device_printf(dev, "could not set up %s "
3778 "interrupt handler.\n", msix->jme_msix_desc);
3779 jme_msix_teardown(dev, x);
3783 ifp->if_cpuid = 0; /* XXX */
3788 jme_msix_teardown(device_t dev, int msix_count)
3790 struct jme_softc *sc = device_get_softc(dev);
3793 for (x = 0; x < msix_count; ++x) {
3794 struct jme_msix_data *msix = &sc->jme_msix[x];
3796 bus_teardown_intr(dev, msix->jme_msix_res,
3797 msix->jme_msix_handle);