/*
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
/* Define the following to disable printing Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
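/*
 * Note: jme_ioctl() loads JME_CSUM_FEATURES into if_hwassist whenever
 * Tx checksum offload (IFCAP_TXCSUM) is enabled, and clears it again
 * when the MTU grows past the 2K Tx FIFO.
 */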
#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef DEVICE_POLLING
static void	jme_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_rxdata *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_rxdata *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_rxdata *);
static int	jme_rxring_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);

static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);

static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ 0, 0 }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
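/*
 * Per-Rx-ring interrupt status bits, indexed by Rx ring.  jme_intr()
 * uses the jme_coal/jme_comp masks to ack the per-queue coalescing and
 * completion interrupts, and jme_attach() copies each entry into the
 * corresponding ring's jme_rxdata.
 */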
static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 1;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
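/*
 * These tunables are read from the kernel environment, e.g. via
 * /boot/loader.conf.  The values below are only an illustration:
 *
 *   hw.jme.rx_desc_count=512
 *   hw.jme.rx_ring_count=2
 *   hw.jme.msix.enable=0
 */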
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has the side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings.  So the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the value of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring them afterwards is not sufficient to ensure
	 * correct MAC state, because stopping the MAC can take a while
	 * and the hardware might have updated the JME_TXNDA/JME_RXNDA
	 * registers during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}

	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	{
		/* Reenable interrupts. */
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	}
}
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}
static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next EEPROM descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
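	/*
	 * The station address is stored little-endian: PAR0 carries
	 * bytes 0-3 and the low 16 bits of PAR1 carry bytes 4-5.
	 */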
	par1 &= 0xffff;
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1b;
		eaddr[2] = 0x8c;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt, pmc;
	uint8_t eaddr[ETHER_ADDR_LEN];

	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
	    jme_tx_desc_count);
	sc->jme_tx_desc_cnt = roundup(sc->jme_tx_desc_cnt, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

	sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
	    jme_rx_ring_count);
	sc->jme_cdata.jme_rx_ring_cnt =
	    if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);

	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;
	sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}
	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	sc->jme_cdata.jme_sc = sc;
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map entire memory
	 * space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return (ENXIO);
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
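	/*
	 * For example, FM revision 2 combined with ECO revision 3 is
	 * reported as 0x23 by the computation above.
	 */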
	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
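		/*
		 * Bits 14:12 of the PCIe device control register encode
		 * the max read request size as 128 << n, e.g. n = 2
		 * decodes to 512 bytes; bits 7:5 encode the max TLP
		 * payload size the same way.
		 */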
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = JME_CSUM_FEATURES;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);

				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
			}
		}
		/* XXX should we clear JME_WA_EXTFIFO */
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);
fail:
	jme_detach(dev);
	return (error);
}
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
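
	/*
	 * The knobs above land under the device's nameunit, so they can
	 * be tuned at runtime, e.g. (unit number is hypothetical):
	 *   sysctl hw.jme0.rx_coal_pkt=64
	 */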
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_ring_cnt,
	    0, "RX ring count");
#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_pkt[32];

		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_pkt, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
	}
#endif

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descriptors is set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned by JME_NDESC_ALIGN (16 currently)
	 */
	coal_max = sc->jme_tx_desc_cnt / 6;
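	/* e.g. 512 Tx descriptors would cap tx_coal_pkt at 512/6 = 85. */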
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, j;

	sc->jme_cdata.jme_txdesc =
	    kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rxdesc =
		    kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			    M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    JME_TX_RING_SIZE(sc),
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx rings */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not as we
 * have no control after powering off.  If the renegotiation fails,
 * WOL may not work.  Running at 1Gbps draws more power than the 375mA
 * at 3.3V that is specified in the PCI specification, and that would
 * result in power to the ethernet controller being shut down
 * completely.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control
 * to softc and restore the same link again after resuming.
 * PHY handling such as power down/resetting to 100Mbps
 * may be better handled in suspend method in phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			pause("jmelnk", hz);
		}
		if (i == MII_ANEGTICKS_GIGE) {
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
		}
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
	jme_setwol(sc);
	ifnet_deserialize_all(ifp);

	return (0);
}
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int pmc;

	ifnet_serialize_all(ifp);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
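
	/*
	 * Descriptor accounting: at this point i is 0 in 64-bit mode
	 * (the first desc is a payload-less symbol desc) and 1 in
	 * 32-bit mode (the first desc already carries segment 0).
	 * tx_ndesc therefore starts at 1 - i, and nsegs is added once
	 * the chain is built below, for a total of 1 + nsegs descs in
	 * 64-bit mode and nsegs descs in 32-bit mode.
	 */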
	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;

	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check the number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	/*
	 * Reading TXCSR takes a very long time under heavy load,
	 * so cache the TXCSR value and write the ORed value with
	 * the kick command to TXCSR.  This saves one register
	 * access cycle.
	 */
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
	    TXCSR_TXQ_N_START(TXCSR_TXQ0));
	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = JME_TX_TIMEOUT;
}
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed, but availability of Tx checksum
			 * offload should be checked against the new MTU size,
			 * as the FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);

		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;

		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under
			 * heavy load, so cache the RXCSR value and write
			 * the ORed value with the kick command to RXCSR.
			 * This saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
static __inline void
jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &rdata->jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
	}
}

static __inline struct pktinfo *
jme_pktinfo(struct pktinfo *pi, uint32_t flags)
{
	if (flags & JME_RD_IPV4)
		pi->pi_netisr = NETISR_IP;
	else if (flags & JME_RD_IPV6)
		pi->pi_netisr = NETISR_IPV6;
	else
		return NULL;

	pi->pi_flags = 0;
	pi->pi_l3proto = IPPROTO_UNKNOWN;

	if (flags & JME_RD_MORE_FRAG)
		pi->pi_flags |= PKTINFO_FLAG_FRAG;
	else if (flags & JME_RD_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else if (flags & JME_RD_UDP)
		pi->pi_l3proto = IPPROTO_UDP;
	else
		pi = NULL;

	return pi;
}
/* Receive a frame. */
static void
jme_rxpkt(struct jme_rxdata *rdata)
{
	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
			"hash 0x%08x, hash info 0x%08x\n",
			rdata->jme_rx_idx, flags, hash, hashinfo);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(rdata, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		if_printf(ifp, "%s : receive error = 0x%b\n",
			  __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(rdata, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			jme_discard_rxbufs(rdata, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(rdata);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * a multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				     (MCLBYTES * (nsegs - 2)));
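				/*
				 * Example (MCLBYTES = 2048, 10-byte Rx
				 * pad): jme_rxlen = 5000 with nsegs = 3
				 * splits into 2038 + 2048 + 914 bytes.
				 */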
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding that
			 * are used to align the IP header on a 32-bit
			 * boundary.  Also note, the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(rdata->jme_sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			ether_input_pkt(ifp, m, pi);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(rdata);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_pkt++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
}
static void
jme_rxeof(struct jme_rxdata *rdata, int count)
{
	struct jme_desc *desc;
	int nsegs, pktlen;

	for (;;) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif
		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors. I'm not
		 * sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "RX fragment count(%d) and "
			    "packet size(%d) mismatch\n", nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(rdata);
	}
}
static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ifnet_serialize_all(ifp);

	mii_tick(mii);
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	ifnet_deserialize_all(ifp);
}
2298 jme_reset(struct jme_softc *sc)
2302 /* Make sure that TX and RX are stopped */
2307 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2311 * Hold reset bit before stop reset
2314 /* Disable TXMAC and TXOFL clock sources */
2315 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2316 /* Disable RXMAC clock source */
2317 val = CSR_READ_4(sc, JME_GPREG1);
2318 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2320 CSR_READ_4(sc, JME_GHC);
2323 CSR_WRITE_4(sc, JME_GHC, 0);
2325 CSR_READ_4(sc, JME_GHC);
2328 * Clear reset bit after stop reset
2331 /* Enable TXMAC and TXOFL clock sources */
2332 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2333 /* Enable RXMAC clock source */
2334 val = CSR_READ_4(sc, JME_GPREG1);
2335 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2337 CSR_READ_4(sc, JME_GHC);
2339 /* Disable TXMAC and TXOFL clock sources */
2340 CSR_WRITE_4(sc, JME_GHC, 0);
2341 /* Disable RXMAC clock source */
2342 val = CSR_READ_4(sc, JME_GPREG1);
2343 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2345 CSR_READ_4(sc, JME_GHC);
2347 /* Enable TX and RX */
2348 val = CSR_READ_4(sc, JME_TXCSR);
2349 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2350 val = CSR_READ_4(sc, JME_RXCSR);
2351 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2353 CSR_READ_4(sc, JME_TXCSR);
2354 CSR_READ_4(sc, JME_RXCSR);
2356 /* Enable TXMAC and TXOFL clock sources */
2357 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2358	/* Enable RXMAC clock source */
2359 val = CSR_READ_4(sc, JME_GPREG1);
2360 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2362 CSR_READ_4(sc, JME_GHC);
2364 /* Stop TX and RX */
2372 struct jme_softc *sc = xsc;
2373 struct ifnet *ifp = &sc->arpcom.ac_if;
2374 struct mii_data *mii;
2375 uint8_t eaddr[ETHER_ADDR_LEN];
2380 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2383 * Cancel any pending I/O.
2388 * Reset the chip to a known state.
2393	 * Set up the MSI/MSI-X vector-to-interrupt mapping
2398 howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2399 KKASSERT(sc->jme_txd_spare >= 1);
2402	 * If we use 64-bit address mode for transmitting, each Tx
2403	 * request needs one more symbol descriptor.
2405 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2406 sc->jme_txd_spare += 1;
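	/*
	 * Example: with a 1500 byte MTU, if_mtu plus the VLAN header
	 * (1518 bytes) fits in one 2K cluster, so the spare count
	 * starts at 1; a 9000 byte jumbo MTU would need
	 * howmany(9018, 2048) == 5.
	 */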
2408 if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
2411 jme_disable_rss(sc);
2413 /* Init RX descriptors */
2414 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2415 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2417 if_printf(ifp, "initialization failed: "
2418 "no memory for %dth RX ring.\n", r);
2424 /* Init TX descriptors */
2425 jme_init_tx_ring(sc);
2427 /* Initialize shadow status block. */
2430 /* Reprogram the station address. */
2431 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2432 CSR_WRITE_4(sc, JME_PAR0,
2433 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2434 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
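	/*
	 * The station address is programmed least significant byte
	 * first, e.g. 00:11:22:33:44:55 yields PAR0 == 0x33221100
	 * and PAR1 == 0x00005544.
	 */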
2437 * Configure Tx queue.
2438 * Tx priority queue weight value : 0
2439 * Tx FIFO threshold for processing next packet : 16QW
2440 * Maximum Tx DMA length : 512
2441 * Allow Tx DMA burst.
2443 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2444 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2445 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2446 sc->jme_txcsr |= sc->jme_tx_dma_size;
2447 sc->jme_txcsr |= TXCSR_DMA_BURST;
2448 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2450 /* Set Tx descriptor counter. */
2451 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2453 /* Set Tx ring address to the hardware. */
2454 paddr = sc->jme_cdata.jme_tx_ring_paddr;
2455 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2456 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2458 /* Configure TxMAC parameters. */
2459 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2460 reg |= TXMAC_THRESH_1_PKT;
2461 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2462 CSR_WRITE_4(sc, JME_TXMAC, reg);
2465 * Configure Rx queue.
2466 * FIFO full threshold for transmitting Tx pause packet : 128T
2467 * FIFO threshold for processing next packet : 128QW
2469 * Max Rx DMA length : 128
2470 * Rx descriptor retry : 32
2471 * Rx descriptor retry time gap : 256ns
2472 * Don't receive runt/bad frame.
2474 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2477	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2478	 * than 4K bytes will suffer from Rx FIFO overruns. So
2479	 * decrease the FIFO threshold to reduce FIFO overruns for
2480	 * frames larger than 4000 bytes.
2481	 * For best performance with standard MTU-sized frames, use the
2482	 * maximum allowable FIFO threshold, 128QW.
2484 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2486 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2488 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2490 /* Improve PCI Express compatibility */
2491 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2493 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2494 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2495 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2496 /* XXX TODO DROP_BAD */
2498 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2499 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2501 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2503 /* Set Rx descriptor counter. */
2504 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2506 /* Set Rx ring address to the hardware. */
2507 paddr = rdata->jme_rx_ring_paddr;
2508 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2509 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2512 /* Clear receive filter. */
2513 CSR_WRITE_4(sc, JME_RXMAC, 0);
2515 /* Set up the receive filter. */
2520	 * Disable all WOL bits, as WOL can interfere with normal Rx
2521	 * operation. Also clear the WOL detection status bits.
2523 reg = CSR_READ_4(sc, JME_PMCS);
2524 reg &= ~PMCS_WOL_ENB_MASK;
2525 CSR_WRITE_4(sc, JME_PMCS, reg);
2528	 * Pad 10 bytes right before the received frame. This greatly
2529	 * helps Rx performance on strict-alignment architectures, as
2530	 * the frame need not be copied to align the payload.
2532 reg = CSR_READ_4(sc, JME_RXMAC);
2533 reg |= RXMAC_PAD_10BYTES;
2535 if (ifp->if_capenable & IFCAP_RXCSUM)
2536 reg |= RXMAC_CSUM_ENB;
2537 CSR_WRITE_4(sc, JME_RXMAC, reg);
2539 /* Configure general purpose reg0 */
2540 reg = CSR_READ_4(sc, JME_GPREG0);
2541 reg &= ~GPREG0_PCC_UNIT_MASK;
2542 /* Set PCC timer resolution to micro-seconds unit. */
2543 reg |= GPREG0_PCC_UNIT_US;
2545	 * Disable all shadow register posting, as we have to read the
2546	 * JME_INTR_STATUS register in jme_intr. Also, it seems
2547	 * hard to synchronize the interrupt status between the
2548	 * hardware and software with shadow posting, due to the
2549	 * requirements of bus_dmamap_sync(9).
2551 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2552 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2553 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2554 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2555 /* Disable posting of DW0. */
2556 reg &= ~GPREG0_POST_DW0_ENB;
2557 /* Clear PME message. */
2558 reg &= ~GPREG0_PME_ENB;
2559 /* Set PHY address. */
2560 reg &= ~GPREG0_PHY_ADDR_MASK;
2561 reg |= sc->jme_phyaddr;
2562 CSR_WRITE_4(sc, JME_GPREG0, reg);
2564 /* Configure Tx queue 0 packet completion coalescing. */
2565 jme_set_tx_coal(sc);
2567 /* Configure Rx queues packet completion coalescing. */
2568 jme_set_rx_coal(sc);
2570 /* Configure shadow status block but don't enable posting. */
2571 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2572 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2573 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2575 /* Disable Timer 1 and Timer 2. */
2576 CSR_WRITE_4(sc, JME_TIMER1, 0);
2577 CSR_WRITE_4(sc, JME_TIMER2, 0);
2579 /* Configure retry transmit period, retry limit value. */
2580 CSR_WRITE_4(sc, JME_TXTRHD,
2581 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2582 TXTRHD_RT_PERIOD_MASK) |
2583 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2584	    TXTRHD_RT_LIMIT_MASK));
2586 #ifdef DEVICE_POLLING
2587 if (!(ifp->if_flags & IFF_POLLING))
2589 /* Initialize the interrupt mask. */
2590 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2591 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
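	/*
	 * Writing all ones to JME_INTR_STATUS acknowledges any stale
	 * events, so the handler starts from a clean slate once the
	 * mask is opened.
	 */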
2594	 * Enabling the Tx/Rx DMA engines and Rx queue processing is
2595	 * deferred until a valid link is detected, in jme_miibus_statchg.
2597 sc->jme_flags &= ~JME_FLAG_LINK;
2599 /* Set the current media. */
2600 mii = device_get_softc(sc->jme_miibus);
2603 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2605 ifp->if_flags |= IFF_RUNNING;
2606 ifp->if_flags &= ~IFF_OACTIVE;
2610 jme_stop(struct jme_softc *sc)
2612 struct ifnet *ifp = &sc->arpcom.ac_if;
2613 struct jme_txdesc *txd;
2614 struct jme_rxdesc *rxd;
2615 struct jme_rxdata *rdata;
2618 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2621 * Mark the interface down and cancel the watchdog timer.
2623 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2626 callout_stop(&sc->jme_tick_ch);
2627 sc->jme_flags &= ~JME_FLAG_LINK;
2630 * Disable interrupts.
2632 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2633 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2635 /* Disable updating shadow status block. */
2636 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2637 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2639 /* Stop receiver, transmitter. */
2644	 * Free partially finished RX segments
2646 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2647 rdata = &sc->jme_cdata.jme_rx_data[r];
2648 if (rdata->jme_rxhead != NULL)
2649 m_freem(rdata->jme_rxhead);
2650 JME_RXCHAIN_RESET(rdata);
2654 * Free RX and TX mbufs still in the queues.
2656 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2657 rdata = &sc->jme_cdata.jme_rx_data[r];
2658 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2659 rxd = &rdata->jme_rxdesc[i];
2660 if (rxd->rx_m != NULL) {
2661 bus_dmamap_unload(rdata->jme_rx_tag,
2668 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2669 txd = &sc->jme_cdata.jme_txdesc[i];
2670 if (txd->tx_m != NULL) {
2671 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2681 jme_stop_tx(struct jme_softc *sc)
2686 reg = CSR_READ_4(sc, JME_TXCSR);
2687 if ((reg & TXCSR_TX_ENB) == 0)
2689 reg &= ~TXCSR_TX_ENB;
2690 CSR_WRITE_4(sc, JME_TXCSR, reg);
2691 for (i = JME_TIMEOUT; i > 0; i--) {
2693 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2697 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2701 jme_stop_rx(struct jme_softc *sc)
2706 reg = CSR_READ_4(sc, JME_RXCSR);
2707 if ((reg & RXCSR_RX_ENB) == 0)
2709 reg &= ~RXCSR_RX_ENB;
2710 CSR_WRITE_4(sc, JME_RXCSR, reg);
2711 for (i = JME_TIMEOUT; i > 0; i--) {
2713 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2717	device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2721 jme_init_tx_ring(struct jme_softc *sc)
2723 struct jme_chain_data *cd;
2724 struct jme_txdesc *txd;
2727 sc->jme_cdata.jme_tx_prod = 0;
2728 sc->jme_cdata.jme_tx_cons = 0;
2729 sc->jme_cdata.jme_tx_cnt = 0;
2731 cd = &sc->jme_cdata;
2732 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2733 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2734 txd = &sc->jme_cdata.jme_txdesc[i];
2736 txd->tx_desc = &cd->jme_tx_ring[i];
2742 jme_init_ssb(struct jme_softc *sc)
2744 struct jme_chain_data *cd;
2746 cd = &sc->jme_cdata;
2747 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2751 jme_init_rx_ring(struct jme_rxdata *rdata)
2753 struct jme_rxdesc *rxd;
2756 KKASSERT(rdata->jme_rxhead == NULL &&
2757 rdata->jme_rxtail == NULL &&
2758 rdata->jme_rxlen == 0);
2759 rdata->jme_rx_cons = 0;
2761 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2762 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2765 rxd = &rdata->jme_rxdesc[i];
2767 rxd->rx_desc = &rdata->jme_rx_ring[i];
2768 error = jme_newbuf(rdata, rxd, 1);
2776 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
2778 struct jme_desc *desc;
2780 bus_dma_segment_t segs;
2784 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2788	 * The JMC250 has a 64-bit boundary alignment limitation, so
2789	 * jme(4) takes advantage of the hardware's 10-byte padding
2790	 * feature in order not to copy the entire frame to align the IP header on
2793 m->m_len = m->m_pkthdr.len = MCLBYTES;
2795 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2796 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2801 if_printf(&rdata->jme_sc->arpcom.ac_if,
2802 "can't load RX mbuf\n");
2807 if (rxd->rx_m != NULL) {
2808 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2809 BUS_DMASYNC_POSTREAD);
2810 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2812 map = rxd->rx_dmamap;
2813 rxd->rx_dmamap = rdata->jme_rx_sparemap;
2814 rdata->jme_rx_sparemap = map;
2817 desc = rxd->rx_desc;
2818 desc->buflen = htole32(segs.ds_len);
2819 desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2820 desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2821 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
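	/*
	 * Setting JME_RD_OWN hands the descriptor back to the
	 * hardware; JME_RD_INTR requests an interrupt once it has
	 * been filled.
	 */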
2827 jme_set_vlan(struct jme_softc *sc)
2829 struct ifnet *ifp = &sc->arpcom.ac_if;
2832 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2834 reg = CSR_READ_4(sc, JME_RXMAC);
2835 reg &= ~RXMAC_VLAN_ENB;
2836 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2837 reg |= RXMAC_VLAN_ENB;
2838 CSR_WRITE_4(sc, JME_RXMAC, reg);
2842 jme_set_filter(struct jme_softc *sc)
2844 struct ifnet *ifp = &sc->arpcom.ac_if;
2845 struct ifmultiaddr *ifma;
2850 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2852 rxcfg = CSR_READ_4(sc, JME_RXMAC);
2853 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2857	 * Always accept frames destined for our station address.
2858 * Always accept broadcast frames.
2860 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2862 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2863 if (ifp->if_flags & IFF_PROMISC)
2864 rxcfg |= RXMAC_PROMISC;
2865 if (ifp->if_flags & IFF_ALLMULTI)
2866 rxcfg |= RXMAC_ALLMULTI;
2867 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2868 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2869 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2874 * Set up the multicast address filter by passing all multicast
2875 * addresses through a CRC generator, and then using the low-order
2876	 * 6 bits as an index into the 64-bit multicast hash table. The
2877	 * high-order bits select the register, while the rest of the bits
2878 * select the bit within the register.
2880 rxcfg |= RXMAC_MULTICAST;
2881 bzero(mchash, sizeof(mchash));
2883 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2884 if (ifma->ifma_addr->sa_family != AF_LINK)
2886 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2887 ifma->ifma_addr), ETHER_ADDR_LEN);
2889 /* Just want the 6 least significant bits. */
2892 /* Set the corresponding bit in the hash table. */
2893 mchash[crc >> 5] |= 1 << (crc & 0x1f);
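		/*
		 * e.g. a CRC whose low 6 bits are 0x23 sets bit 3 of
		 * mchash[1], i.e. bit 35 of the 64-bit table.
		 */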
2896 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2897 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2898 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
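/*
 * The four coalescing sysctl handlers below share one pattern:
 * snapshot the current value, let sysctl_handle_int(9) copy in the
 * new one, range-check it, and reprogram the hardware only when the
 * value changed and the interface is running.
 */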
2902 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2904 struct jme_softc *sc = arg1;
2905 struct ifnet *ifp = &sc->arpcom.ac_if;
2908 ifnet_serialize_all(ifp);
2910 v = sc->jme_tx_coal_to;
2911 error = sysctl_handle_int(oidp, &v, 0, req);
2912 if (error || req->newptr == NULL)
2915 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2920 if (v != sc->jme_tx_coal_to) {
2921 sc->jme_tx_coal_to = v;
2922 if (ifp->if_flags & IFF_RUNNING)
2923 jme_set_tx_coal(sc);
2926 ifnet_deserialize_all(ifp);
2931 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2933 struct jme_softc *sc = arg1;
2934 struct ifnet *ifp = &sc->arpcom.ac_if;
2937 ifnet_serialize_all(ifp);
2939 v = sc->jme_tx_coal_pkt;
2940 error = sysctl_handle_int(oidp, &v, 0, req);
2941 if (error || req->newptr == NULL)
2944 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2949 if (v != sc->jme_tx_coal_pkt) {
2950 sc->jme_tx_coal_pkt = v;
2951 if (ifp->if_flags & IFF_RUNNING)
2952 jme_set_tx_coal(sc);
2955 ifnet_deserialize_all(ifp);
2960 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2962 struct jme_softc *sc = arg1;
2963 struct ifnet *ifp = &sc->arpcom.ac_if;
2966 ifnet_serialize_all(ifp);
2968 v = sc->jme_rx_coal_to;
2969 error = sysctl_handle_int(oidp, &v, 0, req);
2970 if (error || req->newptr == NULL)
2973 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2978 if (v != sc->jme_rx_coal_to) {
2979 sc->jme_rx_coal_to = v;
2980 if (ifp->if_flags & IFF_RUNNING)
2981 jme_set_rx_coal(sc);
2984 ifnet_deserialize_all(ifp);
2989 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2991 struct jme_softc *sc = arg1;
2992 struct ifnet *ifp = &sc->arpcom.ac_if;
2995 ifnet_serialize_all(ifp);
2997 v = sc->jme_rx_coal_pkt;
2998 error = sysctl_handle_int(oidp, &v, 0, req);
2999 if (error || req->newptr == NULL)
3002 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3007 if (v != sc->jme_rx_coal_pkt) {
3008 sc->jme_rx_coal_pkt = v;
3009 if (ifp->if_flags & IFF_RUNNING)
3010 jme_set_rx_coal(sc);
3013 ifnet_deserialize_all(ifp);
3018 jme_set_tx_coal(struct jme_softc *sc)
3022 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3024 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3025 PCCTX_COAL_PKT_MASK;
3026 reg |= PCCTX_COAL_TXQ0;
3027 CSR_WRITE_4(sc, JME_PCCTX, reg);
3031 jme_set_rx_coal(struct jme_softc *sc)
3036 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3038 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3039 PCCRX_COAL_PKT_MASK;
3040 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3041 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3044 #ifdef DEVICE_POLLING
3047 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3049 struct jme_softc *sc = ifp->if_softc;
3053 ASSERT_SERIALIZED(&sc->jme_serialize);
3057 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3060 case POLL_DEREGISTER:
3061 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3064 case POLL_AND_CHECK_STATUS:
3066 status = CSR_READ_4(sc, JME_INTR_STATUS);
3068 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3069 struct jme_rxdata *rdata =
3070 &sc->jme_cdata.jme_rx_data[r];
3072 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3073 jme_rxeof(rdata, count);
3074 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3077 if (status & INTR_RXQ_DESC_EMPTY) {
3078 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3079 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3080 RXCSR_RX_ENB | RXCSR_RXQ_START);
3083 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3085 if (!ifq_is_empty(&ifp->if_snd))
3087 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3092 #endif /* DEVICE_POLLING */
3095 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3100 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3101 JME_RX_RING_ALIGN, 0,
3102 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3103 JME_RX_RING_SIZE(rdata),
3104 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3106 device_printf(rdata->jme_sc->jme_dev,
3107 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3110 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3111 rdata->jme_rx_ring_map = dmem.dmem_map;
3112 rdata->jme_rx_ring = dmem.dmem_addr;
3113 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3119 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3123 /* Create tag for Rx buffers. */
3124 error = bus_dma_tag_create(
3125 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3126 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3127 BUS_SPACE_MAXADDR, /* lowaddr */
3128 BUS_SPACE_MAXADDR, /* highaddr */
3129 NULL, NULL, /* filter, filterarg */
3130 MCLBYTES, /* maxsize */
3132 MCLBYTES, /* maxsegsize */
3133 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3134 &rdata->jme_rx_tag);
3136 device_printf(rdata->jme_sc->jme_dev,
3137 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3141 /* Create DMA maps for Rx buffers. */
3142 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3143 &rdata->jme_rx_sparemap);
3145 device_printf(rdata->jme_sc->jme_dev,
3146 "could not create %dth spare Rx dmamap.\n",
3148 bus_dma_tag_destroy(rdata->jme_rx_tag);
3149 rdata->jme_rx_tag = NULL;
3152 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3153 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3155 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3160 device_printf(rdata->jme_sc->jme_dev,
3161 "could not create %dth Rx dmamap "
3162 "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3164 for (j = 0; j < i; ++j) {
3165 rxd = &rdata->jme_rxdesc[j];
3166 bus_dmamap_destroy(rdata->jme_rx_tag,
3169 bus_dmamap_destroy(rdata->jme_rx_tag,
3170 rdata->jme_rx_sparemap);
3171 bus_dma_tag_destroy(rdata->jme_rx_tag);
3172 rdata->jme_rx_tag = NULL;
3180 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3184 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3185 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3187 if (status & rdata->jme_rx_coal) {
3188 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3189 jme_rxeof(rdata, -1);
3190 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3196 jme_enable_rss(struct jme_softc *sc)
3199 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3202	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3203	    sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3204	    ("%s: invalid # of RX rings (%d)\n",
3205	    sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3207 rssc = RSSC_HASH_64_ENTRY;
3208 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3209 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3210 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3211 CSR_WRITE_4(sc, JME_RSSC, rssc);
3213 toeplitz_get_key(key, sizeof(key));
3214 for (i = 0; i < RSSKEY_NREGS; ++i) {
3217 keyreg = RSSKEY_REGVAL(key, i);
3218 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3220 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3224	 * Create the redirect table in the following fashion:
3225 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3228 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3231 q = i % sc->jme_cdata.jme_rx_ring_cnt;
3232 ind |= q << (i * 8);
3234 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3236 for (i = 0; i < RSSTBL_NREGS; ++i)
3237 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
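	/*
	 * Assuming RSSTBL_REGSIZE is 4 (four one-byte entries per
	 * register), every register holds the same pattern, e.g.
	 * 0x03020100 with four rings or 0x01000100 with two, so the
	 * low bits of the hash select a ring round-robin.
	 */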
3241 jme_disable_rss(struct jme_softc *sc)
3243 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3247 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3249 struct jme_softc *sc = ifp->if_softc;
3252 case IFNET_SERIALIZE_ALL:
3253 lwkt_serialize_array_enter(sc->jme_serialize_arr,
3254 sc->jme_serialize_cnt, 0);
3257 case IFNET_SERIALIZE_MAIN:
3258 lwkt_serialize_enter(&sc->jme_serialize);
3261 case IFNET_SERIALIZE_TX:
3262 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3265 case IFNET_SERIALIZE_RX(0):
3266 lwkt_serialize_enter(
3267 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3270 case IFNET_SERIALIZE_RX(1):
3271 lwkt_serialize_enter(
3272 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3275 case IFNET_SERIALIZE_RX(2):
3276 lwkt_serialize_enter(
3277 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3280 case IFNET_SERIALIZE_RX(3):
3281 lwkt_serialize_enter(
3282 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3286 panic("%s unsupported serialize type\n", ifp->if_xname);
3291 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3293 struct jme_softc *sc = ifp->if_softc;
3296 case IFNET_SERIALIZE_ALL:
3297 lwkt_serialize_array_exit(sc->jme_serialize_arr,
3298 sc->jme_serialize_cnt, 0);
3301 case IFNET_SERIALIZE_MAIN:
3302 lwkt_serialize_exit(&sc->jme_serialize);
3305 case IFNET_SERIALIZE_TX:
3306 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3309 case IFNET_SERIALIZE_RX(0):
3310 lwkt_serialize_exit(
3311 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3314 case IFNET_SERIALIZE_RX(1):
3315 lwkt_serialize_exit(
3316 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3319 case IFNET_SERIALIZE_RX(2):
3320 lwkt_serialize_exit(
3321 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3324 case IFNET_SERIALIZE_RX(3):
3325 lwkt_serialize_exit(
3326 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3330 panic("%s unsupported serialize type\n", ifp->if_xname);
3335 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3337 struct jme_softc *sc = ifp->if_softc;
3340 case IFNET_SERIALIZE_ALL:
3341 return lwkt_serialize_array_try(sc->jme_serialize_arr,
3342 sc->jme_serialize_cnt, 0);
3344 case IFNET_SERIALIZE_MAIN:
3345 return lwkt_serialize_try(&sc->jme_serialize);
3347 case IFNET_SERIALIZE_TX:
3348 return lwkt_serialize_try(&sc->jme_cdata.jme_tx_serialize);
3350 case IFNET_SERIALIZE_RX(0):
3351 return lwkt_serialize_try(
3352 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3354 case IFNET_SERIALIZE_RX(1):
3355 return lwkt_serialize_try(
3356 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3358 case IFNET_SERIALIZE_RX(2):
3359 return lwkt_serialize_try(
3360 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3362 case IFNET_SERIALIZE_RX(3):
3363 return lwkt_serialize_try(
3364 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3367 panic("%s unsupported serialize type\n", ifp->if_xname);
3374 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3375 boolean_t serialized)
3377 struct jme_softc *sc = ifp->if_softc;
3378 struct jme_rxdata *rdata;
3382 case IFNET_SERIALIZE_ALL:
3384 for (i = 0; i < sc->jme_serialize_cnt; ++i)
3385 ASSERT_SERIALIZED(sc->jme_serialize_arr[i]);
3387 for (i = 0; i < sc->jme_serialize_cnt; ++i)
3388 ASSERT_NOT_SERIALIZED(sc->jme_serialize_arr[i]);
3392 case IFNET_SERIALIZE_MAIN:
3394 ASSERT_SERIALIZED(&sc->jme_serialize);
3396 ASSERT_NOT_SERIALIZED(&sc->jme_serialize);
3399 case IFNET_SERIALIZE_TX:
3401 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3403 ASSERT_NOT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3406 case IFNET_SERIALIZE_RX(0):
3407 rdata = &sc->jme_cdata.jme_rx_data[0];
3409 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3411 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3414 case IFNET_SERIALIZE_RX(1):
3415 rdata = &sc->jme_cdata.jme_rx_data[1];
3417 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3419 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3422 case IFNET_SERIALIZE_RX(2):
3423 rdata = &sc->jme_cdata.jme_rx_data[2];
3425 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3427 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3430 case IFNET_SERIALIZE_RX(3):
3431 rdata = &sc->jme_cdata.jme_rx_data[3];
3433 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3435 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3439 panic("%s unsupported serialize type\n", ifp->if_xname);
3443 #endif /* INVARIANTS */
3446 jme_msix_try_alloc(device_t dev)
3448 struct jme_softc *sc = device_get_softc(dev);
3449 struct jme_msix_data *msix;
3450 int error, i, r, msix_enable, msix_count;
3452 msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt;
3453 KKASSERT(msix_count <= JME_NMSIX);
3455 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3458 * We leave the 1st MSI-X vector unused, so we
3459 * actually need msix_count + 1 MSI-X vectors.
3461 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3464 for (i = 0; i < msix_count; ++i)
3465 sc->jme_msix[i].jme_msix_rid = -1;
3469 msix = &sc->jme_msix[i++];
3470 msix->jme_msix_cpuid = 0; /* XXX Put TX to cpu0 */
3471 msix->jme_msix_arg = &sc->jme_cdata;
3472 msix->jme_msix_func = jme_msix_tx;
3473 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3474 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
3475 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3476 device_get_nameunit(dev));
3478 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3479 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3481 msix = &sc->jme_msix[i++];
3482 msix->jme_msix_cpuid = r; /* XXX Put RX to cpuX */
3483 msix->jme_msix_arg = rdata;
3484 msix->jme_msix_func = jme_msix_rx;
3485 msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
3486 msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3487 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3488 "%s rx%d", device_get_nameunit(dev), r);
3491 KKASSERT(i == msix_count);
3493 error = pci_setup_msix(dev);
3497	/* Set up jme_msix_cnt early, so we can clean up on failure */
3498 sc->jme_msix_cnt = msix_count;
3500 for (i = 0; i < msix_count; ++i) {
3501 msix = &sc->jme_msix[i];
3503 msix->jme_msix_vector = i + 1;
3504 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3505 &msix->jme_msix_rid, msix->jme_msix_cpuid);
3509 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3510 &msix->jme_msix_rid, RF_ACTIVE);
3511 if (msix->jme_msix_res == NULL) {
3517 for (i = 0; i < JME_INTR_CNT; ++i) {
3518 uint32_t intr_mask = (1 << i);
3521 if ((JME_INTRS & intr_mask) == 0)
3524 for (x = 0; x < msix_count; ++x) {
3525 msix = &sc->jme_msix[x];
3526 if (msix->jme_msix_intrs & intr_mask) {
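				/*
				 * Pack this vector into its 4-bit
				 * MSINUM field; assuming
				 * JME_MSINUM_FACTOR is 8, interrupt
				 * bit 9, for example, lands in
				 * register 1, bits 7:4.
				 */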
3529 reg = i / JME_MSINUM_FACTOR;
3530 KKASSERT(reg < JME_MSINUM_CNT);
3532 shift = (i % JME_MSINUM_FACTOR) * 4;
3534 sc->jme_msinum[reg] |=
3535 (msix->jme_msix_vector << shift);
3543 for (i = 0; i < JME_MSINUM_CNT; ++i) {
3544 device_printf(dev, "MSINUM%d: %#x\n", i,
3549 pci_enable_msix(dev);
3550 sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3558 jme_intr_alloc(device_t dev)
3560 struct jme_softc *sc = device_get_softc(dev);
3563 jme_msix_try_alloc(dev);
3565 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3566 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3567 &sc->jme_irq_rid, &irq_flags);
3569 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3570 &sc->jme_irq_rid, irq_flags);
3571 if (sc->jme_irq_res == NULL) {
3572 device_printf(dev, "can't allocate irq\n");
3580 jme_msix_free(device_t dev)
3582 struct jme_softc *sc = device_get_softc(dev);
3585 KKASSERT(sc->jme_msix_cnt > 1);
3587 for (i = 0; i < sc->jme_msix_cnt; ++i) {
3588 struct jme_msix_data *msix = &sc->jme_msix[i];
3590 if (msix->jme_msix_res != NULL) {
3591 bus_release_resource(dev, SYS_RES_IRQ,
3592 msix->jme_msix_rid, msix->jme_msix_res);
3593 msix->jme_msix_res = NULL;
3595 if (msix->jme_msix_rid >= 0) {
3596 pci_release_msix_vector(dev, msix->jme_msix_rid);
3597 msix->jme_msix_rid = -1;
3600 pci_teardown_msix(dev);
3604 jme_intr_free(device_t dev)
3606 struct jme_softc *sc = device_get_softc(dev);
3608 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3609 if (sc->jme_irq_res != NULL) {
3610 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3613 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3614 pci_release_msi(dev);
3621 jme_msix_tx(void *xcd)
3623 struct jme_chain_data *cd = xcd;
3624 struct jme_softc *sc = cd->jme_sc;
3625 struct ifnet *ifp = &sc->arpcom.ac_if;
3627 ASSERT_SERIALIZED(&cd->jme_tx_serialize);
3629 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3631 CSR_WRITE_4(sc, JME_INTR_STATUS,
3632 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3634 if (ifp->if_flags & IFF_RUNNING) {
3636 if (!ifq_is_empty(&ifp->if_snd))
3640 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3644 jme_msix_rx(void *xrdata)
3646 struct jme_rxdata *rdata = xrdata;
3647 struct jme_softc *sc = rdata->jme_sc;
3648 struct ifnet *ifp = &sc->arpcom.ac_if;
3651 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3653 CSR_WRITE_4(sc, JME_INTR_MASK_CLR,
3654 (rdata->jme_rx_coal | rdata->jme_rx_empty));
3656 status = CSR_READ_4(sc, JME_INTR_STATUS);
3657 status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);
3659 if (status & rdata->jme_rx_coal)
3660 status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
3661 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3663 if (ifp->if_flags & IFF_RUNNING) {
3664 if (status & rdata->jme_rx_coal)
3665 jme_rxeof(rdata, -1);
3667 if (status & rdata->jme_rx_empty) {
3668 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3669 RXCSR_RX_ENB | RXCSR_RXQ_START);
3673 CSR_WRITE_4(sc, JME_INTR_MASK_SET,
3674 (rdata->jme_rx_coal | rdata->jme_rx_empty));
3678 jme_set_msinum(struct jme_softc *sc)
3682 for (i = 0; i < JME_MSINUM_CNT; ++i)
3683 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3687 jme_intr_setup(device_t dev)
3689 struct jme_softc *sc = device_get_softc(dev);
3690 struct ifnet *ifp = &sc->arpcom.ac_if;
3693 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3694 return jme_msix_setup(dev);
3696 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3697 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3699 device_printf(dev, "could not set up interrupt handler.\n");
3703 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3704 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3709 jme_intr_teardown(device_t dev)
3711 struct jme_softc *sc = device_get_softc(dev);
3713 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3714 jme_msix_teardown(dev, sc->jme_msix_cnt);
3716 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3720 jme_msix_setup(device_t dev)
3722 struct jme_softc *sc = device_get_softc(dev);
3723 struct ifnet *ifp = &sc->arpcom.ac_if;
3726 for (x = 0; x < sc->jme_msix_cnt; ++x) {
3727 struct jme_msix_data *msix = &sc->jme_msix[x];
3730 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3731 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3732 &msix->jme_msix_handle, msix->jme_msix_serialize,
3733 msix->jme_msix_desc);
3735 device_printf(dev, "could not set up %s "
3736 "interrupt handler.\n", msix->jme_msix_desc);
3737 jme_msix_teardown(dev, x);
3741 ifp->if_cpuid = 0; /* XXX */
3746 jme_msix_teardown(device_t dev, int msix_count)
3748 struct jme_softc *sc = device_get_softc(dev);
3751 for (x = 0; x < msix_count; ++x) {
3752 struct jme_msix_data *msix = &sc->jme_msix[x];
3754 bus_teardown_intr(dev, msix->jme_msix_res,
3755 msix->jme_msix_handle);