2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
31 #include "opt_polling.h"
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
41 #include <sys/serialize.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
46 #include <net/ethernet.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/ifq_var.h>
53 #include <net/vlan/if_vlan_var.h>
54 #include <net/vlan/if_vlan_ether.h>
56 #include <dev/netif/mii_layer/miivar.h>
57 #include <dev/netif/mii_layer/jmphyreg.h>
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pcidevs.h>
63 #include <dev/netif/jme/if_jmereg.h>
64 #include <dev/netif/jme/if_jmevar.h>
66 #include "miibus_if.h"
68 /* Define the following to enable printing of Rx errors. */
69 #undef JME_SHOW_ERRORS
71 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
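/*
 * These checksum-offload flags are advertised through if_hwassist at
 * attach time; jme_encap() translates each flag set in a packet's
 * csum_flags into the matching JME_TD_IPCSUM/TCPCSUM/UDPCSUM
 * descriptor bit, and jme_ioctl() toggles them when IFCAP_TXCSUM changes.
 */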
76 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
78 if ((sc)->jme_rss_debug > (lvl)) \
79 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
81 #else /* !JME_RSS_DEBUG */
82 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
83 #endif /* JME_RSS_DEBUG */
85 static int jme_probe(device_t);
86 static int jme_attach(device_t);
87 static int jme_detach(device_t);
88 static int jme_shutdown(device_t);
89 static int jme_suspend(device_t);
90 static int jme_resume(device_t);
92 static int jme_miibus_readreg(device_t, int, int);
93 static int jme_miibus_writereg(device_t, int, int, int);
94 static void jme_miibus_statchg(device_t);
96 static void jme_init(void *);
97 static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
98 static void jme_start(struct ifnet *);
99 static void jme_watchdog(struct ifnet *);
100 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
101 static int jme_mediachange(struct ifnet *);
102 #ifdef DEVICE_POLLING
103 static void jme_poll(struct ifnet *, enum poll_cmd, int);
106 static void jme_intr(void *);
107 static void jme_txeof(struct jme_softc *);
108 static void jme_rxeof(struct jme_softc *, int);
109 static int jme_rxeof_chain(struct jme_softc *, int,
110 struct mbuf_chain *, int);
111 static void jme_rx_intr(struct jme_softc *, uint32_t);
113 static int jme_dma_alloc(struct jme_softc *);
114 static void jme_dma_free(struct jme_softc *, int);
115 static void jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
116 static void jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
118 static int jme_init_rx_ring(struct jme_softc *, int);
119 static void jme_init_tx_ring(struct jme_softc *);
120 static void jme_init_ssb(struct jme_softc *);
121 static int jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
122 static int jme_encap(struct jme_softc *, struct mbuf **);
123 static void jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
124 static int jme_rxring_dma_alloc(struct jme_softc *, bus_addr_t, int);
125 static int jme_rxbuf_dma_alloc(struct jme_softc *, int);
127 static void jme_tick(void *);
128 static void jme_stop(struct jme_softc *);
129 static void jme_reset(struct jme_softc *);
130 static void jme_set_vlan(struct jme_softc *);
131 static void jme_set_filter(struct jme_softc *);
132 static void jme_stop_tx(struct jme_softc *);
133 static void jme_stop_rx(struct jme_softc *);
134 static void jme_mac_config(struct jme_softc *);
135 static void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
136 static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
137 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
139 static void jme_setwol(struct jme_softc *);
140 static void jme_setlinkspeed(struct jme_softc *);
142 static void jme_set_tx_coal(struct jme_softc *);
143 static void jme_set_rx_coal(struct jme_softc *);
144 static void jme_enable_rss(struct jme_softc *);
145 static void jme_disable_rss(struct jme_softc *);
147 static void jme_sysctl_node(struct jme_softc *);
148 static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
149 static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
150 static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
151 static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
154 * Devices supported by this driver.
156 static const struct jme_dev {
157 uint16_t jme_vendorid;
158 uint16_t jme_deviceid;
160 const char *jme_name;
162 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
164 "JMicron Inc, JMC250 Gigabit Ethernet" },
165 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
167 "JMicron Inc, JMC260 Fast Ethernet" },
171 static device_method_t jme_methods[] = {
172 /* Device interface. */
173 DEVMETHOD(device_probe, jme_probe),
174 DEVMETHOD(device_attach, jme_attach),
175 DEVMETHOD(device_detach, jme_detach),
176 DEVMETHOD(device_shutdown, jme_shutdown),
177 DEVMETHOD(device_suspend, jme_suspend),
178 DEVMETHOD(device_resume, jme_resume),
181 DEVMETHOD(bus_print_child, bus_generic_print_child),
182 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
185 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
186 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
187 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
192 static driver_t jme_driver = {
195 sizeof(struct jme_softc)
198 static devclass_t jme_devclass;
200 DECLARE_DUMMY_MODULE(if_jme);
201 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
202 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
203 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
205 static const struct {
208 } jme_rx_status[JME_NRXRING_MAX] = {
209 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
210 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
211 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
212 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
215 static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
216 static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
217 static int jme_rx_ring_count = JME_NRXRING_DEF;
219 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
220 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
221 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
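/*
 * The tunables above can be set from the loader before the driver
 * attaches, e.g. in /boot/loader.conf (example values):
 *
 *     hw.jme.rx_desc_count=512
 *     hw.jme.rx_ring_count=2
 *
 * jme_attach() rounds the descriptor counts up to a multiple of
 * JME_NDESC_ALIGN and clamps them to JME_NDESC_MAX; the RX ring count
 * is limited by ncpus2 and the supported JME_NRXRING_* values.
 */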
224 * Read a PHY register on the MII of the JMC250.
227 jme_miibus_readreg(device_t dev, int phy, int reg)
229 struct jme_softc *sc = device_get_softc(dev);
233 /* For FPGA version, PHY address 0 should be ignored. */
234 if (sc->jme_caps & JME_CAP_FPGA) {
238 if (sc->jme_phyaddr != phy)
242 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
243 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
245 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
247 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
251 device_printf(sc->jme_dev, "phy read timeout: "
252 "phy %d, reg %d\n", phy, reg);
256 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
260 * Write a PHY register on the MII of the JMC250.
263 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
265 struct jme_softc *sc = device_get_softc(dev);
268 /* For FPGA version, PHY address 0 should be ignored. */
269 if (sc->jme_caps & JME_CAP_FPGA) {
273 if (sc->jme_phyaddr != phy)
277 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
278 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
279 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
281 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
283 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
287 device_printf(sc->jme_dev, "phy write timeout: "
288 "phy %d, reg %d\n", phy, reg);
295 * Callback from MII layer when media changes.
298 jme_miibus_statchg(device_t dev)
300 struct jme_softc *sc = device_get_softc(dev);
301 struct ifnet *ifp = &sc->arpcom.ac_if;
302 struct mii_data *mii;
303 struct jme_txdesc *txd;
307 ASSERT_SERIALIZED(ifp->if_serializer);
309 if ((ifp->if_flags & IFF_RUNNING) == 0)
312 mii = device_get_softc(sc->jme_miibus);
314 sc->jme_flags &= ~JME_FLAG_LINK;
315 if ((mii->mii_media_status & IFM_AVALID) != 0) {
316 switch (IFM_SUBTYPE(mii->mii_media_active)) {
319 sc->jme_flags |= JME_FLAG_LINK;
322 if (sc->jme_caps & JME_CAP_FASTETH)
324 sc->jme_flags |= JME_FLAG_LINK;
332 * Disabling the Rx/Tx MACs has the side effect of resetting
333 * the JME_TXNDA/JME_RXNDA registers to the first address of
334 * the Tx/Rx descriptor rings, so the driver has to reset its
335 * internal producer/consumer pointers and reclaim any
336 * allocated resources. Note that just saving the values of
337 * JME_TXNDA and JME_RXNDA before stopping the MACs and
338 * restoring them afterwards is not sufficient to guarantee
339 * a correct MAC state, because stopping the MACs can take a
340 * while and the hardware might have updated the
341 * JME_TXNDA/JME_RXNDA registers
342 * during the stop operation.
345 /* Disable interrupts */
346 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
349 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
351 callout_stop(&sc->jme_tick_ch);
353 /* Stop receiver/transmitter. */
357 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
358 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
361 if (rdata->jme_rxhead != NULL)
362 m_freem(rdata->jme_rxhead);
363 JME_RXCHAIN_RESET(sc, r);
366 * Reuse the configured Rx descriptors and reset the
367 * producer/consumer indices.
369 rdata->jme_rx_cons = 0;
373 if (sc->jme_cdata.jme_tx_cnt != 0) {
374 /* Remove queued packets for transmit. */
375 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
376 txd = &sc->jme_cdata.jme_txdesc[i];
377 if (txd->tx_m != NULL) {
379 sc->jme_cdata.jme_tx_tag,
388 jme_init_tx_ring(sc);
390 /* Initialize shadow status block. */
393 /* Program MAC with resolved speed/duplex/flow-control. */
394 if (sc->jme_flags & JME_FLAG_LINK) {
397 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
399 /* Set Tx ring address to the hardware. */
400 paddr = sc->jme_cdata.jme_tx_ring_paddr;
401 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
402 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
404 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
405 CSR_WRITE_4(sc, JME_RXCSR,
406 sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
408 /* Set Rx ring address to the hardware. */
409 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
410 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
411 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
414 /* Restart receiver/transmitter. */
415 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
417 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
420 ifp->if_flags |= IFF_RUNNING;
421 ifp->if_flags &= ~IFF_OACTIVE;
422 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
424 #ifdef DEVICE_POLLING
425 if (!(ifp->if_flags & IFF_POLLING))
427 /* Reenable interrupts. */
428 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
432 * Get the current interface media status.
435 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
437 struct jme_softc *sc = ifp->if_softc;
438 struct mii_data *mii = device_get_softc(sc->jme_miibus);
440 ASSERT_SERIALIZED(ifp->if_serializer);
443 ifmr->ifm_status = mii->mii_media_status;
444 ifmr->ifm_active = mii->mii_media_active;
448 * Set hardware to newly-selected media.
451 jme_mediachange(struct ifnet *ifp)
453 struct jme_softc *sc = ifp->if_softc;
454 struct mii_data *mii = device_get_softc(sc->jme_miibus);
457 ASSERT_SERIALIZED(ifp->if_serializer);
459 if (mii->mii_instance != 0) {
460 struct mii_softc *miisc;
462 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
463 mii_phy_reset(miisc);
465 error = mii_mediachg(mii);
471 jme_probe(device_t dev)
473 const struct jme_dev *sp;
476 vid = pci_get_vendor(dev);
477 did = pci_get_device(dev);
478 for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
479 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
480 struct jme_softc *sc = device_get_softc(dev);
482 sc->jme_caps = sp->jme_caps;
483 device_set_desc(dev, sp->jme_name);
491 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
497 for (i = JME_TIMEOUT; i > 0; i--) {
498 reg = CSR_READ_4(sc, JME_SMBCSR);
499 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
505 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
509 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
510 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
511 for (i = JME_TIMEOUT; i > 0; i--) {
513 reg = CSR_READ_4(sc, JME_SMBINTF);
514 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
519 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
523 reg = CSR_READ_4(sc, JME_SMBINTF);
524 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
530 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
532 uint8_t fup, reg, val;
537 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
538 fup != JME_EEPROM_SIG0)
540 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
541 fup != JME_EEPROM_SIG1)
545 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
547 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
548 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
549 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
551 if (reg >= JME_PAR0 &&
552 reg < JME_PAR0 + ETHER_ADDR_LEN) {
553 if (jme_eeprom_read_byte(sc, offset + 2,
556 eaddr[reg - JME_PAR0] = val;
560 /* Check for the end of EEPROM descriptor. */
561 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
563 /* Try next eeprom descriptor. */
564 offset += JME_EEPROM_DESC_BYTES;
565 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
567 if (match == ETHER_ADDR_LEN)
574 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
578 /* Read station address. */
579 par0 = CSR_READ_4(sc, JME_PAR0);
580 par1 = CSR_READ_4(sc, JME_PAR1);
582 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
583 device_printf(sc->jme_dev,
584 "generating fake ethernet address.\n");
585 par0 = karc4random();
586 /* Set OUI to JMicron. */
590 eaddr[3] = (par0 >> 16) & 0xff;
591 eaddr[4] = (par0 >> 8) & 0xff;
592 eaddr[5] = par0 & 0xff;
594 eaddr[0] = (par0 >> 0) & 0xFF;
595 eaddr[1] = (par0 >> 8) & 0xFF;
596 eaddr[2] = (par0 >> 16) & 0xFF;
597 eaddr[3] = (par0 >> 24) & 0xFF;
598 eaddr[4] = (par1 >> 0) & 0xFF;
599 eaddr[5] = (par1 >> 8) & 0xFF;
604 jme_attach(device_t dev)
606 struct jme_softc *sc = device_get_softc(dev);
607 struct ifnet *ifp = &sc->arpcom.ac_if;
610 uint8_t pcie_ptr, rev;
612 uint8_t eaddr[ETHER_ADDR_LEN];
614 sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
615 if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
616 sc->jme_rx_desc_cnt = JME_NDESC_MAX;
618 sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
619 if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
620 sc->jme_tx_desc_cnt = JME_NDESC_MAX;
622 sc->jme_rx_ring_cnt = jme_rx_ring_count;
623 if (sc->jme_rx_ring_cnt <= 0)
624 sc->jme_rx_ring_cnt = JME_NRXRING_1;
625 if (sc->jme_rx_ring_cnt > ncpus2)
626 sc->jme_rx_ring_cnt = ncpus2;
628 if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
629 sc->jme_rx_ring_cnt = JME_NRXRING_4;
630 else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
631 sc->jme_rx_ring_cnt = JME_NRXRING_2;
633 if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN) {
634 sc->jme_caps |= JME_CAP_RSS;
635 sc->jme_flags |= JME_FLAG_RSS;
637 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
640 sc->jme_lowaddr = BUS_SPACE_MAXADDR;
642 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
644 callout_init(&sc->jme_tick_ch);
647 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
650 irq = pci_read_config(dev, PCIR_INTLINE, 4);
651 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
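/*
 * Bringing the chip from D3 back to D0 may clear these config
 * registers on some devices, which is why the interrupt line and BAR
 * are saved here and written back below once the device is in D0.
 */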
653 device_printf(dev, "chip is in D%d power mode "
654 "-- setting to D0\n", pci_get_powerstate(dev));
656 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
658 pci_write_config(dev, PCIR_INTLINE, irq, 4);
659 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
661 #endif /* !BURN_BRIDGE */
663 /* Enable bus mastering */
664 pci_enable_busmaster(dev);
669 * The JMC250 supports both memory-mapped and I/O register space
670 * access. Because I/O access would need different BARs to reach
671 * the registers, it is a waste of time to use I/O register
672 * space access. The JMC250 uses 16K to map the entire memory
675 sc->jme_mem_rid = JME_PCIR_BAR;
676 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
677 &sc->jme_mem_rid, RF_ACTIVE);
678 if (sc->jme_mem_res == NULL) {
679 device_printf(dev, "can't allocate IO memory\n");
682 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
683 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
689 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
691 RF_SHAREABLE | RF_ACTIVE);
692 if (sc->jme_irq_res == NULL) {
693 device_printf(dev, "can't allocate irq\n");
701 reg = CSR_READ_4(sc, JME_CHIPMODE);
702 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
704 sc->jme_caps |= JME_CAP_FPGA;
706 device_printf(dev, "FPGA revision: 0x%04x\n",
707 (reg & CHIPMODE_FPGA_REV_MASK) >>
708 CHIPMODE_FPGA_REV_SHIFT);
712 /* NOTE: FM revision is put in the upper 4 bits */
713 rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
714 rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
716 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
718 did = pci_get_device(dev);
720 case PCI_PRODUCT_JMICRON_JMC250:
721 if (rev == JME_REV1_A2)
722 sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
725 case PCI_PRODUCT_JMICRON_JMC260:
727 sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
731 panic("unknown device id 0x%04x\n", did);
733 if (rev >= JME_REV2) {
734 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
735 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
736 GHC_TXMAC_CLKSRC_1000;
739 /* Reset the ethernet controller. */
742 /* Get station address. */
743 reg = CSR_READ_4(sc, JME_SMBCSR);
744 if (reg & SMBCSR_EEPROM_PRESENT)
745 error = jme_eeprom_macaddr(sc, eaddr);
746 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
747 if (error != 0 && (bootverbose)) {
748 device_printf(dev, "ethernet hardware address "
749 "not found in EEPROM.\n");
751 jme_reg_macaddr(sc, eaddr);
756 * The integrated JR0211 has a fixed PHY address, whereas the FPGA
757 * version requires PHY probing to find the correct PHY address.
759 if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
760 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
761 GPREG0_PHY_ADDR_MASK;
763 device_printf(dev, "PHY is at address %d.\n",
770 /* Set max allowable DMA size. */
771 pcie_ptr = pci_get_pciecap_ptr(dev);
775 sc->jme_caps |= JME_CAP_PCIE;
776 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
778 device_printf(dev, "Read request size : %d bytes.\n",
779 128 << ((ctrl >> 12) & 0x07));
780 device_printf(dev, "TLP payload size : %d bytes.\n",
781 128 << ((ctrl >> 5) & 0x07));
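/*
 * Bits 14:12 of the PCIe Device Control register encode the maximum
 * read request size and bits 7:5 the maximum TLP payload size, both as
 * 128 << n bytes, hence the shifts above. The TX DMA burst size below
 * is chosen to match the maximum read request size.
 */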
783 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
784 case PCIEM_DEVCTL_MAX_READRQ_128:
785 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
787 case PCIEM_DEVCTL_MAX_READRQ_256:
788 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
791 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
794 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
796 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
797 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
801 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
802 sc->jme_caps |= JME_CAP_PMCAP;
810 /* Allocate DMA resources */
811 error = jme_dma_alloc(sc);
816 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
817 ifp->if_init = jme_init;
818 ifp->if_ioctl = jme_ioctl;
819 ifp->if_start = jme_start;
820 #ifdef DEVICE_POLLING
821 ifp->if_poll = jme_poll;
823 ifp->if_watchdog = jme_watchdog;
824 ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
825 ifq_set_ready(&ifp->if_snd);
827 /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
828 ifp->if_capabilities = IFCAP_HWCSUM |
830 IFCAP_VLAN_HWTAGGING;
831 ifp->if_hwassist = JME_CSUM_FEATURES;
832 ifp->if_capenable = ifp->if_capabilities;
834 /* Set up MII bus. */
835 error = mii_phy_probe(dev, &sc->jme_miibus,
836 jme_mediachange, jme_mediastatus);
838 device_printf(dev, "no PHY found!\n");
843 * Save PHYADDR for FPGA mode PHY.
845 if (sc->jme_caps & JME_CAP_FPGA) {
846 struct mii_data *mii = device_get_softc(sc->jme_miibus);
848 if (mii->mii_instance != 0) {
849 struct mii_softc *miisc;
851 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
852 if (miisc->mii_phy != 0) {
853 sc->jme_phyaddr = miisc->mii_phy;
857 if (sc->jme_phyaddr != 0) {
858 device_printf(sc->jme_dev,
859 "FPGA PHY is at %d\n", sc->jme_phyaddr);
861 jme_miibus_writereg(dev, sc->jme_phyaddr,
862 JMPHY_CONF, JMPHY_CONF_DEFFIFO);
864 /* XXX should we clear JME_WA_EXTFIFO */
869 ether_ifattach(ifp, eaddr, NULL);
871 /* Tell the upper layer(s) we support long frames. */
872 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
874 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
875 &sc->jme_irq_handle, ifp->if_serializer);
877 device_printf(dev, "could not set up interrupt handler.\n");
882 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
883 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
891 jme_detach(device_t dev)
893 struct jme_softc *sc = device_get_softc(dev);
895 if (device_is_attached(dev)) {
896 struct ifnet *ifp = &sc->arpcom.ac_if;
898 lwkt_serialize_enter(ifp->if_serializer);
900 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
901 lwkt_serialize_exit(ifp->if_serializer);
906 if (sc->jme_sysctl_tree != NULL)
907 sysctl_ctx_free(&sc->jme_sysctl_ctx);
909 if (sc->jme_miibus != NULL)
910 device_delete_child(dev, sc->jme_miibus);
911 bus_generic_detach(dev);
913 if (sc->jme_irq_res != NULL) {
914 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
918 if (sc->jme_mem_res != NULL) {
919 bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
929 jme_sysctl_node(struct jme_softc *sc)
933 char rx_ring_pkt[32];
937 sysctl_ctx_init(&sc->jme_sysctl_ctx);
938 sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
939 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
940 device_get_nameunit(sc->jme_dev),
942 if (sc->jme_sysctl_tree == NULL) {
943 device_printf(sc->jme_dev, "can't add sysctl node\n");
947 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
948 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
949 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
950 sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
952 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
953 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
954 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
955 sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
957 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
958 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
959 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
960 sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
962 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
963 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
964 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
965 sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
967 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
968 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
969 "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
971 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
972 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
973 "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
975 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
976 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
977 "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
979 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
980 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
981 "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
982 0, "RX ring in use");
984 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
985 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
986 "rss_debug", CTLFLAG_RD, &sc->jme_rss_debug,
987 0, "RSS debug level");
988 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
989 ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
990 SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
991 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
992 rx_ring_pkt, CTLFLAG_RD,
993 &sc->jme_rx_ring_pkt[r],
999 * Set default coalescing values
1001 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1002 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1003 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1004 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1007 * Adjust the coalescing values in case the user has set the
1008 * number of TX/RX descriptors to small values.
1010 * NOTE: coal_max cannot be zero, since the number of descriptors
1011 * must be aligned to JME_NDESC_ALIGN (currently 16).
1013 coal_max = sc->jme_tx_desc_cnt / 6;
1014 if (coal_max < sc->jme_tx_coal_pkt)
1015 sc->jme_tx_coal_pkt = coal_max;
1017 coal_max = sc->jme_rx_desc_cnt / 4;
1018 if (coal_max < sc->jme_rx_coal_pkt)
1019 sc->jme_rx_coal_pkt = coal_max;
1023 jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1028 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1029 *((bus_addr_t *)arg) = segs->ds_addr;
1033 jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
1034 bus_size_t mapsz __unused, int error)
1036 struct jme_dmamap_ctx *ctx = xctx;
1042 if (nsegs > ctx->nsegs) {
1048 for (i = 0; i < nsegs; ++i)
1049 ctx->segs[i] = segs[i];
1053 jme_dma_alloc(struct jme_softc *sc)
1055 struct jme_txdesc *txd;
1056 bus_addr_t busaddr, lowaddr;
1059 sc->jme_cdata.jme_txdesc =
1060 kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1061 M_DEVBUF, M_WAITOK | M_ZERO);
1062 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1063 sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
1064 kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1065 M_DEVBUF, M_WAITOK | M_ZERO);
1068 lowaddr = sc->jme_lowaddr;
1070 /* Create parent ring tag. */
1071 error = bus_dma_tag_create(NULL,/* parent */
1072 1, 0, /* algnmnt, boundary */
1073 lowaddr, /* lowaddr */
1074 BUS_SPACE_MAXADDR, /* highaddr */
1075 NULL, NULL, /* filter, filterarg */
1076 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1078 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1080 &sc->jme_cdata.jme_ring_tag);
1082 device_printf(sc->jme_dev,
1083 "could not create parent ring DMA tag.\n");
1088 * Create DMA resources for the TX ring
1091 /* Create tag for Tx ring. */
1092 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1093 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */
1094 lowaddr, /* lowaddr */
1095 BUS_SPACE_MAXADDR, /* highaddr */
1096 NULL, NULL, /* filter, filterarg */
1097 JME_TX_RING_SIZE(sc), /* maxsize */
1099 JME_TX_RING_SIZE(sc), /* maxsegsize */
1101 &sc->jme_cdata.jme_tx_ring_tag);
1103 device_printf(sc->jme_dev,
1104 "could not allocate Tx ring DMA tag.\n");
1108 /* Allocate DMA'able memory for TX ring */
1109 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1110 (void **)&sc->jme_cdata.jme_tx_ring,
1111 BUS_DMA_WAITOK | BUS_DMA_ZERO,
1112 &sc->jme_cdata.jme_tx_ring_map);
1114 device_printf(sc->jme_dev,
1115 "could not allocate DMA'able memory for Tx ring.\n");
1116 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1117 sc->jme_cdata.jme_tx_ring_tag = NULL;
1121 /* Load the DMA map for Tx ring. */
1122 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1123 sc->jme_cdata.jme_tx_ring_map, sc->jme_cdata.jme_tx_ring,
1124 JME_TX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1126 device_printf(sc->jme_dev,
1127 "could not load DMA'able memory for Tx ring.\n");
1128 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1129 sc->jme_cdata.jme_tx_ring,
1130 sc->jme_cdata.jme_tx_ring_map);
1131 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1132 sc->jme_cdata.jme_tx_ring_tag = NULL;
1135 sc->jme_cdata.jme_tx_ring_paddr = busaddr;
1138 * Create DMA resources for the RX rings
1140 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1141 error = jme_rxring_dma_alloc(sc, lowaddr, i);
1146 if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1147 bus_addr_t ring_end;
1149 /* Tx/Rx descriptor queue should reside within 4GB boundary. */
1150 ring_end = sc->jme_cdata.jme_tx_ring_paddr +
1151 JME_TX_RING_SIZE(sc);
1152 if (JME_ADDR_HI(ring_end) !=
1153 JME_ADDR_HI(sc->jme_cdata.jme_tx_ring_paddr)) {
1154 device_printf(sc->jme_dev, "TX ring 4GB boundary "
1155 "crossed, switching to 32bit DMA address mode.\n");
1156 jme_dma_free(sc, 0);
1157 /* Limit DMA address space to 32bit and try again. */
1158 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1162 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1163 bus_addr_t ring_start;
1166 sc->jme_cdata.jme_rx_data[i].jme_rx_ring_paddr;
1167 ring_end = ring_start + JME_RX_RING_SIZE(sc);
1168 if (JME_ADDR_HI(ring_end) != JME_ADDR_HI(ring_start)) {
1169 device_printf(sc->jme_dev,
1170 "%dth RX ring 4GB boundary crossed, "
1171 "switching to 32bit DMA address mode.\n", i);
1172 jme_dma_free(sc, 0);
1174 * Limit DMA address space to 32bit and
1177 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1183 /* Create parent buffer tag. */
1184 error = bus_dma_tag_create(NULL,/* parent */
1185 1, 0, /* algnmnt, boundary */
1186 sc->jme_lowaddr, /* lowaddr */
1187 BUS_SPACE_MAXADDR, /* highaddr */
1188 NULL, NULL, /* filter, filterarg */
1189 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1191 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1193 &sc->jme_cdata.jme_buffer_tag);
1195 device_printf(sc->jme_dev,
1196 "could not create parent buffer DMA tag.\n");
1201 * Create DMA resources for the shadow status block
1204 /* Create shadow status block tag. */
1205 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1206 JME_SSB_ALIGN, 0, /* algnmnt, boundary */
1207 sc->jme_lowaddr, /* lowaddr */
1208 BUS_SPACE_MAXADDR, /* highaddr */
1209 NULL, NULL, /* filter, filterarg */
1210 JME_SSB_SIZE, /* maxsize */
1212 JME_SSB_SIZE, /* maxsegsize */
1214 &sc->jme_cdata.jme_ssb_tag);
1216 device_printf(sc->jme_dev,
1217 "could not create shadow status block DMA tag.\n");
1221 /* Allocate DMA'able memory for shadow status block. */
1222 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1223 (void **)&sc->jme_cdata.jme_ssb_block,
1224 BUS_DMA_WAITOK | BUS_DMA_ZERO,
1225 &sc->jme_cdata.jme_ssb_map);
1227 device_printf(sc->jme_dev, "could not allocate DMA'able "
1228 "memory for shadow status block.\n");
1229 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1230 sc->jme_cdata.jme_ssb_tag = NULL;
1234 /* Load the DMA map for shadow status block */
1235 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1236 sc->jme_cdata.jme_ssb_map, sc->jme_cdata.jme_ssb_block,
1237 JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
1239 device_printf(sc->jme_dev, "could not load DMA'able memory "
1240 "for shadow status block.\n");
1241 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1242 sc->jme_cdata.jme_ssb_block,
1243 sc->jme_cdata.jme_ssb_map);
1244 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1245 sc->jme_cdata.jme_ssb_tag = NULL;
1248 sc->jme_cdata.jme_ssb_block_paddr = busaddr;
1251 * Create DMA resources for the TX buffers
1254 /* Create tag for Tx buffers. */
1255 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1256 1, 0, /* algnmnt, boundary */
1257 sc->jme_lowaddr, /* lowaddr */
1258 BUS_SPACE_MAXADDR, /* highaddr */
1259 NULL, NULL, /* filter, filterarg */
1260 JME_TSO_MAXSIZE, /* maxsize */
1261 JME_MAXTXSEGS, /* nsegments */
1262 JME_TSO_MAXSEGSIZE, /* maxsegsize */
1264 &sc->jme_cdata.jme_tx_tag);
1266 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1270 /* Create DMA maps for Tx buffers. */
1271 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1272 txd = &sc->jme_cdata.jme_txdesc[i];
1273 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1278 device_printf(sc->jme_dev,
1279 "could not create %dth Tx dmamap.\n", i);
1281 for (j = 0; j < i; ++j) {
1282 txd = &sc->jme_cdata.jme_txdesc[j];
1283 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1286 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1287 sc->jme_cdata.jme_tx_tag = NULL;
1293 * Create DMA resources for the RX buffers
1295 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1296 error = jme_rxbuf_dma_alloc(sc, i);
1304 jme_dma_free(struct jme_softc *sc, int detach)
1306 struct jme_txdesc *txd;
1307 struct jme_rxdesc *rxd;
1308 struct jme_rxdata *rdata;
1312 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1313 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1314 sc->jme_cdata.jme_tx_ring_map);
1315 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1316 sc->jme_cdata.jme_tx_ring,
1317 sc->jme_cdata.jme_tx_ring_map);
1318 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1319 sc->jme_cdata.jme_tx_ring_tag = NULL;
1323 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1324 rdata = &sc->jme_cdata.jme_rx_data[r];
1325 if (rdata->jme_rx_ring_tag != NULL) {
1326 bus_dmamap_unload(rdata->jme_rx_ring_tag,
1327 rdata->jme_rx_ring_map);
1328 bus_dmamem_free(rdata->jme_rx_ring_tag,
1330 rdata->jme_rx_ring_map);
1331 bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1332 rdata->jme_rx_ring_tag = NULL;
1337 if (sc->jme_cdata.jme_tx_tag != NULL) {
1338 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1339 txd = &sc->jme_cdata.jme_txdesc[i];
1340 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1343 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1344 sc->jme_cdata.jme_tx_tag = NULL;
1348 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1349 rdata = &sc->jme_cdata.jme_rx_data[r];
1350 if (rdata->jme_rx_tag != NULL) {
1351 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1352 rxd = &rdata->jme_rxdesc[i];
1353 bus_dmamap_destroy(rdata->jme_rx_tag,
1356 bus_dmamap_destroy(rdata->jme_rx_tag,
1357 rdata->jme_rx_sparemap);
1358 bus_dma_tag_destroy(rdata->jme_rx_tag);
1359 rdata->jme_rx_tag = NULL;
1363 /* Shadow status block. */
1364 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1365 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1366 sc->jme_cdata.jme_ssb_map);
1367 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1368 sc->jme_cdata.jme_ssb_block,
1369 sc->jme_cdata.jme_ssb_map);
1370 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1371 sc->jme_cdata.jme_ssb_tag = NULL;
1374 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1375 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1376 sc->jme_cdata.jme_buffer_tag = NULL;
1378 if (sc->jme_cdata.jme_ring_tag != NULL) {
1379 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1380 sc->jme_cdata.jme_ring_tag = NULL;
1384 if (sc->jme_cdata.jme_txdesc != NULL) {
1385 kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1386 sc->jme_cdata.jme_txdesc = NULL;
1388 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1389 rdata = &sc->jme_cdata.jme_rx_data[r];
1390 if (rdata->jme_rxdesc != NULL) {
1391 kfree(rdata->jme_rxdesc, M_DEVBUF);
1392 rdata->jme_rxdesc = NULL;
1399 * Make sure the interface is stopped at reboot time.
1402 jme_shutdown(device_t dev)
1404 return jme_suspend(dev);
1409 * Unlike other ethernet controllers, the JMC250 requires the link
1410 * speed to be explicitly reset to 10/100Mbps, as a gigabit
1411 * link consumes more than 375mA.
1412 * Note that we reset the link speed to 10/100Mbps through
1413 * auto-negotiation, but we do not know whether that operation
1414 * will succeed, since we have no control after powering
1415 * off. If the renegotiation fails, WOL may not work. Running
1416 * at 1Gbps draws more than the 375mA at 3.3V that is
1417 * specified in the PCI specification, and that would result in
1418 * power to the ethernet controller being shut down completely.
1421 * It would be better to save the negotiated media speed/duplex/
1422 * flow-control in the softc and restore the same link after resuming.
1423 * PHY handling, such as powering down or resetting to 100Mbps,
1424 * may be better done in the PHY driver's suspend method.
1427 jme_setlinkspeed(struct jme_softc *sc)
1429 struct mii_data *mii;
1432 JME_LOCK_ASSERT(sc);
1434 mii = device_get_softc(sc->jme_miibus);
1437 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1438 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1448 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1449 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1450 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1451 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1452 BMCR_AUTOEN | BMCR_STARTNEG);
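/*
 * The register writes above clear the 1000BASE-T advertisement
 * (MII_100T2CR), advertise only 10/100 in MII_ANAR, and then restart
 * auto-negotiation, so the PHY should come back up at 10/100Mbps
 * for WOL.
 */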
1455 /* Poll the link state until jme(4) gets a 10/100 link. */
1456 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1458 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1459 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1469 pause("jmelnk", hz);
1472 if (i == MII_ANEGTICKS_GIGE)
1473 device_printf(sc->jme_dev, "establishing link failed, "
1474 "WOL may not work!");
1477 * No link; force the MAC to a 100Mbps, full-duplex link.
1478 * This is the last resort and may or may not work.
1480 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1481 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1486 jme_setwol(struct jme_softc *sc)
1488 struct ifnet *ifp = &sc->arpcom.ac_if;
1493 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1494 /* No PME capability, PHY power down. */
1495 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1496 MII_BMCR, BMCR_PDOWN);
1500 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1501 pmcs = CSR_READ_4(sc, JME_PMCS);
1502 pmcs &= ~PMCS_WOL_ENB_MASK;
1503 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1504 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1505 /* Enable PME message. */
1506 gpr |= GPREG0_PME_ENB;
1507 /* For gigabit controllers, reset link speed to 10/100. */
1508 if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1509 jme_setlinkspeed(sc);
1512 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1513 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1516 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1517 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1518 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1519 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1520 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1521 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1522 /* No WOL, PHY power down. */
1523 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1524 MII_BMCR, BMCR_PDOWN);
1530 jme_suspend(device_t dev)
1532 struct jme_softc *sc = device_get_softc(dev);
1533 struct ifnet *ifp = &sc->arpcom.ac_if;
1535 lwkt_serialize_enter(ifp->if_serializer);
1540 lwkt_serialize_exit(ifp->if_serializer);
1546 jme_resume(device_t dev)
1548 struct jme_softc *sc = device_get_softc(dev);
1549 struct ifnet *ifp = &sc->arpcom.ac_if;
1554 lwkt_serialize_enter(ifp->if_serializer);
1557 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1560 pmstat = pci_read_config(sc->jme_dev,
1561 pmc + PCIR_POWER_STATUS, 2);
1562 /* Disable PME and clear the PME status. */
1563 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1564 pci_write_config(sc->jme_dev,
1565 pmc + PCIR_POWER_STATUS, pmstat, 2);
1569 if (ifp->if_flags & IFF_UP)
1572 lwkt_serialize_exit(ifp->if_serializer);
1578 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1580 struct jme_txdesc *txd;
1581 struct jme_desc *desc;
1583 struct jme_dmamap_ctx ctx;
1584 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1586 int error, i, prod, symbol_desc;
1587 uint32_t cflags, flag64;
1589 M_ASSERTPKTHDR((*m_head));
1591 prod = sc->jme_cdata.jme_tx_prod;
1592 txd = &sc->jme_cdata.jme_txdesc[prod];
1594 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1599 maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1600 (JME_TXD_RSVD + symbol_desc);
1601 if (maxsegs > JME_MAXTXSEGS)
1602 maxsegs = JME_MAXTXSEGS;
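/*
 * maxsegs is the DMA segment budget for this frame: the free
 * descriptors left in the ring, minus the JME_TXD_RSVD descriptors
 * always kept in reserve and, in 64-bit address mode, the extra
 * symbol descriptor that carries no payload; it is then capped at
 * the JME_MAXTXSEGS scatter/gather limit.
 */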
1603 KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1604 ("not enough segments %d\n", maxsegs));
1606 ctx.nsegs = maxsegs;
1608 error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1609 *m_head, jme_dmamap_buf_cb, &ctx,
1611 if (!error && ctx.nsegs == 0) {
1612 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1615 if (error == EFBIG) {
1616 m = m_defrag(*m_head, MB_DONTWAIT);
1618 if_printf(&sc->arpcom.ac_if,
1619 "could not defrag TX mbuf\n");
1625 ctx.nsegs = maxsegs;
1627 error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
1628 txd->tx_dmamap, *m_head,
1629 jme_dmamap_buf_cb, &ctx,
1631 if (error || ctx.nsegs == 0) {
1632 if_printf(&sc->arpcom.ac_if,
1633 "could not load defragged TX mbuf\n");
1635 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
1642 if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
1649 /* Configure checksum offload. */
1650 if (m->m_pkthdr.csum_flags & CSUM_IP)
1651 cflags |= JME_TD_IPCSUM;
1652 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1653 cflags |= JME_TD_TCPCSUM;
1654 if (m->m_pkthdr.csum_flags & CSUM_UDP)
1655 cflags |= JME_TD_UDPCSUM;
1657 /* Configure VLAN. */
1658 if (m->m_flags & M_VLANTAG) {
1659 cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1660 cflags |= JME_TD_VLAN_TAG;
1663 desc = &sc->jme_cdata.jme_tx_ring[prod];
1664 desc->flags = htole32(cflags);
1665 desc->addr_hi = htole32(m->m_pkthdr.len);
1666 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1668 * Use the 64-bit TX descriptor chain format.
1670 * The first TX descriptor of the chain, which is set up here,
1671 * is just a symbol TX descriptor carrying no payload.
1673 flag64 = JME_TD_64BIT;
1677 /* No effective TX desc is consumed */
1681 * Use the 32-bit TX descriptor chain format.
1683 * The first TX descriptor of the chain, which is set up here,
1684 * is an effective TX descriptor carrying the first segment of
1688 desc->buflen = htole32(txsegs[0].ds_len);
1689 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1691 /* One effective TX desc is consumed */
1694 sc->jme_cdata.jme_tx_cnt++;
1695 KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1696 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1697 JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1699 txd->tx_ndesc = 1 - i;
1700 for (; i < ctx.nsegs; i++) {
1701 desc = &sc->jme_cdata.jme_tx_ring[prod];
1702 desc->flags = htole32(JME_TD_OWN | flag64);
1703 desc->buflen = htole32(txsegs[i].ds_len);
1704 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1705 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1707 sc->jme_cdata.jme_tx_cnt++;
1708 KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1709 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1710 JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1713 /* Update producer index. */
1714 sc->jme_cdata.jme_tx_prod = prod;
1716 * Finally, request an interrupt and give ownership of the
1717 * first descriptor to the hardware.
1719 desc = txd->tx_desc;
1720 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1723 txd->tx_ndesc += ctx.nsegs;
1725 /* Sync descriptors. */
1726 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1727 BUS_DMASYNC_PREWRITE);
1728 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1729 sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
1738 jme_start(struct ifnet *ifp)
1740 struct jme_softc *sc = ifp->if_softc;
1741 struct mbuf *m_head;
1744 ASSERT_SERIALIZED(ifp->if_serializer);
1746 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1747 ifq_purge(&ifp->if_snd);
1751 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1754 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1757 while (!ifq_is_empty(&ifp->if_snd)) {
1759 * Check the number of available TX descriptors; always
1760 * leave JME_TXD_RSVD TX descriptors free.
1762 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1763 sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1764 ifp->if_flags |= IFF_OACTIVE;
1768 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1773 * Pack the data into the transmit ring. If we
1774 * don't have room, set the OACTIVE flag and wait
1775 * for the NIC to drain the ring.
1777 if (jme_encap(sc, &m_head)) {
1778 KKASSERT(m_head == NULL);
1780 ifp->if_flags |= IFF_OACTIVE;
1786 * If there's a BPF listener, bounce a copy of this frame
1789 ETHER_BPF_MTAP(ifp, m_head);
1794 * Reading TXCSR takes a very long time under heavy load,
1795 * so cache the TXCSR value and write it ORed with the
1796 * kick command to TXCSR. This saves one register
1799 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1800 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1801 /* Set a timeout in case the chip goes out to lunch. */
1802 ifp->if_timer = JME_TX_TIMEOUT;
1807 jme_watchdog(struct ifnet *ifp)
1809 struct jme_softc *sc = ifp->if_softc;
1811 ASSERT_SERIALIZED(ifp->if_serializer);
1813 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1814 if_printf(ifp, "watchdog timeout (missed link)\n");
1821 if (sc->jme_cdata.jme_tx_cnt == 0) {
1822 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1824 if (!ifq_is_empty(&ifp->if_snd))
1829 if_printf(ifp, "watchdog timeout\n");
1832 if (!ifq_is_empty(&ifp->if_snd))
1837 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1839 struct jme_softc *sc = ifp->if_softc;
1840 struct mii_data *mii = device_get_softc(sc->jme_miibus);
1841 struct ifreq *ifr = (struct ifreq *)data;
1842 int error = 0, mask;
1844 ASSERT_SERIALIZED(ifp->if_serializer);
1848 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1849 (!(sc->jme_caps & JME_CAP_JUMBO) &&
1850 ifr->ifr_mtu > JME_MAX_MTU)) {
1855 if (ifp->if_mtu != ifr->ifr_mtu) {
1857 * No special configuration is required when the interface
1858 * MTU is changed, but the availability of Tx checksum
1859 * offload should be checked against the new MTU, since the
1860 * Tx FIFO is only 2K.
1862 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1863 ifp->if_capenable &= ~IFCAP_TXCSUM;
1864 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1866 ifp->if_mtu = ifr->ifr_mtu;
1867 if (ifp->if_flags & IFF_RUNNING)
1873 if (ifp->if_flags & IFF_UP) {
1874 if (ifp->if_flags & IFF_RUNNING) {
1875 if ((ifp->if_flags ^ sc->jme_if_flags) &
1876 (IFF_PROMISC | IFF_ALLMULTI))
1882 if (ifp->if_flags & IFF_RUNNING)
1885 sc->jme_if_flags = ifp->if_flags;
1890 if (ifp->if_flags & IFF_RUNNING)
1896 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1900 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1902 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1903 if (IFCAP_TXCSUM & ifp->if_capabilities) {
1904 ifp->if_capenable ^= IFCAP_TXCSUM;
1905 if (IFCAP_TXCSUM & ifp->if_capenable)
1906 ifp->if_hwassist |= JME_CSUM_FEATURES;
1908 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1911 if ((mask & IFCAP_RXCSUM) &&
1912 (IFCAP_RXCSUM & ifp->if_capabilities)) {
1915 ifp->if_capenable ^= IFCAP_RXCSUM;
1916 reg = CSR_READ_4(sc, JME_RXMAC);
1917 reg &= ~RXMAC_CSUM_ENB;
1918 if (ifp->if_capenable & IFCAP_RXCSUM)
1919 reg |= RXMAC_CSUM_ENB;
1920 CSR_WRITE_4(sc, JME_RXMAC, reg);
1923 if ((mask & IFCAP_VLAN_HWTAGGING) &&
1924 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
1925 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1931 error = ether_ioctl(ifp, cmd, data);
1938 jme_mac_config(struct jme_softc *sc)
1940 struct mii_data *mii;
1941 uint32_t ghc, rxmac, txmac, txpause, gp1;
1942 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1944 mii = device_get_softc(sc->jme_miibus);
1946 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1948 CSR_WRITE_4(sc, JME_GHC, 0);
1950 rxmac = CSR_READ_4(sc, JME_RXMAC);
1951 rxmac &= ~RXMAC_FC_ENB;
1952 txmac = CSR_READ_4(sc, JME_TXMAC);
1953 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1954 txpause = CSR_READ_4(sc, JME_TXPFC);
1955 txpause &= ~TXPFC_PAUSE_ENB;
1956 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1957 ghc |= GHC_FULL_DUPLEX;
1958 rxmac &= ~RXMAC_COLL_DET_ENB;
1959 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1960 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1963 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1964 txpause |= TXPFC_PAUSE_ENB;
1965 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1966 rxmac |= RXMAC_FC_ENB;
1968 /* Disable retry transmit timer/retry limit. */
1969 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1970 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1972 rxmac |= RXMAC_COLL_DET_ENB;
1973 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1974 /* Enable retry transmit timer/retry limit. */
1975 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1976 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1980 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1982 gp1 = CSR_READ_4(sc, JME_GPREG1);
1983 gp1 &= ~GPREG1_WA_HDX;
1985 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1988 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1990 ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1992 gp1 |= GPREG1_WA_HDX;
1996 ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1998 gp1 |= GPREG1_WA_HDX;
2001 * Use the extended FIFO depth to work around CRC errors
2002 * emitted by chips prior to the JMC250B.
2004 phyconf = JMPHY_CONF_EXTFIFO;
2008 if (sc->jme_caps & JME_CAP_FASTETH)
2011 ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
2013 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2019 CSR_WRITE_4(sc, JME_GHC, ghc);
2020 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2021 CSR_WRITE_4(sc, JME_TXMAC, txmac);
2022 CSR_WRITE_4(sc, JME_TXPFC, txpause);
2024 if (sc->jme_workaround & JME_WA_EXTFIFO) {
2025 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2026 JMPHY_CONF, phyconf);
2028 if (sc->jme_workaround & JME_WA_HDX)
2029 CSR_WRITE_4(sc, JME_GPREG1, gp1);
2035 struct jme_softc *sc = xsc;
2036 struct ifnet *ifp = &sc->arpcom.ac_if;
2040 ASSERT_SERIALIZED(ifp->if_serializer);
2042 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2043 if (status == 0 || status == 0xFFFFFFFF)
2046 /* Disable interrupts. */
2047 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2049 status = CSR_READ_4(sc, JME_INTR_STATUS);
2050 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2053 /* Reset PCC counter/timer and Ack interrupts. */
2054 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2056 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
2057 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2059 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2060 if (status & jme_rx_status[r].jme_coal) {
2061 status |= jme_rx_status[r].jme_coal |
2062 jme_rx_status[r].jme_comp;
2066 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2068 if (ifp->if_flags & IFF_RUNNING) {
2069 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
2070 jme_rx_intr(sc, status);
2072 if (status & INTR_RXQ_DESC_EMPTY) {
2074 * Notify the hardware that new Rx buffers are available.
2075 * Reading RXCSR takes a very long time under heavy
2076 * load, so cache the RXCSR value and write it ORed
2077 * with the kick command to RXCSR. This
2078 * saves one register access cycle.
2080 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2081 RXCSR_RX_ENB | RXCSR_RXQ_START);
2084 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
2086 if (!ifq_is_empty(&ifp->if_snd))
2091 /* Reenable interrupts. */
2092 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2096 jme_txeof(struct jme_softc *sc)
2098 struct ifnet *ifp = &sc->arpcom.ac_if;
2099 struct jme_txdesc *txd;
2103 cons = sc->jme_cdata.jme_tx_cons;
2104 if (cons == sc->jme_cdata.jme_tx_prod)
2107 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2108 sc->jme_cdata.jme_tx_ring_map,
2109 BUS_DMASYNC_POSTREAD);
2112 * Go through our Tx list and free mbufs for those
2113 * frames which have been transmitted.
2115 while (cons != sc->jme_cdata.jme_tx_prod) {
2116 txd = &sc->jme_cdata.jme_txdesc[cons];
2117 KASSERT(txd->tx_m != NULL,
2118 ("%s: freeing NULL mbuf!\n", __func__));
2120 status = le32toh(txd->tx_desc->flags);
2121 if ((status & JME_TD_OWN) == JME_TD_OWN)
2124 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2128 if (status & JME_TD_COLLISION) {
2129 ifp->if_collisions +=
2130 le32toh(txd->tx_desc->buflen) &
2131 JME_TD_BUF_LEN_MASK;
2136 * Only the first descriptor of a multi-descriptor
2137 * transmission is updated, so the driver has to skip the entire
2138 * chain of buffers for the transmitted frame. In other
2139 * words, the JME_TD_OWN bit is valid only in the first
2140 * descriptor of a multi-descriptor transmission.
2142 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2143 sc->jme_cdata.jme_tx_ring[cons].flags = 0;
2144 JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
2147 /* Reclaim transferred mbufs. */
2148 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2151 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2152 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2153 ("%s: Active Tx desc counter was garbled\n", __func__));
2156 sc->jme_cdata.jme_tx_cons = cons;
2158 if (sc->jme_cdata.jme_tx_cnt == 0)
2161 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
2162 sc->jme_tx_desc_cnt - JME_TXD_RSVD)
2163 ifp->if_flags &= ~IFF_OACTIVE;
2165 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2166 sc->jme_cdata.jme_tx_ring_map,
2167 BUS_DMASYNC_PREWRITE);
2170 static __inline void
2171 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2173 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2176 for (i = 0; i < count; ++i) {
2177 struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2179 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2180 desc->buflen = htole32(MCLBYTES);
2181 JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2185 /* Receive a frame. */
2187 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2189 struct ifnet *ifp = &sc->arpcom.ac_if;
2190 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2191 struct jme_desc *desc;
2192 struct jme_rxdesc *rxd;
2193 struct mbuf *mp, *m;
2194 uint32_t flags, status;
2195 int cons, count, nsegs;
2197 cons = rdata->jme_rx_cons;
2198 desc = &rdata->jme_rx_ring[cons];
2199 flags = le32toh(desc->flags);
2200 status = le32toh(desc->buflen);
2201 nsegs = JME_RX_NSEGS(status);
2203 JME_RSS_DPRINTF(sc, 10, "ring%d, flags 0x%08x, "
2204 "hash 0x%08x, hash type 0x%08x\n",
2205 ring, flags, desc->addr_hi, desc->addr_lo);
2207 if (status & JME_RX_ERR_STAT) {
2209 jme_discard_rxbufs(sc, ring, cons, nsegs);
2210 #ifdef JME_SHOW_ERRORS
2211 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2212 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2214 rdata->jme_rx_cons += nsegs;
2215 rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2219 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2220 for (count = 0; count < nsegs; count++,
2221 JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2222 rxd = &rdata->jme_rxdesc[cons];
2225 /* Add a new receive buffer to the ring. */
2226 if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2229 jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2230 if (rdata->jme_rxhead != NULL) {
2231 m_freem(rdata->jme_rxhead);
2232 JME_RXCHAIN_RESET(sc, ring);
2238 * Assume we've received a full-sized frame.
2239 * The actual size is fixed up when we encounter the end of a
2240 * multi-segment frame.
2242 mp->m_len = MCLBYTES;
2244 /* Chain received mbufs. */
2245 if (rdata->jme_rxhead == NULL) {
2246 rdata->jme_rxhead = mp;
2247 rdata->jme_rxtail = mp;
2250 * The receive processor can handle a maximum frame
2251 * size of 65535 bytes.
2253 mp->m_flags &= ~M_PKTHDR;
2254 rdata->jme_rxtail->m_next = mp;
2255 rdata->jme_rxtail = mp;
2258 if (count == nsegs - 1) {
2259 /* Last desc. for this frame. */
2260 m = rdata->jme_rxhead;
2261 /* XXX assert PKTHDR? */
2262 m->m_flags |= M_PKTHDR;
2263 m->m_pkthdr.len = rdata->jme_rxlen;
2265 /* Set first mbuf size. */
2266 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2267 /* Set last mbuf size. */
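/*
 * The last segment holds whatever remains of the frame after the
 * first cluster (minus the RX pad bytes) and the nsegs - 2 full
 * clusters in between.
 */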
2268 mp->m_len = rdata->jme_rxlen -
2269 ((MCLBYTES - JME_RX_PAD_BYTES) +
2270 (MCLBYTES * (nsegs - 2)));
2272 m->m_len = rdata->jme_rxlen;
2274 m->m_pkthdr.rcvif = ifp;
2277 * Account for the 10 bytes of auto padding that are used
2278 * to align the IP header on a 32-bit boundary. Also note that
2279 * the CRC bytes are automatically removed by the
2282 m->m_data += JME_RX_PAD_BYTES;
2284 /* Set checksum information. */
2285 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2286 (flags & JME_RD_IPV4)) {
2287 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2288 if (flags & JME_RD_IPCSUM)
2289 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2290 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2291 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2292 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2293 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2294 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2295 m->m_pkthdr.csum_flags |=
2296 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2297 m->m_pkthdr.csum_data = 0xffff;
2301 /* Check for VLAN tagged packets. */
2302 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2303 (flags & JME_RD_VLAN_TAG)) {
2304 m->m_pkthdr.ether_vlantag =
2305 flags & JME_RD_VLAN_MASK;
2306 m->m_flags |= M_VLANTAG;
2311 ether_input_chain(ifp, m, chain);
2313 /* Reset mbuf chains. */
2314 JME_RXCHAIN_RESET(sc, ring);
2315 #ifdef JME_RSS_DEBUG
2316 sc->jme_rx_ring_pkt[ring]++;
2321 rdata->jme_rx_cons += nsegs;
2322 rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2326 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2329 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2330 struct jme_desc *desc;
2331 int nsegs, prog, pktlen;
2333 bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2334 BUS_DMASYNC_POSTREAD);
2338 #ifdef DEVICE_POLLING
2339 if (count >= 0 && count-- == 0)
2342 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2343 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2345 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2349 * Check the number of segments against the received bytes.
2350 * A non-matching value would indicate that the hardware
2351 * is still trying to update the Rx descriptors. I'm not
2352 * sure whether this check is needed.
2354 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2355 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2356 if (nsegs != howmany(pktlen, MCLBYTES)) {
2357 if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2358 "and packet size(%d) mismatch\n",
2363 /* Received a frame. */
2364 jme_rxpkt(sc, ring, chain);
2369 bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2370 BUS_DMASYNC_PREWRITE);
2376 jme_rxeof(struct jme_softc *sc, int ring)
2378 struct mbuf_chain chain[MAXCPU];
2380 ether_input_chain_init(chain);
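/*
 * A negative count disables the polling budget in jme_rxeof_chain(),
 * so the ring is drained completely.
 */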
2381 if (jme_rxeof_chain(sc, ring, chain, -1))
2382 ether_input_dispatch(chain);
2388 struct jme_softc *sc = xsc;
2389 struct ifnet *ifp = &sc->arpcom.ac_if;
2390 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2392 lwkt_serialize_enter(ifp->if_serializer);
2395 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2397 lwkt_serialize_exit(ifp->if_serializer);
2401 jme_reset(struct jme_softc *sc)
2404 /* Stop receiver, transmitter. */
2408 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2410 CSR_WRITE_4(sc, JME_GHC, 0);
2416 struct jme_softc *sc = xsc;
2417 struct ifnet *ifp = &sc->arpcom.ac_if;
2418 struct mii_data *mii;
2419 uint8_t eaddr[ETHER_ADDR_LEN];
2424 ASSERT_SERIALIZED(ifp->if_serializer);
2427 * Cancel any pending I/O.
2432 * Reset the chip to a known state.
2437 howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2438 KKASSERT(sc->jme_txd_spare >= 1);
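/*
 * For the standard 1500-byte MTU this is howmany(1518, 2048) == 1
 * spare descriptor (assuming 2048-byte clusters); jumbo MTUs reserve
 * proportionally more.
 */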
2441 * If we use 64bit address mode for transmitting, each Tx request
2442 * needs one more symbol descriptor.
2444 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2445 sc->jme_txd_spare += 1;
2447 if (sc->jme_flags & JME_FLAG_RSS)
2450 jme_disable_rss(sc);
2452 /* Init RX descriptors */
2453 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2454 error = jme_init_rx_ring(sc, r);
2456 if_printf(ifp, "initialization failed: "
2457 "no memory for %dth RX ring.\n", r);
2463 /* Init TX descriptors */
2464 jme_init_tx_ring(sc);
2466 /* Initialize shadow status block. */
2469 /* Reprogram the station address. */
2470 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2471 CSR_WRITE_4(sc, JME_PAR0,
2472 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2473 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
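/*
 * The address is packed little-endian, e.g. 00:1b:2c:3d:4e:5f is
 * written as PAR0 = 0x3d2c1b00 and PAR1 = 0x00005f4e.
 */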
2476 * Configure Tx queue.
2477 * Tx priority queue weight value : 0
2478 * Tx FIFO threshold for processing next packet : 16QW
2479 * Maximum Tx DMA length : 512
2480 * Allow Tx DMA burst.
2482 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2483 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2484 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2485 sc->jme_txcsr |= sc->jme_tx_dma_size;
2486 sc->jme_txcsr |= TXCSR_DMA_BURST;
2487 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2489 /* Set Tx descriptor counter. */
2490 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2492 /* Set Tx ring address to the hardware. */
2493 paddr = sc->jme_cdata.jme_tx_ring_paddr;
2494 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2495 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2497 /* Configure TxMAC parameters. */
2498 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2499 reg |= TXMAC_THRESH_1_PKT;
2500 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2501 CSR_WRITE_4(sc, JME_TXMAC, reg);
2504 * Configure Rx queue.
2505 * FIFO full threshold for transmitting Tx pause packet : 128T
2506 * FIFO threshold for processing next packet : 128QW
2508 * Max Rx DMA length : 128
2509 * Rx descriptor retry : 32
2510 * Rx descriptor retry time gap : 256ns
2511 * Don't receive runt/bad frame.
2513 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2516 * Since the Rx FIFO is 4K bytes, receiving frames larger
2517 * than 4K bytes will suffer from Rx FIFO overruns, so
2518 * decrease the FIFO threshold to reduce overruns for
2519 * frames larger than 4000 bytes.
2520 * For best performance with standard MTU-sized frames, use the
2521 * maximum allowable FIFO threshold, 128QW.
2523 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2525 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2527 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2529 /* Improve PCI Express compatibility */
2530 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2532 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2533 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2534 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2535 /* XXX TODO DROP_BAD */
2537 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2538 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2540 /* Set Rx descriptor counter. */
2541 CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2543 /* Set Rx ring address to the hardware. */
2544 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2545 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2546 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2549 /* Clear receive filter. */
2550 CSR_WRITE_4(sc, JME_RXMAC, 0);
2552 /* Set up the receive filter. */
2557 * Disable all WOL bits as WOL can interfere with normal Rx
2558 * operation. Also clear WOL detection status bits.
2560 reg = CSR_READ_4(sc, JME_PMCS);
2561 reg &= ~PMCS_WOL_ENB_MASK;
2562 CSR_WRITE_4(sc, JME_PMCS, reg);
2565 * Pad 10 bytes right before the received frame. This greatly
2566 * helps Rx performance on strict-alignment architectures as
2567 * the driver does not need to copy the frame to align the payload.
2569 reg = CSR_READ_4(sc, JME_RXMAC);
2570 reg |= RXMAC_PAD_10BYTES;
2572 if (ifp->if_capenable & IFCAP_RXCSUM)
2573 reg |= RXMAC_CSUM_ENB;
2574 CSR_WRITE_4(sc, JME_RXMAC, reg);
2576 /* Configure general purpose reg0 */
2577 reg = CSR_READ_4(sc, JME_GPREG0);
2578 reg &= ~GPREG0_PCC_UNIT_MASK;
2579 /* Set the PCC timer resolution to microseconds. */
2580 reg |= GPREG0_PCC_UNIT_US;
2582 * Disable all shadow register posting as we have to read the
2583 * JME_INTR_STATUS register in jme_intr. Also it seems
2584 * that it's hard to synchronize the interrupt status between
2585 * hardware and software with shadow posting due to the
2586 * requirements of bus_dmamap_sync(9).
2588 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2589 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2590 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2591 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2592 /* Disable posting of DW0. */
2593 reg &= ~GPREG0_POST_DW0_ENB;
2594 /* Clear PME message. */
2595 reg &= ~GPREG0_PME_ENB;
2596 /* Set PHY address. */
2597 reg &= ~GPREG0_PHY_ADDR_MASK;
2598 reg |= sc->jme_phyaddr;
2599 CSR_WRITE_4(sc, JME_GPREG0, reg);
2601 /* Configure Tx queue 0 packet completion coalescing. */
2602 jme_set_tx_coal(sc);
2604 /* Configure Rx queue 0 packet completion coalescing. */
2605 jme_set_rx_coal(sc);
2607 /* Configure shadow status block but don't enable posting. */
2608 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2609 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2610 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2612 /* Disable Timer 1 and Timer 2. */
2613 CSR_WRITE_4(sc, JME_TIMER1, 0);
2614 CSR_WRITE_4(sc, JME_TIMER2, 0);
2616 /* Configure retry transmit period, retry limit value. */
2617 CSR_WRITE_4(sc, JME_TXTRHD,
2618 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2619 TXTRHD_RT_PERIOD_MASK) |
2620 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2621 TXTRHD_RT_LIMIT_MASK));
2623 #ifdef DEVICE_POLLING
2624 if (!(ifp->if_flags & IFF_POLLING))
2626 /* Initialize the interrupt mask. */
2627 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2628 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
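/*
 * JME_INTR_STATUS is write-one-to-clear; writing all ones above
 * acknowledges any events latched before this (re)initialization.
 */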
2631 * Enabling Tx/Rx DMA engines and Rx queue processing is
2632 * done after detection of valid link in jme_miibus_statchg.
2634 sc->jme_flags &= ~JME_FLAG_LINK;
2636 /* Set the current media. */
2637 mii = device_get_softc(sc->jme_miibus);
2640 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2642 ifp->if_flags |= IFF_RUNNING;
2643 ifp->if_flags &= ~IFF_OACTIVE;
2647 jme_stop(struct jme_softc *sc)
2649 struct ifnet *ifp = &sc->arpcom.ac_if;
2650 struct jme_txdesc *txd;
2651 struct jme_rxdesc *rxd;
2652 struct jme_rxdata *rdata;
2655 ASSERT_SERIALIZED(ifp->if_serializer);
2658 * Mark the interface down and cancel the watchdog timer.
2660 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2663 callout_stop(&sc->jme_tick_ch);
2664 sc->jme_flags &= ~JME_FLAG_LINK;
2667 * Disable interrupts.
2669 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2670 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2672 /* Disable updating shadow status block. */
2673 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2674 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2676 /* Stop receiver, transmitter. */
2681 * Free partially finished RX segments
2683 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2684 rdata = &sc->jme_cdata.jme_rx_data[r];
2685 if (rdata->jme_rxhead != NULL)
2686 m_freem(rdata->jme_rxhead);
2687 JME_RXCHAIN_RESET(sc, r);
2691 * Free RX and TX mbufs still in the queues.
2693 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2694 rdata = &sc->jme_cdata.jme_rx_data[r];
2695 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2696 rxd = &rdata->jme_rxdesc[i];
2697 if (rxd->rx_m != NULL) {
2698 bus_dmamap_unload(rdata->jme_rx_tag,
2705 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2706 txd = &sc->jme_cdata.jme_txdesc[i];
2707 if (txd->tx_m != NULL) {
2708 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2718 jme_stop_tx(struct jme_softc *sc)
2723 reg = CSR_READ_4(sc, JME_TXCSR);
2724 if ((reg & TXCSR_TX_ENB) == 0)
2726 reg &= ~TXCSR_TX_ENB;
2727 CSR_WRITE_4(sc, JME_TXCSR, reg);
2728 for (i = JME_TIMEOUT; i > 0; i--) {
2730 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2734 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2738 jme_stop_rx(struct jme_softc *sc)
2743 reg = CSR_READ_4(sc, JME_RXCSR);
2744 if ((reg & RXCSR_RX_ENB) == 0)
2746 reg &= ~RXCSR_RX_ENB;
2747 CSR_WRITE_4(sc, JME_RXCSR, reg);
2748 for (i = JME_TIMEOUT; i > 0; i--) {
2750 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2754 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2758 jme_init_tx_ring(struct jme_softc *sc)
2760 struct jme_chain_data *cd;
2761 struct jme_txdesc *txd;
2764 sc->jme_cdata.jme_tx_prod = 0;
2765 sc->jme_cdata.jme_tx_cons = 0;
2766 sc->jme_cdata.jme_tx_cnt = 0;
2768 cd = &sc->jme_cdata;
2769 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2770 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2771 txd = &sc->jme_cdata.jme_txdesc[i];
2773 txd->tx_desc = &cd->jme_tx_ring[i];
2777 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2778 sc->jme_cdata.jme_tx_ring_map,
2779 BUS_DMASYNC_PREWRITE);
2783 jme_init_ssb(struct jme_softc *sc)
2785 struct jme_chain_data *cd;
2787 cd = &sc->jme_cdata;
2788 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2789 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2790 BUS_DMASYNC_PREWRITE);
2794 jme_init_rx_ring(struct jme_softc *sc, int ring)
2796 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2797 struct jme_rxdesc *rxd;
2800 KKASSERT(rdata->jme_rxhead == NULL &&
2801 rdata->jme_rxtail == NULL &&
2802 rdata->jme_rxlen == 0);
2803 rdata->jme_rx_cons = 0;
2805 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2806 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2809 rxd = &rdata->jme_rxdesc[i];
2811 rxd->rx_desc = &rdata->jme_rx_ring[i];
2812 error = jme_newbuf(sc, ring, rxd, 1);
2817 bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2818 BUS_DMASYNC_PREWRITE);
2823 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2825 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2826 struct jme_desc *desc;
2828 struct jme_dmamap_ctx ctx;
2829 bus_dma_segment_t segs;
2833 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2837 * The JMC250 has a 64-bit boundary alignment limitation, so jme(4)
2838 * takes advantage of the hardware's 10-byte padding feature
2839 * in order not to copy the entire frame to align the IP header on a 32-bit boundary.
2842 m->m_len = m->m_pkthdr.len = MCLBYTES;
2846 error = bus_dmamap_load_mbuf(rdata->jme_rx_tag,
2847 rdata->jme_rx_sparemap,
2848 m, jme_dmamap_buf_cb, &ctx,
2850 if (error || ctx.nsegs == 0) {
2852 bus_dmamap_unload(rdata->jme_rx_tag,
2853 rdata->jme_rx_sparemap);
2855 if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
2860 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2864 if (rxd->rx_m != NULL) {
2865 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2866 BUS_DMASYNC_POSTREAD);
2867 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2869 map = rxd->rx_dmamap;
2870 rxd->rx_dmamap = rdata->jme_rx_sparemap;
2871 rdata->jme_rx_sparemap = map;
2874 desc = rxd->rx_desc;
2875 desc->buflen = htole32(segs.ds_len);
2876 desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2877 desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2878 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
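/*
 * Setting OWN hands the descriptor and the freshly loaded cluster
 * back to the chip; the chip clears OWN again once it has filled
 * the buffer and the Rx path picks it up.
 */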
2884 jme_set_vlan(struct jme_softc *sc)
2886 struct ifnet *ifp = &sc->arpcom.ac_if;
2889 ASSERT_SERIALIZED(ifp->if_serializer);
2891 reg = CSR_READ_4(sc, JME_RXMAC);
2892 reg &= ~RXMAC_VLAN_ENB;
2893 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2894 reg |= RXMAC_VLAN_ENB;
2895 CSR_WRITE_4(sc, JME_RXMAC, reg);
2899 jme_set_filter(struct jme_softc *sc)
2901 struct ifnet *ifp = &sc->arpcom.ac_if;
2902 struct ifmultiaddr *ifma;
2907 ASSERT_SERIALIZED(ifp->if_serializer);
2909 rxcfg = CSR_READ_4(sc, JME_RXMAC);
2910 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2914 * Always accept frames destined to our station address.
2915 * Always accept broadcast frames.
2917 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2919 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2920 if (ifp->if_flags & IFF_PROMISC)
2921 rxcfg |= RXMAC_PROMISC;
2922 if (ifp->if_flags & IFF_ALLMULTI)
2923 rxcfg |= RXMAC_ALLMULTI;
2924 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2925 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2926 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2931 * Set up the multicast address filter by passing all multicast
2932 * addresses through a CRC generator, and then using the low-order
2933 * 6 bits as an index into the 64-bit multicast hash table. The
2934 * high-order bit selects the register, while the low-order 5 bits
2935 * select the bit within the register.
2937 rxcfg |= RXMAC_MULTICAST;
2938 bzero(mchash, sizeof(mchash));
2940 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2941 if (ifma->ifma_addr->sa_family != AF_LINK)
2943 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2944 ifma->ifma_addr), ETHER_ADDR_LEN);
2946 /* Just want the 6 least significant bits. */
2949 /* Set the corresponding bit in the hash table. */
2950 mchash[crc >> 5] |= 1 << (crc & 0x1f);
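/*
 * For instance, a truncated CRC value of 42 (0x2a) sets bit 10 of
 * MAR1: mchash[42 >> 5] |= 1 << (42 & 0x1f).
 */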
2953 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2954 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2955 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2959 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2961 struct jme_softc *sc = arg1;
2962 struct ifnet *ifp = &sc->arpcom.ac_if;
2965 lwkt_serialize_enter(ifp->if_serializer);
2967 v = sc->jme_tx_coal_to;
2968 error = sysctl_handle_int(oidp, &v, 0, req);
2969 if (error || req->newptr == NULL)
2972 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2977 if (v != sc->jme_tx_coal_to) {
2978 sc->jme_tx_coal_to = v;
2979 if (ifp->if_flags & IFF_RUNNING)
2980 jme_set_tx_coal(sc);
2983 lwkt_serialize_exit(ifp->if_serializer);
2988 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2990 struct jme_softc *sc = arg1;
2991 struct ifnet *ifp = &sc->arpcom.ac_if;
2994 lwkt_serialize_enter(ifp->if_serializer);
2996 v = sc->jme_tx_coal_pkt;
2997 error = sysctl_handle_int(oidp, &v, 0, req);
2998 if (error || req->newptr == NULL)
3001 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3006 if (v != sc->jme_tx_coal_pkt) {
3007 sc->jme_tx_coal_pkt = v;
3008 if (ifp->if_flags & IFF_RUNNING)
3009 jme_set_tx_coal(sc);
3012 lwkt_serialize_exit(ifp->if_serializer);
3017 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3019 struct jme_softc *sc = arg1;
3020 struct ifnet *ifp = &sc->arpcom.ac_if;
3023 lwkt_serialize_enter(ifp->if_serializer);
3025 v = sc->jme_rx_coal_to;
3026 error = sysctl_handle_int(oidp, &v, 0, req);
3027 if (error || req->newptr == NULL)
3030 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3035 if (v != sc->jme_rx_coal_to) {
3036 sc->jme_rx_coal_to = v;
3037 if (ifp->if_flags & IFF_RUNNING)
3038 jme_set_rx_coal(sc);
3041 lwkt_serialize_exit(ifp->if_serializer);
3046 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3048 struct jme_softc *sc = arg1;
3049 struct ifnet *ifp = &sc->arpcom.ac_if;
3052 lwkt_serialize_enter(ifp->if_serializer);
3054 v = sc->jme_rx_coal_pkt;
3055 error = sysctl_handle_int(oidp, &v, 0, req);
3056 if (error || req->newptr == NULL)
3059 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3064 if (v != sc->jme_rx_coal_pkt) {
3065 sc->jme_rx_coal_pkt = v;
3066 if (ifp->if_flags & IFF_RUNNING)
3067 jme_set_rx_coal(sc);
3070 lwkt_serialize_exit(ifp->if_serializer);
3075 jme_set_tx_coal(struct jme_softc *sc)
3079 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3081 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3082 PCCTX_COAL_PKT_MASK;
3083 reg |= PCCTX_COAL_TXQ0;
3084 CSR_WRITE_4(sc, JME_PCCTX, reg);
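/*
 * The coalescing timeout (in microseconds, per the GPREG0_PCC_UNIT_US
 * setting in jme_init()) and the packet-count threshold for TXQ0 are
 * packed into a single PCCTX write.
 */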
3088 jme_set_rx_coal(struct jme_softc *sc)
3093 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3095 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3096 PCCRX_COAL_PKT_MASK;
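/*
 * Program coalescing only for the rings actually in use; the
 * remaining rings have their PCCRX register cleared.
 */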
3097 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3098 if (r < sc->jme_rx_ring_inuse)
3099 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3101 CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3105 #ifdef DEVICE_POLLING
3108 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3110 struct jme_softc *sc = ifp->if_softc;
3111 struct mbuf_chain chain[MAXCPU];
3115 ASSERT_SERIALIZED(ifp->if_serializer);
3119 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3122 case POLL_DEREGISTER:
3123 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3126 case POLL_AND_CHECK_STATUS:
3128 status = CSR_READ_4(sc, JME_INTR_STATUS);
3130 ether_input_chain_init(chain);
3131 for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
3132 prog += jme_rxeof_chain(sc, r, chain, count);
3134 ether_input_dispatch(chain);
3136 if (status & INTR_RXQ_DESC_EMPTY) {
3137 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3138 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3139 RXCSR_RX_ENB | RXCSR_RXQ_START);
3143 if (!ifq_is_empty(&ifp->if_snd))
3149 #endif /* DEVICE_POLLING */
3152 jme_rxring_dma_alloc(struct jme_softc *sc, bus_addr_t lowaddr, int ring)
3154 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3158 /* Create tag for Rx ring. */
3159 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
3160 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */
3161 lowaddr, /* lowaddr */
3162 BUS_SPACE_MAXADDR, /* highaddr */
3163 NULL, NULL, /* filter, filterarg */
3164 JME_RX_RING_SIZE(sc), /* maxsize */
3166 JME_RX_RING_SIZE(sc), /* maxsegsize */
3168 &rdata->jme_rx_ring_tag);
3170 device_printf(sc->jme_dev,
3171 "could not allocate %dth Rx ring DMA tag.\n", ring);
3175 /* Allocate DMA'able memory for RX ring */
3176 error = bus_dmamem_alloc(rdata->jme_rx_ring_tag,
3177 (void **)&rdata->jme_rx_ring,
3178 BUS_DMA_WAITOK | BUS_DMA_ZERO,
3179 &rdata->jme_rx_ring_map);
3181 device_printf(sc->jme_dev,
3182 "could not allocate DMA'able memory for "
3183 "%dth Rx ring.\n", ring);
3184 bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
3185 rdata->jme_rx_ring_tag = NULL;
3189 /* Load the DMA map for Rx ring. */
3190 error = bus_dmamap_load(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
3191 rdata->jme_rx_ring, JME_RX_RING_SIZE(sc),
3192 jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
3194 device_printf(sc->jme_dev,
3195 "could not load DMA'able memory for %dth Rx ring.\n", ring);
3196 bus_dmamem_free(rdata->jme_rx_ring_tag, rdata->jme_rx_ring,
3197 rdata->jme_rx_ring_map);
3198 bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
3199 rdata->jme_rx_ring_tag = NULL;
3202 rdata->jme_rx_ring_paddr = busaddr;
3208 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3210 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3213 /* Create tag for Rx buffers. */
3214 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3215 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3216 sc->jme_lowaddr, /* lowaddr */
3217 BUS_SPACE_MAXADDR, /* highaddr */
3218 NULL, NULL, /* filter, filterarg */
3219 MCLBYTES, /* maxsize */
3221 MCLBYTES, /* maxsegsize */
3223 &rdata->jme_rx_tag);
3225 device_printf(sc->jme_dev,
3226 "could not create %dth Rx DMA tag.\n", ring);
3230 /* Create DMA maps for Rx buffers. */
3231 error = bus_dmamap_create(rdata->jme_rx_tag, 0,
3232 &rdata->jme_rx_sparemap);
3234 device_printf(sc->jme_dev,
3235 "could not create %dth spare Rx dmamap.\n", ring);
3236 bus_dma_tag_destroy(rdata->jme_rx_tag);
3237 rdata->jme_rx_tag = NULL;
3240 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3241 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3243 error = bus_dmamap_create(rdata->jme_rx_tag, 0,
3248 device_printf(sc->jme_dev,
3249 "could not create %dth Rx dmamap "
3250 "for %dth RX ring.\n", i, ring);
3252 for (j = 0; j < i; ++j) {
3253 rxd = &rdata->jme_rxdesc[j];
3254 bus_dmamap_destroy(rdata->jme_rx_tag,
3257 bus_dmamap_destroy(rdata->jme_rx_tag,
3258 rdata->jme_rx_sparemap);
3259 bus_dma_tag_destroy(rdata->jme_rx_tag);
3260 rdata->jme_rx_tag = NULL;
3268 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3270 struct mbuf_chain chain[MAXCPU];
3273 ether_input_chain_init(chain);
3274 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3275 if (status & jme_rx_status[r].jme_coal)
3276 prog += jme_rxeof_chain(sc, r, chain, -1);
3279 ether_input_dispatch(chain);
3283 jme_enable_rss(struct jme_softc *sc)
3285 uint32_t rssc, key, ind;
3288 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3290 rssc = RSSC_HASH_64_ENTRY;
3291 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3292 rssc |= sc->jme_rx_ring_inuse >> 1;
3293 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3294 CSR_WRITE_4(sc, JME_RSSC, rssc);
3296 key = 0x6d5a6d5a; /* XXX */
3297 for (i = 0; i < RSSKEY_NREGS; ++i)
3298 CSR_WRITE_4(sc, RSSKEY_REG(i), key);
3301 if (sc->jme_rx_ring_inuse == JME_NRXRING_2) {
3303 } else if (sc->jme_rx_ring_inuse == JME_NRXRING_4) {
3306 panic("%s: invalid # of RX rings (%d)\n",
3307 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse);
3309 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3310 for (i = 0; i < RSSTBL_NREGS; ++i)
3311 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3315 jme_disable_rss(struct jme_softc *sc)
3317 sc->jme_rx_ring_inuse = JME_NRXRING_1;
3318 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);