/*
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
/* Define the following to enable printing Rx errors. */
#undef JME_SHOW_ERRORS

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef DEVICE_POLLING
static void	jme_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	jme_intr(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *, int);
static int	jme_rxeof_chain(struct jme_softc *, int,
		    struct mbuf_chain *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);

static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_softc *, int);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
static int	jme_rxring_dma_alloc(struct jme_softc *, int);
static int	jme_rxbuf_dma_alloc(struct jme_softc *, int);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
};

static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int jme_rx_ring_count = JME_NRXRING_DEF;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
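
/*
 * The tunables above can be set from the boot loader, e.g. in
 * /boot/loader.conf (illustrative values, not recommendations):
 *
 *   hw.jme.rx_desc_count="512"
 *   hw.jme.rx_ring_count="2"
 *
 * jme_attach() re-aligns and clamps whatever is configured here.
 */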
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	}
	if (sc->jme_phyaddr != phy)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
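
	/*
	 * The chip clears SMI_OP_EXECUTE when the MDIO transaction
	 * completes, so the loop below simply polls for that bit to
	 * drop and gives up after JME_PHY_TIMEOUT iterations.
	 */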
	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
			      "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	}
	if (sc->jme_phyaddr != phy)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
			      "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver has to reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the values of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring them afterwards is not sufficient to
	 * guarantee a correct MAC state, because stopping the MAC
	 * can take a while and the hardware might have updated
	 * JME_TXNDA/JME_RXNDA during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(sc, r);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);
	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}
static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xffff;
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1b;
		eaddr[2] = 0x8c;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
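
/*
 * Illustration of the PAR register layout assumed above: for the
 * (made-up) MAC address 00:1f:c6:aa:bb:cc the chip would hold
 * PAR0 = 0xaac61f00 (bytes 0-3, least significant byte first) and
 * the low 16 bits of PAR1 = 0xccbb (bytes 4-5).
 */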
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Calculate rx rings based on ncpus2
	 */
	sc->jme_rx_ring_cnt = jme_rx_ring_count;
	if (sc->jme_rx_ring_cnt <= 0)
		sc->jme_rx_ring_cnt = JME_NRXRING_1;
	if (sc->jme_rx_ring_cnt > ncpus2)
		sc->jme_rx_ring_cnt = ncpus2;

	if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
		sc->jme_rx_ring_cnt = JME_NRXRING_4;
	else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
		sc->jme_rx_ring_cnt = JME_NRXRING_2;
	sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
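
	/*
	 * Example of the clamping above: ncpus2 is the CPU count rounded
	 * down to a power of two, so a 6-CPU system starts with at most
	 * 4 Rx rings and a 3-CPU system with at most 2; the normalization
	 * then keeps the count at one of the ring configurations the
	 * chip supports (1, 2 or 4).
	 */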
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/*
	 * Allocate IO memory.
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access should use different
	 * BARs to access registers, it's a waste of time to use I/O
	 * register space access.  JMC250 uses 16K to map the entire
	 * memory space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return (ENXIO);
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->jme_irq_rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}
	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}
	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has a fixed PHY address whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}
	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}
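
	/*
	 * Example of the mapping above: a PCIe max read request size of
	 * 256 bytes selects TXCSR_DMA_SIZE_256, while 512 bytes or more
	 * falls through to the 512-byte default; Rx DMA is always capped
	 * at 128 bytes.
	 */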
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_HWTAGGING;
	if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = JME_CSUM_FEATURES;
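
	/*
	 * Tx checksum offload can still be re-enabled at run time, for
	 * example with "ifconfig jme0 txcsum" (illustrative interface
	 * name); the SIOCSIFCAP handler in jme_ioctl() below flips
	 * IFCAP_TXCSUM and if_hwassist accordingly.
	 */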
	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
			}
			/* XXX should we clear JME_WA_EXTFIFO */
		}
	}

	ether_ifattach(ifp, eaddr, NULL);
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
	    &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return (0);
fail:
	jme_detach(dev);
	return (error);
}
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
		    sc->jme_irq_res);
	}
	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	char rx_ring_pkt[32];
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
	    0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
	    0, "RX ring in use");

#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
		SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_pkt, CTLFLAG_RW,
		    &sc->jme_rx_ring_pkt[r],
		    0, "RXed packets");
	}
#endif
	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descs was set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned to JME_NDESC_ALIGN (currently 16).
	 */
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}
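
/*
 * Worked example of the clamping above: with 512 Tx descriptors
 * (illustrative count) coal_max is 512 / 6 = 85, so a configured Tx
 * coalescing packet count of, say, 128 would be reduced to 85.
 */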
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i;

	sc->jme_cdata.jme_txdesc =
	    kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
		    kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			    M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    JME_TX_RING_SIZE(sc),
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(sc, i);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(sc, i);
		if (error)
			return error;
	}
	return 0;
}
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx rings */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

/*
 * Unlike other ethernet controllers, the JMC250 requires an
 * explicit reset of the link speed to 10/100Mbps, as a gigabit
 * link consumes more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation, but we don't know whether that operation
 * will succeed, as we have no control after powering off.  If
 * the renegotiation fails, WOL may not work.  Running at 1Gbps
 * draws more power than the 375mA at 3.3V specified in the PCI
 * specification, and that would result in the ethernet
 * controller's power being shut off completely.
 *
 * Save the currently negotiated media speed/duplex/flow-control
 * to the softc and restore the same link again after resuming.
 * PHY handling such as power down/resetting to 100Mbps may be
 * better handled in a suspend method in the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			pause("jmelnk", hz);
		}
		if (i == MII_ANEGTICKS_GIGE) {
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
		}
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	uint8_t pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	jme_stop(sc);
	jme_setwol(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t pmstat;
	uint8_t pmc;

	lwkt_serialize_enter(ifp->if_serializer);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
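	/*
	 * Note on the accounting above: i is 0 in 64-bit mode and 1 in
	 * 32-bit mode, so after the final "txd->tx_ndesc += nsegs" below
	 * tx_ndesc ends up as nsegs + 1 (payload segments plus the symbol
	 * descriptor) or exactly nsegs, respectively.
	 */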
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to the TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed, but the availability of Tx checksum
			 * offload should be checked against the new MTU size,
			 * as the FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if (mask & IFCAP_RSS) {
			ifp->if_capenable ^= IFCAP_RSS;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);

		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;

		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
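
	/*
	 * Only the COAL/COMP bits of queues whose coalescing event
	 * actually fired are written back here, so a completion that
	 * arrives for another queue right after the read above is not
	 * acknowledged by accident and will raise a fresh interrupt.
	 */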
	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under heavy
			 * load so cache the RXCSR value and write the ORed
			 * value with the kick command to the RXCSR.  This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire chain of buffers for the transmitted frame. In
		 * other words, the JME_TD_OWN bit is valid only at the
		 * first descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
static __inline void
jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &rdata->jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
	}
}
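
/*
 * Note that jme_discard_rxbufs() hands the descriptors straight back
 * to the hardware with their old mbuf clusters still attached; the
 * buffers of a bad frame are recycled in place instead of being freed
 * and reallocated.
 */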
static __inline struct pktinfo *
jme_pktinfo(struct pktinfo *pi, uint32_t flags)
{
	if (flags & JME_RD_IPV4)
		pi->pi_netisr = NETISR_IP;
	else if (flags & JME_RD_IPV6)
		pi->pi_netisr = NETISR_IPV6;
	else
		return NULL;

	pi->pi_flags = 0;
	pi->pi_l3proto = IPPROTO_UNKNOWN;

	if (flags & JME_RD_MORE_FRAG)
		pi->pi_flags |= PKTINFO_FLAG_FRAG;
	else if (flags & JME_RD_TCP)
		pi->pi_l3proto = IPPROTO_TCP;
	else if (flags & JME_RD_UDP)
		pi->pi_l3proto = IPPROTO_UDP;

	return pi;
}
/* Receive a frame. */
static void
jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
			"hash 0x%08x, hash info 0x%08x\n",
			ring, flags, hash, hashinfo);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, ring, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
			      __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(sc, ring);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				     (MCLBYTES * (nsegs - 2)));
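				/*
				 * Worked example (assuming 2048-byte
				 * clusters): a frame with jme_rxlen =
				 * 3000 arrives in two segments; the
				 * first mbuf gets 2048 - 10 = 2038
				 * bytes and the last one the remaining
				 * 3000 - 2038 = 962 bytes.
				 */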
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding that
			 * is used to align the IP header on a 32bit
			 * boundary.  Also note, the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			ether_input_chain(ifp, m, pi, chain);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc, ring);
#ifdef JME_RSS_DEBUG
			sc->jme_rx_ring_pkt[ring]++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
}
static int
jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
		int count)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	prog = 0;
	for (;;) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif
		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check number of segments against received bytes.
		 * A non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors. I'm not
		 * sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
				  "and packet size(%d) mismatch\n",
				  nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc, ring, chain);
		prog++;
	}
	return prog;
}

static void
jme_rxeof(struct jme_softc *sc, int ring)
{
	struct mbuf_chain chain[MAXCPU];

	ether_input_chain_init(chain);
	if (jme_rxeof_chain(sc, ring, chain, -1))
		ether_input_dispatch(chain);
}
static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
static void
jme_reset(struct jme_softc *sc)
{
	uint32_t val;

	/* Make sure that TX and RX are stopped */
	jme_stop_tx(sc);
	jme_stop_rx(sc);

	/* Start reset */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);

	/*
	 * Hold reset bit before stop reset
	 */

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop reset */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/*
	 * Clear reset bit after stop reset
	 */

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Enable TX and RX */
	val = CSR_READ_4(sc, JME_TXCSR);
	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
	val = CSR_READ_4(sc, JME_RXCSR);
	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
	/* Flush */
	CSR_READ_4(sc, JME_TXCSR);
	CSR_READ_4(sc, JME_RXCSR);

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop TX and RX */
	jme_stop_tx(sc);
	jme_stop_rx(sc);
}
2334 /* Stop TX and RX */
2342 struct jme_softc *sc = xsc;
2343 struct ifnet *ifp = &sc->arpcom.ac_if;
2344 struct mii_data *mii;
2345 uint8_t eaddr[ETHER_ADDR_LEN];
2350 ASSERT_SERIALIZED(ifp->if_serializer);
2353 * Cancel any pending I/O.
2358 * Reset the chip to a known state.
2363 howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2364 KKASSERT(sc->jme_txd_spare >= 1);
* If we use 64-bit address mode for transmitting, each Tx request
* needs one more symbol descriptor.
2370 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2371 sc->jme_txd_spare += 1;
2373 if (ifp->if_capenable & IFCAP_RSS)
2376 jme_disable_rss(sc);
2378 /* Init RX descriptors */
2379 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2380 error = jme_init_rx_ring(sc, r);
2382 if_printf(ifp, "initialization failed: "
2383 "no memory for %dth RX ring.\n", r);
2389 /* Init TX descriptors */
2390 jme_init_tx_ring(sc);
2392 /* Initialize shadow status block. */
2395 /* Reprogram the station address. */
2396 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2397 CSR_WRITE_4(sc, JME_PAR0,
2398 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2399 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
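/*
 * Illustrative packing: for the MAC address 00:11:22:33:44:55,
 * PAR0 = 0x33221100 (eaddr[3..0]) and PAR1 = 0x00005544
 * (eaddr[5..4]); the station address is stored little-endian.
 */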
2402 * Configure Tx queue.
2403 * Tx priority queue weight value : 0
2404 * Tx FIFO threshold for processing next packet : 16QW
2405 * Maximum Tx DMA length : 512
2406 * Allow Tx DMA burst.
2408 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2409 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2410 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2411 sc->jme_txcsr |= sc->jme_tx_dma_size;
2412 sc->jme_txcsr |= TXCSR_DMA_BURST;
2413 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2415 /* Set Tx descriptor counter. */
2416 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2418 /* Set Tx ring address to the hardware. */
2419 paddr = sc->jme_cdata.jme_tx_ring_paddr;
2420 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2421 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2423 /* Configure TxMAC parameters. */
2424 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2425 reg |= TXMAC_THRESH_1_PKT;
2426 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2427 CSR_WRITE_4(sc, JME_TXMAC, reg);
2430 * Configure Rx queue.
2431 * FIFO full threshold for transmitting Tx pause packet : 128T
2432 * FIFO threshold for processing next packet : 128QW
2434 * Max Rx DMA length : 128
2435 * Rx descriptor retry : 32
2436 * Rx descriptor retry time gap : 256ns
2437 * Don't receive runt/bad frame.
2439 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2442 * Since Rx FIFO size is 4K bytes, receiving frames larger
2443 * than 4K bytes will suffer from Rx FIFO overruns. So
2444 * decrease FIFO threshold to reduce the FIFO overruns for
2445 * frames larger than 4000 bytes.
2446 * For best performance of standard MTU sized frames use
2447 * maximum allowable FIFO threshold, 128QW.
2449 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2451 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2453 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2455 /* Improve PCI Express compatibility */
2456 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2458 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2459 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2460 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2461 /* XXX TODO DROP_BAD */
2463 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2464 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2466 /* Set Rx descriptor counter. */
2467 CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2469 /* Set Rx ring address to the hardware. */
2470 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2471 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2472 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2475 /* Clear receive filter. */
2476 CSR_WRITE_4(sc, JME_RXMAC, 0);
2478 /* Set up the receive filter. */
* Disable all WOL bits, as WOL can interfere with normal Rx
* operation. Also clear the WOL detection status bits.
2486 reg = CSR_READ_4(sc, JME_PMCS);
2487 reg &= ~PMCS_WOL_ENB_MASK;
2488 CSR_WRITE_4(sc, JME_PMCS, reg);
* Pad 10 bytes right before the received frame. This greatly
* helps Rx performance on strict-alignment architectures, as
* the driver does not need to copy the frame to align the payload.
2495 reg = CSR_READ_4(sc, JME_RXMAC);
2496 reg |= RXMAC_PAD_10BYTES;
2498 if (ifp->if_capenable & IFCAP_RXCSUM)
2499 reg |= RXMAC_CSUM_ENB;
2500 CSR_WRITE_4(sc, JME_RXMAC, reg);
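/*
 * Alignment note (illustrative assumption): 10 pad bytes plus the
 * 14-byte Ethernet header place the IP header at offset 24, which
 * is both 4- and 8-byte aligned, so strict-alignment CPUs can
 * access the payload in place.
 */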
2502 /* Configure general purpose reg0 */
2503 reg = CSR_READ_4(sc, JME_GPREG0);
2504 reg &= ~GPREG0_PCC_UNIT_MASK;
2505 /* Set PCC timer resolution to micro-seconds unit. */
2506 reg |= GPREG0_PCC_UNIT_US;
* Disable all shadow register posting, as we have to read
* the JME_INTR_STATUS register in jme_intr. It also seems
* hard to keep the interrupt status synchronized between
* hardware and software with shadow posting, due to the
* requirements of bus_dmamap_sync(9).
2514 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2515 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2516 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2517 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2518 /* Disable posting of DW0. */
2519 reg &= ~GPREG0_POST_DW0_ENB;
2520 /* Clear PME message. */
2521 reg &= ~GPREG0_PME_ENB;
2522 /* Set PHY address. */
2523 reg &= ~GPREG0_PHY_ADDR_MASK;
2524 reg |= sc->jme_phyaddr;
2525 CSR_WRITE_4(sc, JME_GPREG0, reg);
2527 /* Configure Tx queue 0 packet completion coalescing. */
2528 jme_set_tx_coal(sc);
2530 /* Configure Rx queue 0 packet completion coalescing. */
2531 jme_set_rx_coal(sc);
2533 /* Configure shadow status block but don't enable posting. */
2534 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2535 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2536 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2538 /* Disable Timer 1 and Timer 2. */
2539 CSR_WRITE_4(sc, JME_TIMER1, 0);
2540 CSR_WRITE_4(sc, JME_TIMER2, 0);
2542 /* Configure retry transmit period, retry limit value. */
2543 CSR_WRITE_4(sc, JME_TXTRHD,
2544 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2545 TXTRHD_RT_PERIOD_MASK) |
2546 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
    TXTRHD_RT_LIMIT_MASK));
2549 #ifdef DEVICE_POLLING
2550 if (!(ifp->if_flags & IFF_POLLING))
2552 /* Initialize the interrupt mask. */
2553 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2554 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2557 * Enabling Tx/Rx DMA engines and Rx queue processing is
2558 * done after detection of valid link in jme_miibus_statchg.
2560 sc->jme_flags &= ~JME_FLAG_LINK;
2562 /* Set the current media. */
2563 mii = device_get_softc(sc->jme_miibus);
2566 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2568 ifp->if_flags |= IFF_RUNNING;
2569 ifp->if_flags &= ~IFF_OACTIVE;
2573 jme_stop(struct jme_softc *sc)
2575 struct ifnet *ifp = &sc->arpcom.ac_if;
2576 struct jme_txdesc *txd;
2577 struct jme_rxdesc *rxd;
2578 struct jme_rxdata *rdata;
2581 ASSERT_SERIALIZED(ifp->if_serializer);
2584 * Mark the interface down and cancel the watchdog timer.
2586 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2589 callout_stop(&sc->jme_tick_ch);
2590 sc->jme_flags &= ~JME_FLAG_LINK;
2593 * Disable interrupts.
2595 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2596 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2598 /* Disable updating shadow status block. */
2599 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2600 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2602 /* Stop receiver, transmitter. */
* Free partially finished RX segments
2609 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2610 rdata = &sc->jme_cdata.jme_rx_data[r];
2611 if (rdata->jme_rxhead != NULL)
2612 m_freem(rdata->jme_rxhead);
2613 JME_RXCHAIN_RESET(sc, r);
2617 * Free RX and TX mbufs still in the queues.
2619 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2620 rdata = &sc->jme_cdata.jme_rx_data[r];
2621 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2622 rxd = &rdata->jme_rxdesc[i];
2623 if (rxd->rx_m != NULL) {
2624 bus_dmamap_unload(rdata->jme_rx_tag,
2631 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2632 txd = &sc->jme_cdata.jme_txdesc[i];
2633 if (txd->tx_m != NULL) {
2634 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2644 jme_stop_tx(struct jme_softc *sc)
2649 reg = CSR_READ_4(sc, JME_TXCSR);
2650 if ((reg & TXCSR_TX_ENB) == 0)
2652 reg &= ~TXCSR_TX_ENB;
2653 CSR_WRITE_4(sc, JME_TXCSR, reg);
2654 for (i = JME_TIMEOUT; i > 0; i--) {
2656 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2660 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2664 jme_stop_rx(struct jme_softc *sc)
2669 reg = CSR_READ_4(sc, JME_RXCSR);
2670 if ((reg & RXCSR_RX_ENB) == 0)
2672 reg &= ~RXCSR_RX_ENB;
2673 CSR_WRITE_4(sc, JME_RXCSR, reg);
2674 for (i = JME_TIMEOUT; i > 0; i--) {
2676 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2684 jme_init_tx_ring(struct jme_softc *sc)
2686 struct jme_chain_data *cd;
2687 struct jme_txdesc *txd;
2690 sc->jme_cdata.jme_tx_prod = 0;
2691 sc->jme_cdata.jme_tx_cons = 0;
2692 sc->jme_cdata.jme_tx_cnt = 0;
2694 cd = &sc->jme_cdata;
2695 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2696 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2697 txd = &sc->jme_cdata.jme_txdesc[i];
2699 txd->tx_desc = &cd->jme_tx_ring[i];
2705 jme_init_ssb(struct jme_softc *sc)
2707 struct jme_chain_data *cd;
2709 cd = &sc->jme_cdata;
2710 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2714 jme_init_rx_ring(struct jme_softc *sc, int ring)
2716 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2717 struct jme_rxdesc *rxd;
2720 KKASSERT(rdata->jme_rxhead == NULL &&
2721 rdata->jme_rxtail == NULL &&
2722 rdata->jme_rxlen == 0);
2723 rdata->jme_rx_cons = 0;
2725 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2726 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2729 rxd = &rdata->jme_rxdesc[i];
2731 rxd->rx_desc = &rdata->jme_rx_ring[i];
2732 error = jme_newbuf(sc, ring, rxd, 1);
2740 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2742 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2743 struct jme_desc *desc;
2745 bus_dma_segment_t segs;
2749 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
* The JMC250 has a 64-bit boundary alignment limitation, so
* jme(4) takes advantage of the hardware's 10-byte padding
* feature in order not to copy the entire frame to align the
* IP header on
2758 m->m_len = m->m_pkthdr.len = MCLBYTES;
2760 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2761 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2766 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2770 if (rxd->rx_m != NULL) {
2771 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2772 BUS_DMASYNC_POSTREAD);
2773 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2775 map = rxd->rx_dmamap;
2776 rxd->rx_dmamap = rdata->jme_rx_sparemap;
2777 rdata->jme_rx_sparemap = map;
2780 desc = rxd->rx_desc;
2781 desc->buflen = htole32(segs.ds_len);
2782 desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2783 desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2784 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2790 jme_set_vlan(struct jme_softc *sc)
2792 struct ifnet *ifp = &sc->arpcom.ac_if;
2795 ASSERT_SERIALIZED(ifp->if_serializer);
2797 reg = CSR_READ_4(sc, JME_RXMAC);
2798 reg &= ~RXMAC_VLAN_ENB;
2799 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2800 reg |= RXMAC_VLAN_ENB;
2801 CSR_WRITE_4(sc, JME_RXMAC, reg);
2805 jme_set_filter(struct jme_softc *sc)
2807 struct ifnet *ifp = &sc->arpcom.ac_if;
2808 struct ifmultiaddr *ifma;
2813 ASSERT_SERIALIZED(ifp->if_serializer);
2815 rxcfg = CSR_READ_4(sc, JME_RXMAC);
2816 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2820 * Always accept frames destined to our station address.
2821 * Always accept broadcast frames.
2823 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2825 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2826 if (ifp->if_flags & IFF_PROMISC)
2827 rxcfg |= RXMAC_PROMISC;
2828 if (ifp->if_flags & IFF_ALLMULTI)
2829 rxcfg |= RXMAC_ALLMULTI;
2830 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2831 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2832 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2837 * Set up the multicast address filter by passing all multicast
2838 * addresses through a CRC generator, and then using the low-order
2839 * 6 bits as an index into the 64 bit multicast hash table. The
2840 * high order bits select the register, while the rest of the bits
2841 * select the bit within the register.
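*
* Worked example (illustrative): if the 6 low-order bits of the
* big-endian CRC are 0x2d (45), the driver sets
* mchash[0x2d >> 5] |= 1 << (0x2d & 0x1f), i.e. bit 13 of MAR1.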
2843 rxcfg |= RXMAC_MULTICAST;
2844 bzero(mchash, sizeof(mchash));
2846 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2847 if (ifma->ifma_addr->sa_family != AF_LINK)
2849 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2850 ifma->ifma_addr), ETHER_ADDR_LEN);
2852 /* Just want the 6 least significant bits. */
2855 /* Set the corresponding bit in the hash table. */
2856 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2859 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2860 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2861 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2865 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2867 struct jme_softc *sc = arg1;
2868 struct ifnet *ifp = &sc->arpcom.ac_if;
2871 lwkt_serialize_enter(ifp->if_serializer);
2873 v = sc->jme_tx_coal_to;
2874 error = sysctl_handle_int(oidp, &v, 0, req);
2875 if (error || req->newptr == NULL)
2878 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2883 if (v != sc->jme_tx_coal_to) {
2884 sc->jme_tx_coal_to = v;
2885 if (ifp->if_flags & IFF_RUNNING)
2886 jme_set_tx_coal(sc);
2889 lwkt_serialize_exit(ifp->if_serializer);
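/*
 * Usage sketch (hypothetical sysctl node name): a userland write
 * such as "sysctl dev.jme.0.tx_coal_to=<usec>" funnels through a
 * handler like the one above, which rejects out-of-range values and
 * reprograms the hardware only while the interface is running; the
 * three sibling handlers below follow the same pattern.
 */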
2894 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2896 struct jme_softc *sc = arg1;
2897 struct ifnet *ifp = &sc->arpcom.ac_if;
2900 lwkt_serialize_enter(ifp->if_serializer);
2902 v = sc->jme_tx_coal_pkt;
2903 error = sysctl_handle_int(oidp, &v, 0, req);
2904 if (error || req->newptr == NULL)
2907 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2912 if (v != sc->jme_tx_coal_pkt) {
2913 sc->jme_tx_coal_pkt = v;
2914 if (ifp->if_flags & IFF_RUNNING)
2915 jme_set_tx_coal(sc);
2918 lwkt_serialize_exit(ifp->if_serializer);
2923 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2925 struct jme_softc *sc = arg1;
2926 struct ifnet *ifp = &sc->arpcom.ac_if;
2929 lwkt_serialize_enter(ifp->if_serializer);
2931 v = sc->jme_rx_coal_to;
2932 error = sysctl_handle_int(oidp, &v, 0, req);
2933 if (error || req->newptr == NULL)
2936 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2941 if (v != sc->jme_rx_coal_to) {
2942 sc->jme_rx_coal_to = v;
2943 if (ifp->if_flags & IFF_RUNNING)
2944 jme_set_rx_coal(sc);
2947 lwkt_serialize_exit(ifp->if_serializer);
2952 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2954 struct jme_softc *sc = arg1;
2955 struct ifnet *ifp = &sc->arpcom.ac_if;
2958 lwkt_serialize_enter(ifp->if_serializer);
2960 v = sc->jme_rx_coal_pkt;
2961 error = sysctl_handle_int(oidp, &v, 0, req);
2962 if (error || req->newptr == NULL)
2965 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2970 if (v != sc->jme_rx_coal_pkt) {
2971 sc->jme_rx_coal_pkt = v;
2972 if (ifp->if_flags & IFF_RUNNING)
2973 jme_set_rx_coal(sc);
2976 lwkt_serialize_exit(ifp->if_serializer);
2981 jme_set_tx_coal(struct jme_softc *sc)
2985 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2987 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2988 PCCTX_COAL_PKT_MASK;
2989 reg |= PCCTX_COAL_TXQ0;
2990 CSR_WRITE_4(sc, JME_PCCTX, reg);
2994 jme_set_rx_coal(struct jme_softc *sc)
2999 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3001 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3002 PCCRX_COAL_PKT_MASK;
3003 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3004 if (r < sc->jme_rx_ring_inuse)
3005 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3007 CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3011 #ifdef DEVICE_POLLING
3014 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3016 struct jme_softc *sc = ifp->if_softc;
3017 struct mbuf_chain chain[MAXCPU];
3021 ASSERT_SERIALIZED(ifp->if_serializer);
3025 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3028 case POLL_DEREGISTER:
3029 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3032 case POLL_AND_CHECK_STATUS:
3034 status = CSR_READ_4(sc, JME_INTR_STATUS);
3036 ether_input_chain_init(chain);
3037 for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
3038 prog += jme_rxeof_chain(sc, r, chain, count);
3040 ether_input_dispatch(chain);
3042 if (status & INTR_RXQ_DESC_EMPTY) {
3043 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3044 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3045 RXCSR_RX_ENB | RXCSR_RXQ_START);
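/*
 * Note (illustrative): INTR_RXQ_DESC_EMPTY indicates the chip ran
 * out of Rx descriptors; now that jme_rxeof_chain() has refilled
 * them, re-enabling Rx with RXCSR_RXQ_START kicks the queue so
 * reception resumes.
 */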
3049 if (!ifq_is_empty(&ifp->if_snd))
3055 #endif /* DEVICE_POLLING */
3058 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3060 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3064 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3065 JME_RX_RING_ALIGN, 0,
3066 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3067 JME_RX_RING_SIZE(sc),
3068 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3070 device_printf(sc->jme_dev,
3071 "could not allocate %dth Rx ring.\n", ring);
3074 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3075 rdata->jme_rx_ring_map = dmem.dmem_map;
3076 rdata->jme_rx_ring = dmem.dmem_addr;
3077 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3083 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3085 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3088 /* Create tag for Rx buffers. */
3089 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3090 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3091 BUS_SPACE_MAXADDR, /* lowaddr */
3092 BUS_SPACE_MAXADDR, /* highaddr */
3093 NULL, NULL, /* filter, filterarg */
3094 MCLBYTES, /* maxsize */
3096 MCLBYTES, /* maxsegsize */
3097 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3098 &rdata->jme_rx_tag);
3100 device_printf(sc->jme_dev,
3101 "could not create %dth Rx DMA tag.\n", ring);
3105 /* Create DMA maps for Rx buffers. */
3106 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3107 &rdata->jme_rx_sparemap);
3109 device_printf(sc->jme_dev,
3110 "could not create %dth spare Rx dmamap.\n", ring);
3111 bus_dma_tag_destroy(rdata->jme_rx_tag);
3112 rdata->jme_rx_tag = NULL;
3115 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3116 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3118 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3123 device_printf(sc->jme_dev,
3124 "could not create %dth Rx dmamap "
3125 "for %dth RX ring.\n", i, ring);
3127 for (j = 0; j < i; ++j) {
3128 rxd = &rdata->jme_rxdesc[j];
3129 bus_dmamap_destroy(rdata->jme_rx_tag,
3132 bus_dmamap_destroy(rdata->jme_rx_tag,
3133 rdata->jme_rx_sparemap);
3134 bus_dma_tag_destroy(rdata->jme_rx_tag);
3135 rdata->jme_rx_tag = NULL;
3143 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3145 struct mbuf_chain chain[MAXCPU];
3148 ether_input_chain_init(chain);
3149 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3150 if (status & jme_rx_status[r].jme_coal)
3151 prog += jme_rxeof_chain(sc, r, chain, -1);
3154 ether_input_dispatch(chain);
3158 jme_enable_rss(struct jme_softc *sc)
3161 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3164 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3166 KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3167 sc->jme_rx_ring_inuse == JME_NRXRING_4,
3168 ("%s: invalid # of RX rings (%d)\n",
3169 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3171 rssc = RSSC_HASH_64_ENTRY;
3172 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3173 rssc |= sc->jme_rx_ring_inuse >> 1;
3174 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3175 CSR_WRITE_4(sc, JME_RSSC, rssc);
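/*
 * Note (illustrative): jme_rx_ring_inuse >> 1 encodes the ring
 * count into RSSC (2 rings -> 1, 4 rings -> 2), which matches the
 * KASSERT above restricting RSS to 2 or 4 rings.
 */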
3177 toeplitz_get_key(key, sizeof(key));
3178 for (i = 0; i < RSSKEY_NREGS; ++i) {
3181 keyreg = RSSKEY_REGVAL(key, i);
3182 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3184 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
* Create the redirect table in the following fashion:
3189 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
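*
* Worked example (illustrative, assuming RSSTBL_REGSIZE == 4, i.e.
* four one-byte entries per 32-bit redirect register): with 4 rings
* in use each register is programmed to 0x03020100, spreading hash
* values round-robin across rings 0-3; with 2 rings it becomes
* 0x01000100.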
3192 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3195 q = i % sc->jme_rx_ring_inuse;
3196 ind |= q << (i * 8);
3198 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3200 for (i = 0; i < RSSTBL_NREGS; ++i)
3201 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3205 jme_disable_rss(struct jme_softc *sc)
3207 sc->jme_rx_ring_inuse = JME_NRXRING_1;
3208 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);