/*
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
 */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
/* Define the following to disable printing Rx errors. */
#undef JME_SHOW_ERRORS

#define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
static int jme_probe(device_t);
static int jme_attach(device_t);
static int jme_detach(device_t);
static int jme_shutdown(device_t);
static int jme_suspend(device_t);
static int jme_resume(device_t);

static int jme_miibus_readreg(device_t, int, int);
static int jme_miibus_writereg(device_t, int, int, int);
static void jme_miibus_statchg(device_t);

static void jme_init(void *);
static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void jme_start(struct ifnet *);
static void jme_watchdog(struct ifnet *);
static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int jme_mediachange(struct ifnet *);
static void jme_poll(struct ifnet *, enum poll_cmd, int);

static void jme_intr(void *);
static void jme_txeof(struct jme_softc *);
static void jme_rxeof(struct jme_softc *, int, int);
static void jme_rx_intr(struct jme_softc *, uint32_t);

static int jme_dma_alloc(struct jme_softc *);
static void jme_dma_free(struct jme_softc *, int);
static void jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
static void jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
    bus_size_t, int);
static int jme_init_rx_ring(struct jme_softc *, int);
static void jme_init_tx_ring(struct jme_softc *);
static void jme_init_ssb(struct jme_softc *);
static int jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
static int jme_encap(struct jme_softc *, struct mbuf **);
static void jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
static int jme_rxring_dma_alloc(struct jme_softc *, bus_addr_t, int);
static int jme_rxbuf_dma_alloc(struct jme_softc *, int);

static void jme_tick(void *);
static void jme_stop(struct jme_softc *);
static void jme_reset(struct jme_softc *);
static void jme_set_vlan(struct jme_softc *);
static void jme_set_filter(struct jme_softc *);
static void jme_stop_tx(struct jme_softc *);
static void jme_stop_rx(struct jme_softc *);
static void jme_mac_config(struct jme_softc *);
static void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);

static void jme_setwol(struct jme_softc *);
static void jme_setlinkspeed(struct jme_softc *);

static void jme_set_tx_coal(struct jme_softc *);
static void jme_set_rx_coal(struct jme_softc *);

static void jme_sysctl_node(struct jme_softc *);
static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
        uint16_t jme_vendorid;
        uint16_t jme_deviceid;
        uint32_t jme_caps;
        const char *jme_name;
} jme_devs[] = {
        { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
          JME_CAP_JUMBO,
          "JMicron Inc, JMC250 Gigabit Ethernet" },
        { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
          JME_CAP_FASTETH,
          "JMicron Inc, JMC260 Fast Ethernet" },
        { 0, 0, 0, NULL }
};
static device_method_t jme_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe,         jme_probe),
        DEVMETHOD(device_attach,        jme_attach),
        DEVMETHOD(device_detach,        jme_detach),
        DEVMETHOD(device_shutdown,      jme_shutdown),
        DEVMETHOD(device_suspend,       jme_suspend),
        DEVMETHOD(device_resume,        jme_resume),

        /* Bus interface. */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface. */
        DEVMETHOD(miibus_readreg,       jme_miibus_readreg),
        DEVMETHOD(miibus_writereg,      jme_miibus_writereg),
        DEVMETHOD(miibus_statchg,       jme_miibus_statchg),

        { NULL, NULL }
};

static driver_t jme_driver = {
        "jme",
        jme_methods,
        sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
static const struct {
        uint32_t jme_coal;
        uint32_t jme_comp;
} jme_rx_status[JME_NRXRING_MAX] = {
        { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
        { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
        { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
        { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
};
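/*
 * jme_rx_status maps each Rx ring to its coalescing and completion
 * interrupt bits; jme_intr() indexes it by ring to acknowledge and
 * dispatch per-ring Rx events (see the interrupt handler below).
 */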
static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int jme_rx_ring_count = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
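/*
 * The tunables above can be set from loader.conf(5) before the driver
 * loads, e.g. (values below are only an illustration; jme_attach()
 * rounds them up to JME_NDESC_ALIGN and clamps them to JME_NDESC_MAX):
 *
 *   hw.jme.rx_desc_count="512"
 *   hw.jme.tx_desc_count="512"
 */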
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
        struct jme_softc *sc = device_get_softc(dev);
        uint32_t val;
        int i;

        /* For FPGA version, PHY address 0 should be ignored. */
        if (sc->jme_caps & JME_CAP_FPGA) {
                if (phy == 0)
                        return (0);
        } else {
                if (sc->jme_phyaddr != phy)
                        return (0);
        }

        CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
            SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

        for (i = JME_PHY_TIMEOUT; i > 0; i--) {
                DELAY(1);
                if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
                        break;
        }
        if (i == 0) {
                device_printf(sc->jme_dev, "phy read timeout: "
                    "phy %d, reg %d\n", phy, reg);
                return (0);
        }

        return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct jme_softc *sc = device_get_softc(dev);
        int i;

        /* For FPGA version, PHY address 0 should be ignored. */
        if (sc->jme_caps & JME_CAP_FPGA) {
                if (phy == 0)
                        return (0);
        } else {
                if (sc->jme_phyaddr != phy)
                        return (0);
        }

        CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
            ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
            SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

        for (i = JME_PHY_TIMEOUT; i > 0; i--) {
                DELAY(1);
                if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
                        break;
        }
        if (i == 0) {
                device_printf(sc->jme_dev, "phy write timeout: "
                    "phy %d, reg %d\n", phy, reg);
        }

        return (0);
}
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii;
        struct jme_txdesc *txd;
        bus_addr_t paddr;
        int i, r;

        ASSERT_SERIALIZED(ifp->if_serializer);

        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return;

        mii = device_get_softc(sc->jme_miibus);

        sc->jme_flags &= ~JME_FLAG_LINK;
        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->jme_flags |= JME_FLAG_LINK;
                        break;
                case IFM_1000_T:
                        if (sc->jme_caps & JME_CAP_FASTETH)
                                break;
                        sc->jme_flags |= JME_FLAG_LINK;
                        break;
                default:
                        break;
                }
        }
        /*
         * Disabling Rx/Tx MACs has a side effect of resetting the
         * JME_TXNDA/JME_RXNDA registers to the first address of the
         * Tx/Rx descriptor rings, so the driver has to reset its
         * internal producer/consumer pointers and reclaim any
         * allocated resources. Note that just saving the value of
         * the JME_TXNDA and JME_RXNDA registers before stopping the
         * MAC and restoring them afterwards is not sufficient to
         * guarantee correct MAC state, because stopping the MAC can
         * take a while and the hardware may have updated
         * JME_TXNDA/JME_RXNDA during the stop operation.
         */
        /* Disable interrupts */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;
        callout_stop(&sc->jme_tick_ch);

        /* Stop receiver/transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);

        for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
                struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

                jme_rxeof(sc, r, -1);
                if (rdata->jme_rxhead != NULL)
                        m_freem(rdata->jme_rxhead);
                JME_RXCHAIN_RESET(sc, r);

                /*
                 * Reuse configured Rx descriptors and reset
                 * producer/consumer index.
                 */
                rdata->jme_rx_cons = 0;
        }

        jme_txeof(sc);
        if (sc->jme_cdata.jme_tx_cnt != 0) {
                /* Remove queued packets for transmit. */
                for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
                        txd = &sc->jme_cdata.jme_txdesc[i];
                        if (txd->tx_m != NULL) {
                                bus_dmamap_unload(
                                    sc->jme_cdata.jme_tx_tag,
                                    txd->tx_dmamap);
                                m_freem(txd->tx_m);
                                txd->tx_m = NULL;
                                txd->tx_ndesc = 0;
                                ifp->if_oerrors++;
                        }
                }
        }
        jme_init_tx_ring(sc);
        /* Initialize shadow status block. */
        jme_init_ssb(sc);

        /* Program MAC with resolved speed/duplex/flow-control. */
        if (sc->jme_flags & JME_FLAG_LINK) {
                jme_mac_config(sc);

                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

                /* Set Tx ring address to the hardware. */
                paddr = sc->jme_cdata.jme_tx_ring_paddr;
                CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
                CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

                for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
                        CSR_WRITE_4(sc, JME_RXCSR,
                            sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

                        /* Set Rx ring address to the hardware. */
                        paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
                        CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
                        CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
                }

                /* Restart receiver/transmitter. */
                CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
                    RXCSR_RXQ_START);
                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
        }

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
        if (!(ifp->if_flags & IFF_POLLING))
#endif
        /* Reenable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct jme_softc *sc = ifp->if_softc;
        struct mii_data *mii = device_get_softc(sc->jme_miibus);

        ASSERT_SERIALIZED(ifp->if_serializer);

        mii_pollstat(mii);
        ifmr->ifm_status = mii->mii_media_status;
        ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
        struct jme_softc *sc = ifp->if_softc;
        struct mii_data *mii = device_get_softc(sc->jme_miibus);
        int error;

        ASSERT_SERIALIZED(ifp->if_serializer);

        if (mii->mii_instance != 0) {
                struct mii_softc *miisc;

                LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                        mii_phy_reset(miisc);
        }
        error = mii_mediachg(mii);

        return (error);
}
static int
jme_probe(device_t dev)
{
        const struct jme_dev *sp;
        uint16_t vid, did;

        vid = pci_get_vendor(dev);
        did = pci_get_device(dev);
        for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
                if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
                        struct jme_softc *sc = device_get_softc(dev);

                        sc->jme_caps = sp->jme_caps;
                        device_set_desc(dev, sp->jme_name);
                        return (0);
                }
        }
        return (ENXIO);
}
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
        uint32_t reg;
        int i;

        for (i = JME_TIMEOUT; i > 0; i--) {
                reg = CSR_READ_4(sc, JME_SMBCSR);
                if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
                        break;
                DELAY(1);
        }
        if (i == 0) {
                device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
                return (ETIMEDOUT);
        }

        reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
        CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
        for (i = JME_TIMEOUT; i > 0; i--) {
                DELAY(1);
                reg = CSR_READ_4(sc, JME_SMBINTF);
                if ((reg & SMBINTF_CMD_TRIGGER) == 0)
                        break;
        }
        if (i == 0) {
                device_printf(sc->jme_dev, "EEPROM read timeout!\n");
                return (ETIMEDOUT);
        }

        reg = CSR_READ_4(sc, JME_SMBINTF);
        *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

        return (0);
}
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
        uint8_t fup, reg, val;
        uint32_t offset;
        int match;

        offset = 0;
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG0)
                return (ENOENT);
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG1)
                return (ENOENT);
        match = 0;
        do {
                if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
                        break;
                /* Check for the end of EEPROM descriptor. */
                if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
                        break;
                if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
                    JME_EEPROM_PAGE_BAR1) == fup) {
                        if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
                                break;
                        if (reg >= JME_PAR0 &&
                            reg < JME_PAR0 + ETHER_ADDR_LEN) {
                                if (jme_eeprom_read_byte(sc, offset + 2,
                                    &val) != 0)
                                        break;
                                eaddr[reg - JME_PAR0] = val;
                                match++;
                        }
                }
                /* Try next eeprom descriptor. */
                offset += JME_EEPROM_DESC_BYTES;
        } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

        if (match == ETHER_ADDR_LEN)
                return (0);

        return (ENOENT);
}
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
        uint32_t par0, par1;

        /* Read station address. */
        par0 = CSR_READ_4(sc, JME_PAR0);
        par1 = CSR_READ_4(sc, JME_PAR1);
        par1 &= 0xFFFF;
        if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
                device_printf(sc->jme_dev,
                    "generating fake ethernet address.\n");
                par0 = karc4random();
                /* Set OUI to JMicron. */
                eaddr[0] = 0x00;
                eaddr[1] = 0x1B;
                eaddr[2] = 0x8C;
                eaddr[3] = (par0 >> 16) & 0xff;
                eaddr[4] = (par0 >> 8) & 0xff;
                eaddr[5] = par0 & 0xff;
        } else {
                /*
                 * PAR0/PAR1 hold the station address in little-endian
                 * byte order: eaddr[0], the first byte on the wire,
                 * comes from the least significant byte of PAR0.
                 */
                eaddr[0] = (par0 >> 0) & 0xFF;
                eaddr[1] = (par0 >> 8) & 0xFF;
                eaddr[2] = (par0 >> 16) & 0xFF;
                eaddr[3] = (par0 >> 24) & 0xFF;
                eaddr[4] = (par1 >> 0) & 0xFF;
                eaddr[5] = (par1 >> 8) & 0xFF;
        }
}
static int
jme_attach(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t reg;
        uint16_t did;
        uint8_t pcie_ptr, rev;
        int error = 0;
        uint8_t eaddr[ETHER_ADDR_LEN];

        sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
        if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
                sc->jme_rx_desc_cnt = JME_NDESC_MAX;

        sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
        if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
                sc->jme_tx_desc_cnt = JME_NDESC_MAX;

        sc->jme_rx_ring_cnt = jme_rx_ring_count;
        if (sc->jme_rx_ring_cnt <= 0)
                sc->jme_rx_ring_cnt = 1;
        if (sc->jme_rx_ring_cnt > ncpus2)
                sc->jme_rx_ring_cnt = ncpus2;
        if (sc->jme_rx_ring_cnt > JME_NRXRING_MAX)
                sc->jme_rx_ring_cnt = JME_NRXRING_MAX;

        if (sc->jme_rx_ring_cnt > 1) {
                sc->jme_caps |= JME_CAP_RSS;
                sc->jme_flags |= JME_FLAG_RSS;
        }
        sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;

        sc->jme_dev = dev;
        sc->jme_lowaddr = BUS_SPACE_MAXADDR;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
        if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
                uint32_t irq, mem;

                irq = pci_read_config(dev, PCIR_INTLINE, 4);
                mem = pci_read_config(dev, JME_PCIR_BAR, 4);

                device_printf(dev, "chip is in D%d power mode "
                    "-- setting to D0\n", pci_get_powerstate(dev));

                pci_set_powerstate(dev, PCI_POWERSTATE_D0);

                pci_write_config(dev, PCIR_INTLINE, irq, 4);
                pci_write_config(dev, JME_PCIR_BAR, mem, 4);
        }
#endif  /* !BURN_BRIDGES */
        /* Enable bus mastering */
        pci_enable_busmaster(dev);

        /*
         * Allocate IO memory
         *
         * JMC250 supports both memory mapped and I/O register space
         * access. Because I/O register access should use a different
         * BAR to access registers, it's a waste of time to use I/O
         * register space access. JMC250 uses 16K to map the entire
         * memory space.
         */
        sc->jme_mem_rid = JME_PCIR_BAR;
        sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->jme_mem_rid, RF_ACTIVE);
        if (sc->jme_mem_res == NULL) {
                device_printf(dev, "can't allocate IO memory\n");
                return (ENXIO);
        }
        sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
        sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

        /*
         * Allocate IRQ
         */
        sc->jme_irq_rid = 0;
        sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &sc->jme_irq_rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->jme_irq_res == NULL) {
                device_printf(dev, "can't allocate irq\n");
                error = ENXIO;
                goto fail;
        }
        /*
         * Extract FPGA revision
         */
        reg = CSR_READ_4(sc, JME_CHIPMODE);
        if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
            CHIPMODE_NOT_FPGA) {
                sc->jme_caps |= JME_CAP_FPGA;
                if (bootverbose) {
                        device_printf(dev, "FPGA revision: 0x%04x\n",
                            (reg & CHIPMODE_FPGA_REV_MASK) >>
                            CHIPMODE_FPGA_REV_SHIFT);
                }
        }

        /* NOTE: FM revision is put in the upper 4 bits */
        rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
        rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
        if (bootverbose)
                device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
        did = pci_get_device(dev);
        switch (did) {
        case PCI_PRODUCT_JMICRON_JMC250:
                if (rev == JME_REV1_A2)
                        sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
                break;

        case PCI_PRODUCT_JMICRON_JMC260:
                if (rev == JME_REV2)
                        sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
                break;

        default:
                panic("unknown device id 0x%04x\n", did);
        }
        if (rev >= JME_REV2) {
                sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
                sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
                    GHC_TXMAC_CLKSRC_1000;
        }
        /* Reset the ethernet controller. */
        jme_reset(sc);

        /* Get station address. */
        reg = CSR_READ_4(sc, JME_SMBCSR);
        if (reg & SMBCSR_EEPROM_PRESENT)
                error = jme_eeprom_macaddr(sc, eaddr);
        if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
                if (error != 0 && bootverbose) {
                        device_printf(dev, "ethernet hardware address "
                            "not found in EEPROM.\n");
                }
                jme_reg_macaddr(sc, eaddr);
        }

        /*
         * Save PHY address.
         * Integrated JR0211 has fixed PHY address whereas FPGA version
         * requires PHY probing to get correct PHY address.
         */
        if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
                sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
                    GPREG0_PHY_ADDR_MASK;
                if (bootverbose) {
                        device_printf(dev, "PHY is at address %d.\n",
                            sc->jme_phyaddr);
                }
        } else {
                sc->jme_phyaddr = 0;
        }
        /* Set max allowable DMA size. */
        pcie_ptr = pci_get_pciecap_ptr(dev);
        if (pcie_ptr != 0) {
                uint16_t ctrl;

                sc->jme_caps |= JME_CAP_PCIE;
                ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
                if (bootverbose) {
                        device_printf(dev, "Read request size : %d bytes.\n",
                            128 << ((ctrl >> 12) & 0x07));
                        device_printf(dev, "TLP payload size : %d bytes.\n",
                            128 << ((ctrl >> 5) & 0x07));
                }
                switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
                case PCIEM_DEVCTL_MAX_READRQ_128:
                        sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
                        break;
                case PCIEM_DEVCTL_MAX_READRQ_256:
                        sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
                        break;
                default:
                        sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
                        break;
                }
                sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
        } else {
                sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
                sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
        }

#ifdef notyet
        if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
                sc->jme_caps |= JME_CAP_PMCAP;
#endif
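        /*
         * The PCIe branch above mirrors the PCIe Max Read Request Size
         * into the Tx DMA burst size: an MRRS of 128 or 256 bytes caps
         * the Tx DMA at the same size; anything larger uses the chip's
         * 512-byte maximum.
         */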
        /*
         * Create sysctl tree
         */
        jme_sysctl_node(sc);

        /* Allocate DMA stuffs */
        error = jme_dma_alloc(sc);
        if (error)
                goto fail;

        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_init = jme_init;
        ifp->if_ioctl = jme_ioctl;
        ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
        ifp->if_poll = jme_poll;
#endif
        ifp->if_watchdog = jme_watchdog;
        ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
        ifq_set_ready(&ifp->if_snd);

        /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
        ifp->if_capabilities = IFCAP_HWCSUM |
                               IFCAP_VLAN_MTU |
                               IFCAP_VLAN_HWTAGGING;
        ifp->if_hwassist = JME_CSUM_FEATURES;
        ifp->if_capenable = ifp->if_capabilities;
        /* Set up MII bus. */
        error = mii_phy_probe(dev, &sc->jme_miibus,
            jme_mediachange, jme_mediastatus);
        if (error) {
                device_printf(dev, "no PHY found!\n");
                goto fail;
        }

        /*
         * Save PHYADDR for FPGA mode PHY.
         */
        if (sc->jme_caps & JME_CAP_FPGA) {
                struct mii_data *mii = device_get_softc(sc->jme_miibus);

                if (mii->mii_instance != 0) {
                        struct mii_softc *miisc;

                        LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
                                if (miisc->mii_phy != 0) {
                                        sc->jme_phyaddr = miisc->mii_phy;
                                        break;
                                }
                        }
                        if (sc->jme_phyaddr != 0) {
                                device_printf(sc->jme_dev,
                                    "FPGA PHY is at %d\n", sc->jme_phyaddr);
                                jme_miibus_writereg(dev, sc->jme_phyaddr,
                                    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
                                /* XXX should we clear JME_WA_EXTFIFO */
                        }
                }
        }
        ether_ifattach(ifp, eaddr, NULL);

        /* Tell the upper layer(s) we support long frames. */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
            &sc->jme_irq_handle, ifp->if_serializer);
        if (error) {
                device_printf(dev, "could not set up interrupt handler.\n");
                ether_ifdetach(ifp);
                goto fail;
        }

        ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
        KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
        return (0);
fail:
        jme_detach(dev);
        return (error);
}
static int
jme_detach(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);

        if (device_is_attached(dev)) {
                struct ifnet *ifp = &sc->arpcom.ac_if;

                lwkt_serialize_enter(ifp->if_serializer);
                jme_stop(sc);
                bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
                lwkt_serialize_exit(ifp->if_serializer);

                ether_ifdetach(ifp);
        }

        if (sc->jme_sysctl_tree != NULL)
                sysctl_ctx_free(&sc->jme_sysctl_ctx);

        if (sc->jme_miibus != NULL)
                device_delete_child(dev, sc->jme_miibus);
        bus_generic_detach(dev);

        if (sc->jme_irq_res != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
                    sc->jme_irq_res);
        }

        if (sc->jme_mem_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
                    sc->jme_mem_res);
        }

        jme_dma_free(sc, 1);

        return (0);
}
static void
jme_sysctl_node(struct jme_softc *sc)
{
        int coal_max;

        sysctl_ctx_init(&sc->jme_sysctl_ctx);
        sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
            device_get_nameunit(sc->jme_dev),
            CTLFLAG_RD, 0, "");
        if (sc->jme_sysctl_tree == NULL) {
                device_printf(sc->jme_dev, "can't add sysctl node\n");
                return;
        }

        SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

        SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

        SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

        SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

        SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
            0, "RX desc count");
        SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
            0, "TX desc count");
        /*
         * Set default coalesce values.
         */
        sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
        sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
        sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
        sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

        /*
         * Adjust coalesce values, in case the number of TX/RX
         * descriptors is set to a small value by the user.
         *
         * NOTE: coal_max will not be zero, since the number of descs
         * must be aligned to JME_NDESC_ALIGN (16 currently).
         */
        coal_max = sc->jme_tx_desc_cnt / 6;
        if (coal_max < sc->jme_tx_coal_pkt)
                sc->jme_tx_coal_pkt = coal_max;

        coal_max = sc->jme_rx_desc_cnt / 4;
        if (coal_max < sc->jme_rx_coal_pkt)
                sc->jme_rx_coal_pkt = coal_max;
}
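/*
 * Worked example of the clamps above (512 is an illustrative count;
 * the real bounds come from the tunables): with 512 Tx and 512 Rx
 * descriptors, coal_max is 512 / 6 = 85 for Tx and 512 / 4 = 128 for
 * Rx, so a tx_coal_pkt above 85 or an rx_coal_pkt above 128 would be
 * clamped to those ceilings.
 */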
static void
jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        if (error)
                return;

        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
        *((bus_addr_t *)arg) = segs->ds_addr;
}

static void
jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsz __unused, int error)
{
        struct jme_dmamap_ctx *ctx = xctx;
        int i;

        if (error)
                return;

        if (nsegs > ctx->nsegs) {
                ctx->nsegs = 0;
                return;
        }

        ctx->nsegs = nsegs;
        for (i = 0; i < nsegs; ++i)
                ctx->segs[i] = segs[i];
}
static int
jme_dma_alloc(struct jme_softc *sc)
{
        struct jme_txdesc *txd;
        bus_addr_t busaddr, lowaddr;
        int error, i;

        sc->jme_cdata.jme_txdesc =
            kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
                    M_DEVBUF, M_WAITOK | M_ZERO);
        for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
                sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
                    kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
                            M_DEVBUF, M_WAITOK | M_ZERO);
        }

        lowaddr = sc->jme_lowaddr;
again:
        /* Create parent ring tag. */
        error = bus_dma_tag_create(NULL,/* parent */
            1, 0,                       /* algnmnt, boundary */
            lowaddr,                    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
            0,                          /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
            0,                          /* flags */
            &sc->jme_cdata.jme_ring_tag);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not create parent ring DMA tag.\n");
                return error;
        }
        /*
         * Create DMA stuffs for TX ring
         */

        /* Create tag for Tx ring. */
        error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
            JME_TX_RING_ALIGN, 0,       /* algnmnt, boundary */
            lowaddr,                    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            JME_TX_RING_SIZE(sc),       /* maxsize */
            1,                          /* nsegments */
            JME_TX_RING_SIZE(sc),       /* maxsegsize */
            0,                          /* flags */
            &sc->jme_cdata.jme_tx_ring_tag);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not allocate Tx ring DMA tag.\n");
                return error;
        }

        /* Allocate DMA'able memory for TX ring */
        error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
            (void **)&sc->jme_cdata.jme_tx_ring,
            BUS_DMA_WAITOK | BUS_DMA_ZERO,
            &sc->jme_cdata.jme_tx_ring_map);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not allocate DMA'able memory for Tx ring.\n");
                bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
                sc->jme_cdata.jme_tx_ring_tag = NULL;
                return error;
        }

        /* Load the DMA map for Tx ring. */
        error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
            sc->jme_cdata.jme_tx_ring_map, sc->jme_cdata.jme_tx_ring,
            JME_TX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not load DMA'able memory for Tx ring.\n");
                bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
                    sc->jme_cdata.jme_tx_ring,
                    sc->jme_cdata.jme_tx_ring_map);
                bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
                sc->jme_cdata.jme_tx_ring_tag = NULL;
                return error;
        }
        sc->jme_cdata.jme_tx_ring_paddr = busaddr;
        /*
         * Create DMA stuffs for RX ring
         */
        for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
                error = jme_rxring_dma_alloc(sc, lowaddr, i);
                if (error)
                        return error;
        }

        if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
                bus_addr_t ring_end;

                /* Tx/Rx descriptor queue should reside within 4GB boundary. */
                ring_end = sc->jme_cdata.jme_tx_ring_paddr +
                    JME_TX_RING_SIZE(sc);
                if (JME_ADDR_HI(ring_end) !=
                    JME_ADDR_HI(sc->jme_cdata.jme_tx_ring_paddr)) {
                        device_printf(sc->jme_dev, "TX ring 4GB boundary "
                            "crossed, switching to 32bit DMA address mode.\n");
                        jme_dma_free(sc, 0);
                        /* Limit DMA address space to 32bit and try again. */
                        lowaddr = BUS_SPACE_MAXADDR_32BIT;
                        goto again;
                }

                for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
                        bus_addr_t ring_start;

                        ring_start =
                            sc->jme_cdata.jme_rx_data[i].jme_rx_ring_paddr;
                        ring_end = ring_start + JME_RX_RING_SIZE(sc);
                        if (JME_ADDR_HI(ring_end) != JME_ADDR_HI(ring_start)) {
                                device_printf(sc->jme_dev,
                                    "%dth RX ring 4GB boundary crossed, "
                                    "switching to 32bit DMA address mode.\n",
                                    i);
                                jme_dma_free(sc, 0);
                                /*
                                 * Limit DMA address space to 32bit and
                                 * try again.
                                 */
                                lowaddr = BUS_SPACE_MAXADDR_32BIT;
                                goto again;
                        }
                }
        }
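        /*
         * Example of the boundary test above (addresses illustrative):
         * a ring at physical address 0xFFFFF000 with a 0x2000-byte
         * size ends at 0x1_00001000, so JME_ADDR_HI() differs between
         * start and end, and the rings are reallocated with a 32bit
         * lowaddr so they cannot straddle 4GB.
         */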
        /* Create parent buffer tag. */
        error = bus_dma_tag_create(NULL,/* parent */
            1, 0,                       /* algnmnt, boundary */
            sc->jme_lowaddr,            /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
            0,                          /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
            0,                          /* flags */
            &sc->jme_cdata.jme_buffer_tag);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not create parent buffer DMA tag.\n");
                return error;
        }
        /*
         * Create DMA stuffs for shadow status block
         */

        /* Create shadow status block tag. */
        error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
            JME_SSB_ALIGN, 0,           /* algnmnt, boundary */
            sc->jme_lowaddr,            /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            JME_SSB_SIZE,               /* maxsize */
            1,                          /* nsegments */
            JME_SSB_SIZE,               /* maxsegsize */
            0,                          /* flags */
            &sc->jme_cdata.jme_ssb_tag);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not create shadow status block DMA tag.\n");
                return error;
        }

        /* Allocate DMA'able memory for shadow status block. */
        error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
            (void **)&sc->jme_cdata.jme_ssb_block,
            BUS_DMA_WAITOK | BUS_DMA_ZERO,
            &sc->jme_cdata.jme_ssb_map);
        if (error) {
                device_printf(sc->jme_dev, "could not allocate DMA'able "
                    "memory for shadow status block.\n");
                bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
                sc->jme_cdata.jme_ssb_tag = NULL;
                return error;
        }

        /* Load the DMA map for shadow status block. */
        error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
            sc->jme_cdata.jme_ssb_map, sc->jme_cdata.jme_ssb_block,
            JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
        if (error) {
                device_printf(sc->jme_dev, "could not load DMA'able memory "
                    "for shadow status block.\n");
                bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
                    sc->jme_cdata.jme_ssb_block,
                    sc->jme_cdata.jme_ssb_map);
                bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
                sc->jme_cdata.jme_ssb_tag = NULL;
                return error;
        }
        sc->jme_cdata.jme_ssb_block_paddr = busaddr;
        /*
         * Create DMA stuffs for TX buffers
         */

        /* Create tag for Tx buffers. */
        error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
            1, 0,                       /* algnmnt, boundary */
            sc->jme_lowaddr,            /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            JME_TSO_MAXSIZE,            /* maxsize */
            JME_MAXTXSEGS,              /* nsegments */
            JME_TSO_MAXSEGSIZE,         /* maxsegsize */
            0,                          /* flags */
            &sc->jme_cdata.jme_tx_tag);
        if (error) {
                device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
                return error;
        }

        /* Create DMA maps for Tx buffers. */
        for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
                txd = &sc->jme_cdata.jme_txdesc[i];
                error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
                    &txd->tx_dmamap);
                if (error) {
                        int j;

                        device_printf(sc->jme_dev,
                            "could not create %dth Tx dmamap.\n", i);

                        for (j = 0; j < i; ++j) {
                                txd = &sc->jme_cdata.jme_txdesc[j];
                                bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
                                    txd->tx_dmamap);
                        }
                        bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
                        sc->jme_cdata.jme_tx_tag = NULL;
                        return error;
                }
        }

        /*
         * Create DMA stuffs for RX buffers
         */
        for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
                error = jme_rxbuf_dma_alloc(sc, i);
                if (error)
                        return error;
        }
        return 0;
}
static void
jme_dma_free(struct jme_softc *sc, int detach)
{
        struct jme_txdesc *txd;
        struct jme_rxdesc *rxd;
        struct jme_rxdata *rdata;
        int i, r;

        /* Tx ring */
        if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
                bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
                    sc->jme_cdata.jme_tx_ring_map);
                bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
                    sc->jme_cdata.jme_tx_ring,
                    sc->jme_cdata.jme_tx_ring_map);
                bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
                sc->jme_cdata.jme_tx_ring_tag = NULL;
        }

        /* Rx ring */
        for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                if (rdata->jme_rx_ring_tag != NULL) {
                        bus_dmamap_unload(rdata->jme_rx_ring_tag,
                            rdata->jme_rx_ring_map);
                        bus_dmamem_free(rdata->jme_rx_ring_tag,
                            rdata->jme_rx_ring,
                            rdata->jme_rx_ring_map);
                        bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
                        rdata->jme_rx_ring_tag = NULL;
                }
        }

        /* Tx buffers */
        if (sc->jme_cdata.jme_tx_tag != NULL) {
                for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
                        txd = &sc->jme_cdata.jme_txdesc[i];
                        bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
                            txd->tx_dmamap);
                }
                bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
                sc->jme_cdata.jme_tx_tag = NULL;
        }

        /* Rx buffers */
        for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                if (rdata->jme_rx_tag != NULL) {
                        for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
                                rxd = &rdata->jme_rxdesc[i];
                                bus_dmamap_destroy(rdata->jme_rx_tag,
                                    rxd->rx_dmamap);
                        }
                        bus_dmamap_destroy(rdata->jme_rx_tag,
                            rdata->jme_rx_sparemap);
                        bus_dma_tag_destroy(rdata->jme_rx_tag);
                        rdata->jme_rx_tag = NULL;
                }
        }

        /* Shadow status block. */
        if (sc->jme_cdata.jme_ssb_tag != NULL) {
                bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
                    sc->jme_cdata.jme_ssb_map);
                bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
                    sc->jme_cdata.jme_ssb_block,
                    sc->jme_cdata.jme_ssb_map);
                bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
                sc->jme_cdata.jme_ssb_tag = NULL;
        }

        if (sc->jme_cdata.jme_buffer_tag != NULL) {
                bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
                sc->jme_cdata.jme_buffer_tag = NULL;
        }
        if (sc->jme_cdata.jme_ring_tag != NULL) {
                bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
                sc->jme_cdata.jme_ring_tag = NULL;
        }

        if (detach) {
                if (sc->jme_cdata.jme_txdesc != NULL) {
                        kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
                        sc->jme_cdata.jme_txdesc = NULL;
                }
                for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
                        rdata = &sc->jme_cdata.jme_rx_data[r];
                        if (rdata->jme_rxdesc != NULL) {
                                kfree(rdata->jme_rxdesc, M_DEVBUF);
                                rdata->jme_rxdesc = NULL;
                        }
                }
        }
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
        return jme_suspend(dev);
}
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link consumes
 * more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed, as we have
 * no control after powering off. If the renegotiation fails, WOL may
 * not work. Running at 1Gbps draws more power than the 375mA at 3.3V
 * specified in the PCI specification, and that would result in power
 * to the ethernet controller being shut down completely.
 *
 * TODO
 * Save the current negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming. PHY handling
 * such as power down/resetting to 100Mbps may be better handled in
 * the suspend method in the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
        struct mii_data *mii;
        int aneg, i;

        JME_LOCK_ASSERT(sc);

        mii = device_get_softc(sc->jme_miibus);
        mii_pollstat(mii);
        aneg = 0;
        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        return;
                case IFM_1000_T:
                        aneg++;
                default:
                        break;
                }
        }

        jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
        jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
            ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
        jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
            BMCR_AUTOEN | BMCR_STARTNEG);
        DELAY(1000);
        if (aneg != 0) {
                /* Poll link state until jme(4) gets a 10/100 link. */
                for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
                        mii_pollstat(mii);
                        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                                case IFM_10_T:
                                case IFM_100_TX:
                                        jme_mac_config(sc);
                                        return;
                                default:
                                        break;
                                }
                        }
                        JME_UNLOCK(sc);
                        pause("jmelnk", hz);
                        JME_LOCK(sc);
                }
                if (i == MII_ANEGTICKS_GIGE) {
                        device_printf(sc->jme_dev, "establishing link failed, "
                            "WOL may not work!");
                }
        }

        /*
         * No link, force MAC to have 100Mbps, full-duplex link.
         * This is the last resort and may/may not work.
         */
        mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
        mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
        jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t gpr, pmcs;
        uint16_t pmstat;
        int pmc;

        if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
                /* No PME capability, PHY power down. */
                jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
                    MII_BMCR, BMCR_PDOWN);
                return;
        }

        gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
        pmcs = CSR_READ_4(sc, JME_PMCS);
        pmcs &= ~PMCS_WOL_ENB_MASK;
        if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
                pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
                /* Enable PME message. */
                gpr |= GPREG0_PME_ENB;
                /* For gigabit controllers, reset link speed to 10/100. */
                if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
                        jme_setlinkspeed(sc);
        }

        CSR_WRITE_4(sc, JME_PMCS, pmcs);
        CSR_WRITE_4(sc, JME_GPREG0, gpr);

        /* Request PME. */
        pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
        pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
        if ((ifp->if_capenable & IFCAP_WOL) != 0)
                pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
        if ((ifp->if_capenable & IFCAP_WOL) == 0) {
                /* No WOL, PHY power down. */
                jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
                    MII_BMCR, BMCR_PDOWN);
        }
}
static int
jme_suspend(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        jme_stop(sc);
        jme_setwol(sc);
        lwkt_serialize_exit(ifp->if_serializer);

        return (0);
}

static int
jme_resume(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int pmc;

        lwkt_serialize_enter(ifp->if_serializer);

        if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
                uint16_t pmstat;

                pmstat = pci_read_config(sc->jme_dev,
                    pmc + PCIR_POWER_STATUS, 2);
                /* Disable PME and clear PME status. */
                pmstat &= ~PCIM_PSTAT_PMEENABLE;
                pci_write_config(sc->jme_dev,
                    pmc + PCIR_POWER_STATUS, pmstat, 2);
        }

        if (ifp->if_flags & IFF_UP)
                jme_init(sc);

        lwkt_serialize_exit(ifp->if_serializer);

        return (0);
}
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
        struct jme_txdesc *txd;
        struct jme_desc *desc;
        struct mbuf *m;
        struct jme_dmamap_ctx ctx;
        bus_dma_segment_t txsegs[JME_MAXTXSEGS];
        int maxsegs;
        int error, i, prod, symbol_desc;
        uint32_t cflags, flag64;

        M_ASSERTPKTHDR((*m_head));

        prod = sc->jme_cdata.jme_tx_prod;
        txd = &sc->jme_cdata.jme_txdesc[prod];

        if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
                symbol_desc = 1;
        else
                symbol_desc = 0;

        maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
            (JME_TXD_RSVD + symbol_desc);
        if (maxsegs > JME_MAXTXSEGS)
                maxsegs = JME_MAXTXSEGS;
        KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
            ("not enough segments %d\n", maxsegs));
        ctx.nsegs = maxsegs;
        ctx.segs = txsegs;
        error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
            *m_head, jme_dmamap_buf_cb, &ctx,
            BUS_DMA_NOWAIT);
        if (!error && ctx.nsegs == 0) {
                bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
                error = EFBIG;
        }
        if (error == EFBIG) {
                m = m_defrag(*m_head, MB_DONTWAIT);
                if (m == NULL) {
                        if_printf(&sc->arpcom.ac_if,
                            "could not defrag TX mbuf\n");
                        error = ENOBUFS;
                        goto fail;
                }
                *m_head = m;

                ctx.nsegs = maxsegs;
                ctx.segs = txsegs;
                error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
                    txd->tx_dmamap, *m_head,
                    jme_dmamap_buf_cb, &ctx,
                    BUS_DMA_NOWAIT);
                if (error || ctx.nsegs == 0) {
                        if_printf(&sc->arpcom.ac_if,
                            "could not load defragged TX mbuf\n");
                        if (!error) {
                                bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
                                    txd->tx_dmamap);
                                error = EFBIG;
                        }
                        goto fail;
                }
        } else if (error) {
                if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
                goto fail;
        }

        m = *m_head;
        cflags = 0;
        /* Configure checksum offload. */
        if (m->m_pkthdr.csum_flags & CSUM_IP)
                cflags |= JME_TD_IPCSUM;
        if (m->m_pkthdr.csum_flags & CSUM_TCP)
                cflags |= JME_TD_TCPCSUM;
        if (m->m_pkthdr.csum_flags & CSUM_UDP)
                cflags |= JME_TD_UDPCSUM;

        /* Configure VLAN. */
        if (m->m_flags & M_VLANTAG) {
                cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
                cflags |= JME_TD_VLAN_TAG;
        }

        desc = &sc->jme_cdata.jme_tx_ring[prod];
        desc->flags = htole32(cflags);
        desc->addr_hi = htole32(m->m_pkthdr.len);
        if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
                /*
                 * Use 64bits TX desc chain format.
                 *
                 * The first TX desc of the chain, which is setup here,
                 * is just a symbol TX desc carrying no payload.
                 */
                flag64 = JME_TD_64BIT;
                desc->buflen = 0;
                desc->addr_lo = 0;

                /* No effective TX desc is consumed */
                i = 0;
        } else {
                /*
                 * Use 32bits TX desc chain format.
                 *
                 * The first TX desc of the chain, which is setup here,
                 * is an effective TX desc carrying the first segment of
                 * the mbuf chain.
                 */
                flag64 = 0;
                desc->buflen = htole32(txsegs[0].ds_len);
                desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

                /* One effective TX desc is consumed */
                i = 1;
        }
        sc->jme_cdata.jme_tx_cnt++;
        KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
                 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
        JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

        txd->tx_ndesc = 1 - i;
        for (; i < ctx.nsegs; i++) {
                desc = &sc->jme_cdata.jme_tx_ring[prod];
                desc->flags = htole32(JME_TD_OWN | flag64);
                desc->buflen = htole32(txsegs[i].ds_len);
                desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
                desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

                sc->jme_cdata.jme_tx_cnt++;
                KKASSERT(sc->jme_cdata.jme_tx_cnt <=
                         sc->jme_tx_desc_cnt - JME_TXD_RSVD);
                JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
        }

        /* Update producer index. */
        sc->jme_cdata.jme_tx_prod = prod;
        /*
         * Finally request interrupt and give the first descriptor
         * ownership to hardware.
         */
        desc = txd->tx_desc;
        desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

        txd->tx_m = m;
        txd->tx_ndesc += ctx.nsegs;

        /* Sync descriptors. */
        bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
            BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
            sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);

        return 0;
fail:
        m_freem(*m_head);
        *m_head = NULL;
        return error;
}
static void
jme_start(struct ifnet *ifp)
{
        struct jme_softc *sc = ifp->if_softc;
        struct mbuf *m_head;
        int enq = 0;

        ASSERT_SERIALIZED(ifp->if_serializer);

        if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
                ifq_purge(&ifp->if_snd);
                return;
        }

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                return;

        if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
                jme_txeof(sc);

        while (!ifq_is_empty(&ifp->if_snd)) {
                /*
                 * Check number of available TX descs, always
                 * leave JME_TXD_RSVD free TX descs.
                 */
                if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
                    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                m_head = ifq_dequeue(&ifp->if_snd, NULL);
                if (m_head == NULL)
                        break;

                /*
                 * Pack the data into the transmit ring. If we
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
                if (jme_encap(sc, &m_head)) {
                        KKASSERT(m_head == NULL);
                        ifp->if_oerrors++;
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }
                enq++;

                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                ETHER_BPF_MTAP(ifp, m_head);
        }

        if (enq > 0) {
                /*
                 * Reading TXCSR takes a very long time under heavy
                 * load, so cache the TXCSR value and write the ORed
                 * value with the kick command to TXCSR. This saves one
                 * register access cycle.
                 */
                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
                    TXCSR_TXQ_N_START(TXCSR_TXQ0));
                /* Set a timeout in case the chip goes out to lunch. */
                ifp->if_timer = JME_TX_TIMEOUT;
        }
}
static void
jme_watchdog(struct ifnet *ifp)
{
        struct jme_softc *sc = ifp->if_softc;

        ASSERT_SERIALIZED(ifp->if_serializer);

        if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
                if_printf(ifp, "watchdog timeout (missed link)\n");
                ifp->if_oerrors++;
                jme_init(sc);
                return;
        }

        jme_txeof(sc);
        if (sc->jme_cdata.jme_tx_cnt == 0) {
                if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
                          "-- recovering\n");
                if (!ifq_is_empty(&ifp->if_snd))
                        if_devstart(ifp);
                return;
        }

        if_printf(ifp, "watchdog timeout\n");
        ifp->if_oerrors++;
        jme_init(sc);
        if (!ifq_is_empty(&ifp->if_snd))
                if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
        struct jme_softc *sc = ifp->if_softc;
        struct mii_data *mii = device_get_softc(sc->jme_miibus);
        struct ifreq *ifr = (struct ifreq *)data;
        int error = 0, mask;

        ASSERT_SERIALIZED(ifp->if_serializer);

        switch (cmd) {
        case SIOCSIFMTU:
                if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
                    (!(sc->jme_caps & JME_CAP_JUMBO) &&
                     ifr->ifr_mtu > JME_MAX_MTU)) {
                        error = EINVAL;
                        break;
                }

                if (ifp->if_mtu != ifr->ifr_mtu) {
                        /*
                         * No special configuration is required when the
                         * interface MTU is changed, but the availability
                         * of Tx checksum offload should be checked
                         * against the new MTU size, as the FIFO size is
                         * just 2K.
                         */
                        if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
                                ifp->if_capenable &= ~IFCAP_TXCSUM;
                                ifp->if_hwassist &= ~JME_CSUM_FEATURES;
                        }
                        ifp->if_mtu = ifr->ifr_mtu;
                        if (ifp->if_flags & IFF_RUNNING)
                                jme_init(sc);
                }
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_flags & IFF_RUNNING) {
                                if ((ifp->if_flags ^ sc->jme_if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI))
                                        jme_set_filter(sc);
                        } else {
                                jme_init(sc);
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                jme_stop(sc);
                }
                sc->jme_if_flags = ifp->if_flags;
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING)
                        jme_set_filter(sc);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;

        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;

                if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
                        if (IFCAP_TXCSUM & ifp->if_capabilities) {
                                ifp->if_capenable ^= IFCAP_TXCSUM;
                                if (IFCAP_TXCSUM & ifp->if_capenable)
                                        ifp->if_hwassist |= JME_CSUM_FEATURES;
                                else
                                        ifp->if_hwassist &= ~JME_CSUM_FEATURES;
                        }
                }
                if ((mask & IFCAP_RXCSUM) &&
                    (IFCAP_RXCSUM & ifp->if_capabilities)) {
                        uint32_t reg;

                        ifp->if_capenable ^= IFCAP_RXCSUM;
                        reg = CSR_READ_4(sc, JME_RXMAC);
                        reg &= ~RXMAC_CSUM_ENB;
                        if (ifp->if_capenable & IFCAP_RXCSUM)
                                reg |= RXMAC_CSUM_ENB;
                        CSR_WRITE_4(sc, JME_RXMAC, reg);
                }

                if ((mask & IFCAP_VLAN_HWTAGGING) &&
                    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        jme_set_vlan(sc);
                }
                break;

        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }
        return (error);
}
static void
jme_mac_config(struct jme_softc *sc)
{
        struct mii_data *mii;
        uint32_t ghc, rxmac, txmac, txpause, gp1;
        int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

        mii = device_get_softc(sc->jme_miibus);

        CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
        DELAY(10);
        CSR_WRITE_4(sc, JME_GHC, 0);
        ghc = 0;
        rxmac = CSR_READ_4(sc, JME_RXMAC);
        rxmac &= ~RXMAC_FC_ENB;
        txmac = CSR_READ_4(sc, JME_TXMAC);
        txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
        txpause = CSR_READ_4(sc, JME_TXPFC);
        txpause &= ~TXPFC_PAUSE_ENB;
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                ghc |= GHC_FULL_DUPLEX;
                rxmac &= ~RXMAC_COLL_DET_ENB;
                txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
                    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
                    TXMAC_FRAME_BURST);
                if ((IFM_OPTIONS(mii->mii_media_active) &
                    IFM_ETH_TXPAUSE) != 0)
                        txpause |= TXPFC_PAUSE_ENB;
                if ((IFM_OPTIONS(mii->mii_media_active) &
                    IFM_ETH_RXPAUSE) != 0)
                        rxmac |= RXMAC_FC_ENB;
                /* Disable retry transmit timer/retry limit. */
                CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
                    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
        } else {
                rxmac |= RXMAC_COLL_DET_ENB;
                txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
                /* Enable retry transmit timer/retry limit. */
                CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
                    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
        }

        /*
         * Reprogram Tx/Rx MACs with resolved speed/duplex.
         */
        gp1 = CSR_READ_4(sc, JME_GPREG1);
        gp1 &= ~GPREG1_WA_HDX;

        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
                hdx = 1;

        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
                ghc |= GHC_SPEED_10 | sc->jme_clksrc;
                if (hdx)
                        gp1 |= GPREG1_WA_HDX;
                break;

        case IFM_100_TX:
                ghc |= GHC_SPEED_100 | sc->jme_clksrc;
                if (hdx)
                        gp1 |= GPREG1_WA_HDX;

                /*
                 * Use extended FIFO depth to work around CRC errors
                 * emitted by chips before JMC250B
                 */
                phyconf = JMPHY_CONF_EXTFIFO;
                break;

        case IFM_1000_T:
                if (sc->jme_caps & JME_CAP_FASTETH)
                        break;

                ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
                if (hdx)
                        txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
                break;

        default:
                break;
        }
        CSR_WRITE_4(sc, JME_GHC, ghc);
        CSR_WRITE_4(sc, JME_RXMAC, rxmac);
        CSR_WRITE_4(sc, JME_TXMAC, txmac);
        CSR_WRITE_4(sc, JME_TXPFC, txpause);

        if (sc->jme_workaround & JME_WA_EXTFIFO) {
                jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
                    JMPHY_CONF, phyconf);
        }
        if (sc->jme_workaround & JME_WA_HDX)
                CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
static void
jme_intr(void *xsc)
{
        struct jme_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t status;
        int r;

        ASSERT_SERIALIZED(ifp->if_serializer);

        status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
        if (status == 0 || status == 0xFFFFFFFF)
                return;

        /* Disable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

        status = CSR_READ_4(sc, JME_INTR_STATUS);
        if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
                goto back;

        /* Reset PCC counter/timer and Ack interrupts. */
        status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

        if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
                status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

        for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
                if (status & jme_rx_status[r].jme_coal) {
                        status |= jme_rx_status[r].jme_coal |
                                  jme_rx_status[r].jme_comp;
                }
        }

        CSR_WRITE_4(sc, JME_INTR_STATUS, status);

        if (ifp->if_flags & IFF_RUNNING) {
                if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
                        jme_rx_intr(sc, status);

                if (status & INTR_RXQ_DESC_EMPTY) {
                        /*
                         * Notify hardware availability of new Rx buffers.
                         * Reading RXCSR takes a very long time under heavy
                         * load, so cache the RXCSR value and write the ORed
                         * value with the kick command to RXCSR. This saves
                         * one register access cycle.
                         */
                        CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
                            RXCSR_RX_ENB | RXCSR_RXQ_START);
                }

                if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
                        jme_txeof(sc);
                        if (!ifq_is_empty(&ifp->if_snd))
                                if_devstart(ifp);
                }
        }
back:
        /* Reenable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
static void
jme_txeof(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct jme_txdesc *txd;
        uint32_t status;
        int cons, nsegs;

        cons = sc->jme_cdata.jme_tx_cons;
        if (cons == sc->jme_cdata.jme_tx_prod)
                return;

        bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
            sc->jme_cdata.jme_tx_ring_map,
            BUS_DMASYNC_POSTREAD);

        /*
         * Go through our Tx list and free mbufs for those
         * frames which have been transmitted.
         */
        while (cons != sc->jme_cdata.jme_tx_prod) {
                txd = &sc->jme_cdata.jme_txdesc[cons];
                KASSERT(txd->tx_m != NULL,
                        ("%s: freeing NULL mbuf!\n", __func__));

                status = le32toh(txd->tx_desc->flags);
                if ((status & JME_TD_OWN) == JME_TD_OWN)
                        break;

                if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
                        ifp->if_oerrors++;
                } else {
                        ifp->if_opackets++;
                        if (status & JME_TD_COLLISION) {
                                ifp->if_collisions +=
                                    le32toh(txd->tx_desc->buflen) &
                                    JME_TD_BUF_LEN_MASK;
                        }
                }

                /*
                 * Only the first descriptor of a multi-descriptor
                 * transmission is updated, so the driver has to skip
                 * the entire chain of buffers for the transmitted
                 * frame. In other words, the JME_TD_OWN bit is valid
                 * only at the first descriptor of a multi-descriptor
                 * transmission.
                 */
                for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
                        sc->jme_cdata.jme_tx_ring[cons].flags = 0;
                        JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
                }

                /* Reclaim transferred mbufs. */
                bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
                m_freem(txd->tx_m);
                txd->tx_m = NULL;
                sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
                KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
                        ("%s: Active Tx desc counter was garbled\n",
                         __func__));
                txd->tx_ndesc = 0;
        }
        sc->jme_cdata.jme_tx_cons = cons;

        if (sc->jme_cdata.jme_tx_cnt == 0)
                ifp->if_timer = 0;

        if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
            sc->jme_tx_desc_cnt - JME_TXD_RSVD)
                ifp->if_flags &= ~IFF_OACTIVE;

        bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
            sc->jme_cdata.jme_tx_ring_map,
            BUS_DMASYNC_PREWRITE);
}
static __inline void
jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
{
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
        int i;

        for (i = 0; i < count; ++i) {
                struct jme_desc *desc = &rdata->jme_rx_ring[cons];

                desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
                desc->buflen = htole32(MCLBYTES);
                JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
        }
}
/* Receive a frame. */
static void
jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
        struct jme_desc *desc;
        struct jme_rxdesc *rxd;
        struct mbuf *mp, *m;
        uint32_t flags, status;
        int cons, count, nsegs;

        cons = rdata->jme_rx_cons;
        desc = &rdata->jme_rx_ring[cons];
        flags = le32toh(desc->flags);
        status = le32toh(desc->buflen);
        nsegs = JME_RX_NSEGS(status);

        if (status & JME_RX_ERR_STAT) {
                ifp->if_ierrors++;
                jme_discard_rxbufs(sc, ring, cons, nsegs);
#ifdef JME_SHOW_ERRORS
                device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
                    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
                rdata->jme_rx_cons += nsegs;
                rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
                return;
        }

        rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
        for (count = 0; count < nsegs; count++,
             JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
                rxd = &rdata->jme_rxdesc[cons];
                mp = rxd->rx_m;

                /* Add a new receive buffer to the ring. */
                if (jme_newbuf(sc, ring, rxd, 0) != 0) {
                        ifp->if_iqdrops++;
                        /* Reuse buffer. */
                        jme_discard_rxbufs(sc, ring, cons, nsegs - count);
                        if (rdata->jme_rxhead != NULL) {
                                m_freem(rdata->jme_rxhead);
                                JME_RXCHAIN_RESET(sc, ring);
                        }
                        break;
                }

                /*
                 * Assume we've received a full sized frame.
                 * Actual size is fixed when we encounter the end of
                 * multi-segmented frame.
                 */
                mp->m_len = MCLBYTES;

                /* Chain received mbufs. */
                if (rdata->jme_rxhead == NULL) {
                        rdata->jme_rxhead = mp;
                        rdata->jme_rxtail = mp;
                } else {
                        /*
                         * Receive processor can receive a maximum frame
                         * size of 65535 bytes.
                         */
                        mp->m_flags &= ~M_PKTHDR;
                        rdata->jme_rxtail->m_next = mp;
                        rdata->jme_rxtail = mp;
                }

                if (count == nsegs - 1) {
                        /* Last desc. for this frame. */
                        m = rdata->jme_rxhead;
                        /* XXX assert PKTHDR? */
                        m->m_flags |= M_PKTHDR;
                        m->m_pkthdr.len = rdata->jme_rxlen;
                        if (nsegs > 1) {
                                /* Set first mbuf size. */
                                m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
                                /* Set last mbuf size. */
                                mp->m_len = rdata->jme_rxlen -
                                    ((MCLBYTES - JME_RX_PAD_BYTES) +
                                     (MCLBYTES * (nsegs - 2)));
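                                /*
                                 * e.g. a 4018-byte frame (rxlen 4008
                                 * after the 10-byte pad) in 2048-byte
                                 * clusters spans nsegs = 2: the first
                                 * mbuf holds 2038 bytes, the last
                                 * 4008 - 2038 = 1970. (Sizes are
                                 * illustrative, assuming 2048-byte
                                 * MCLBYTES.)
                                 */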
                        } else {
                                m->m_len = rdata->jme_rxlen;
                        }
                        m->m_pkthdr.rcvif = ifp;

                        /*
                         * Account for 10 bytes of auto padding, which
                         * is used to align the IP header on a 32bit
                         * boundary. Also note that the CRC bytes are
                         * automatically removed by the hardware.
                         */
                        m->m_data += JME_RX_PAD_BYTES;

                        /* Set checksum information. */
                        if ((ifp->if_capenable & IFCAP_RXCSUM) &&
                            (flags & JME_RD_IPV4)) {
                                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                                if (flags & JME_RD_IPCSUM)
                                        m->m_pkthdr.csum_flags |=
                                            CSUM_IP_VALID;
                                if ((flags & JME_RD_MORE_FRAG) == 0 &&
                                    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
                                     (JME_RD_TCP | JME_RD_TCPCSUM) ||
                                     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
                                     (JME_RD_UDP | JME_RD_UDPCSUM))) {
                                        m->m_pkthdr.csum_flags |=
                                            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                        m->m_pkthdr.csum_data = 0xffff;
                                }
                        }

                        /* Check for VLAN tagged packets. */
                        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
                            (flags & JME_RD_VLAN_TAG)) {
                                m->m_pkthdr.ether_vlantag =
                                    flags & JME_RD_VLAN_MASK;
                                m->m_flags |= M_VLANTAG;
                        }

                        ifp->if_ipackets++;
                        ether_input_chain(ifp, m, chain);

                        /* Reset mbuf chains. */
                        JME_RXCHAIN_RESET(sc, ring);
                }
        }

        rdata->jme_rx_cons += nsegs;
        rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
}
static void
jme_rxeof(struct jme_softc *sc, int ring, int count)
{
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
        struct jme_desc *desc;
        int nsegs, prog, pktlen;
        struct mbuf_chain chain[MAXCPU];

        ether_input_chain_init(chain);

        bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
            BUS_DMASYNC_POSTREAD);

        prog = 0;
        for (;;) {
#ifdef DEVICE_POLLING
                if (count >= 0 && count-- == 0)
                        break;
#endif
                desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
                if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
                        break;
                if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
                        break;

                /*
                 * Check number of segments against received bytes.
                 * Non-matching value would indicate that hardware
                 * is still trying to update Rx descriptors. I'm not
                 * sure whether this check is needed.
                 */
                nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
                pktlen = JME_RX_BYTES(le32toh(desc->buflen));
                if (nsegs != howmany(pktlen, MCLBYTES)) {
                        if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
                            "and packet size(%d) mismatch\n",
                            nsegs, pktlen);
                        break;
                }
                /* Received a frame. */
                jme_rxpkt(sc, ring, chain);
                prog++;
        }

        bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
            BUS_DMASYNC_PREWRITE);
        ether_input_dispatch(chain);
}
static void
jme_tick(void *xsc)
{
        struct jme_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii = device_get_softc(sc->jme_miibus);

        lwkt_serialize_enter(ifp->if_serializer);

        mii_tick(mii);
        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

        lwkt_serialize_exit(ifp->if_serializer);
}

static void
jme_reset(struct jme_softc *sc)
{
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}

static void
jme_init(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg;
	int error, r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Cancel any pending I/O.
	 */
	jme_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	jme_reset(sc);

	sc->jme_txd_spare =
	    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
	KKASSERT(sc->jme_txd_spare >= 1);

	/*
	 * If we use 64bit address mode for transmitting, each Tx request
	 * needs one more symbol descriptor.
	 */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		sc->jme_txd_spare += 1;

	if (sc->jme_flags & JME_FLAG_RSS) {
		sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
		KKASSERT(sc->jme_rx_ring_inuse > 1);
		/* TODO: enable RSS */
	} else {
		sc->jme_rx_ring_inuse = 1;
	}
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

	/* Init RX descriptors */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		error = jme_init_rx_ring(sc, r);
		if (error) {
			if_printf(ifp, "initialization failed: "
				  "no memory for %dth RX ring.\n", r);
			jme_stop(sc);
			return;
		}
	}

	/* Init TX descriptors */
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Reprogram the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, JME_PAR0,
	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
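	/*
	 * Illustrative example (values not from the original source): for
	 * the station address 00:1d:92:ab:cd:ef, eaddr[] holds the bytes
	 * in wire order, so the writes above yield PAR0 == 0xab921d00 and
	 * PAR1 == 0x0000efcd.
	 */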

	/*
	 * Configure Tx queue.
	 *  Tx priority queue weight value : 0
	 *  Tx FIFO threshold for processing next packet : 16QW
	 *  Maximum Tx DMA length : 512
	 *  Allow Tx DMA burst.
	 */
	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
	sc->jme_txcsr |= sc->jme_tx_dma_size;
	sc->jme_txcsr |= TXCSR_DMA_BURST;
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);

	/* Set Tx ring address to the hardware. */
	paddr = sc->jme_cdata.jme_tx_ring_paddr;
	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
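	/*
	 * Note: the ring base is a 64-bit bus address split across two
	 * 32-bit registers; JME_ADDR_HI() and JME_ADDR_LO() extract the
	 * upper and lower 32 bits respectively.
	 */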

	/* Configure TxMAC parameters. */
	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
	reg |= TXMAC_THRESH_1_PKT;
	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
	CSR_WRITE_4(sc, JME_TXMAC, reg);

	/*
	 * Configure Rx queue.
	 *  FIFO full threshold for transmitting Tx pause packet : 128T
	 *  FIFO threshold for processing next packet : 128QW
	 *  Rx queue 0 select
	 *  Max Rx DMA length : 128
	 *  Rx descriptor retry : 32
	 *  Rx descriptor retry time gap : 256ns
	 *  Don't receive runt/bad frame.
	 */
	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
	/*
	 * Since the Rx FIFO size is 4K bytes, receiving frames larger
	 * than 4K bytes will suffer from Rx FIFO overruns. So decrease
	 * the FIFO threshold to reduce the FIFO overruns for frames
	 * larger than 4000 bytes.
	 * For best performance of standard MTU sized frames, use the
	 * maximum allowable FIFO threshold, 128QW.
	 */
	if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
	    JME_RX_FIFO_SIZE)
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
	else
		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
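	/*
	 * Illustrative example: with the standard 1500 byte MTU the worst
	 * case frame is 1500 + 14 (Ethernet header) + 4 (VLAN tag) +
	 * 4 (CRC) == 1522 bytes, well under the 4K FIFO, so the fast
	 * 128QW threshold is chosen; a jumbo MTU such as 9000 would
	 * select the conservative 16QW threshold instead.
	 */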
	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
	/* XXX TODO DROP_BAD */

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

		/* Set Rx descriptor counter. */
		CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);

		/* Set Rx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
	}

	/* Clear receive filter. */
	CSR_WRITE_4(sc, JME_RXMAC, 0);

	/* Set up the receive filter. */
	jme_set_filter(sc);
	jme_set_vlan(sc);

	/*
	 * Disable all WOL bits as WOL can interfere with normal Rx
	 * operation. Also clear WOL detection status bits.
	 */
	reg = CSR_READ_4(sc, JME_PMCS);
	reg &= ~PMCS_WOL_ENB_MASK;
	CSR_WRITE_4(sc, JME_PMCS, reg);

	/*
	 * Pad 10 bytes right before the received frame. This greatly
	 * helps Rx performance on strict-alignment architectures, as
	 * the frame does not have to be copied to align its payload.
	 */
	reg = CSR_READ_4(sc, JME_RXMAC);
	reg |= RXMAC_PAD_10BYTES;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		reg |= RXMAC_CSUM_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);

	/* Configure general purpose reg0 */
	reg = CSR_READ_4(sc, JME_GPREG0);
	reg &= ~GPREG0_PCC_UNIT_MASK;
	/* Set PCC timer resolution to microseconds. */
	reg |= GPREG0_PCC_UNIT_US;
	/*
	 * Disable all shadow register posting as we have to read
	 * the JME_INTR_STATUS register in jme_intr. Also it seems
	 * hard to synchronize the interrupt status between the
	 * hardware and software with shadow posting due to the
	 * requirements of bus_dmamap_sync(9).
	 */
	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
	/* Disable posting of DW0. */
	reg &= ~GPREG0_POST_DW0_ENB;
	/* Clear PME message. */
	reg &= ~GPREG0_PME_ENB;
	/* Set PHY address. */
	reg &= ~GPREG0_PHY_ADDR_MASK;
	reg |= sc->jme_phyaddr;
	CSR_WRITE_4(sc, JME_GPREG0, reg);

	/* Configure Tx queue 0 packet completion coalescing. */
	jme_set_tx_coal(sc);

	/* Configure Rx queue 0 packet completion coalescing. */
	jme_set_rx_coal(sc);

	/* Configure shadow status block but don't enable posting. */
	paddr = sc->jme_cdata.jme_ssb_block_paddr;
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

	/* Disable Timer 1 and Timer 2. */
	CSR_WRITE_4(sc, JME_TIMER1, 0);
	CSR_WRITE_4(sc, JME_TIMER2, 0);

	/* Configure retry transmit period, retry limit value. */
	CSR_WRITE_4(sc, JME_TXTRHD,
	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
	     TXTRHD_RT_PERIOD_MASK) |
	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
	     TXTRHD_RT_LIMIT_MASK));

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	{
		/* Initialize the interrupt mask. */
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
		CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
	}

	/*
	 * Enabling Tx/Rx DMA engines and Rx queue processing is
	 * done after detection of a valid link in jme_miibus_statchg.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;

	/* Set the current media. */
	mii = device_get_softc(sc->jme_miibus);
	mii_mediachg(mii);

	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}
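
/*
 * Note on the flag handling at the end of jme_init(): IFF_RUNNING marks
 * the interface as fully initialized, while clearing IFF_OACTIVE tells
 * the stack that the transmit queue has room, so if_start may be called
 * to queue packets.
 */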

static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partially finished RX segments.
	 */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(sc, r);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				bus_dmamap_unload(rdata->jme_rx_tag,
						  rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
					  txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}

static void
jme_stop_tx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_TXCSR);
	if ((reg & TXCSR_TX_ENB) == 0)
		return;
	reg &= ~TXCSR_TX_ENB;
	CSR_WRITE_4(sc, JME_TXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping transmitter timed out!\n");
}

static void
jme_stop_rx(struct jme_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, JME_RXCSR);
	if ((reg & RXCSR_RX_ENB) == 0)
		return;
	reg &= ~RXCSR_RX_ENB;
	CSR_WRITE_4(sc, JME_RXCSR, reg);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->jme_dev, "stopping receiver timed out!\n");
}

static void
jme_init_tx_ring(struct jme_softc *sc)
{
	struct jme_chain_data *cd;
	struct jme_txdesc *txd;
	int i;

	sc->jme_cdata.jme_tx_prod = 0;
	sc->jme_cdata.jme_tx_cons = 0;
	sc->jme_cdata.jme_tx_cnt = 0;

	cd = &sc->jme_cdata;
	bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_desc = &cd->jme_tx_ring[i];
		txd->tx_ndesc = 0;
	}

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map,
			BUS_DMASYNC_PREWRITE);
}

static void
jme_init_ssb(struct jme_softc *sc)
{
	struct jme_chain_data *cd;

	cd = &sc->jme_cdata;
	bzero(cd->jme_ssb_block, JME_SSB_SIZE);
	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
			BUS_DMASYNC_PREWRITE);
}

static int
jme_init_rx_ring(struct jme_softc *sc, int ring)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_rxdesc *rxd;
	int i, error;

	KKASSERT(rdata->jme_rxhead == NULL &&
		 rdata->jme_rxtail == NULL &&
		 rdata->jme_rxlen == 0);
	rdata->jme_rx_cons = 0;

	bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
		rxd = &rdata->jme_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rdata->jme_rx_ring[i];
		error = jme_newbuf(sc, ring, rxd, 1);
		if (error)
			return error;
	}

	bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
			BUS_DMASYNC_PREWRITE);
	return 0;
}

static int
jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * JMC250 has a 64-bit boundary alignment limitation, so jme(4)
	 * takes advantage of the hardware's 10 byte padding feature in
	 * order to avoid copying the entire frame just to align the IP
	 * header on a 32-bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &segs;
	error = bus_dmamap_load_mbuf(rdata->jme_rx_tag,
				     rdata->jme_rx_sparemap,
				     m, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(rdata->jme_rx_tag,
					  rdata->jme_rx_sparemap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (init)
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->buflen = htole32(segs.ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
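	/*
	 * Note: setting JME_RD_OWN hands the descriptor (back) to the
	 * hardware, JME_RD_INTR requests an interrupt on completion, and
	 * JME_RD_64BIT presumably selects the 64-bit address format (an
	 * assumption based on the flag name).
	 */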
	return 0;
}

static void
jme_set_vlan(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;

	ASSERT_SERIALIZED(ifp->if_serializer);

	reg = CSR_READ_4(sc, JME_RXMAC);
	reg &= ~RXMAC_VLAN_ENB;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		reg |= RXMAC_VLAN_ENB;
	CSR_WRITE_4(sc, JME_RXMAC, reg);
}

static void
jme_set_filter(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ASSERT_SERIALIZED(ifp->if_serializer);

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		if (ifp->if_flags & IFF_ALLMULTI)
			rxcfg |= RXMAC_ALLMULTI;
		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
		return;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table. The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	rxcfg |= RXMAC_MULTICAST;
	bzero(mchash, sizeof(mchash));

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
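	/*
	 * Illustrative example (values not from the original source): if
	 * the big-endian CRC of a multicast address yields crc & 0x3f ==
	 * 37 (binary 100101), then 37 >> 5 == 1 and 37 & 0x1f == 5, so
	 * bit 5 of mchash[1] is set and matching frames pass the MAR1
	 * half of the hash table.
	 */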
}

static int
jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_tx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_to) {
		sc->jme_tx_coal_to = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_tx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_pkt) {
		sc->jme_tx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_rx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_to) {
		sc->jme_rx_coal_to = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static int
jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_rx_coal_pkt;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_rx_coal_pkt) {
		sc->jme_rx_coal_pkt = v;
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_rx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
jme_set_tx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);
}
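
/*
 * Note on packet completion coalescing: each PCC register packs a
 * timeout (in the PCC time unit selected via GPREG0, microseconds here)
 * and a packet count into one word; the chip raises the completion
 * interrupt once either threshold is reached, batching completions
 * instead of interrupting per packet.
 */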

static void
jme_set_rx_coal(struct jme_softc *sc)
{
	uint32_t reg;
	int r;

	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		if (r < sc->jme_rx_ring_inuse)
			CSR_WRITE_4(sc, JME_PCCRX(r), reg);
		else
			CSR_WRITE_4(sc, JME_PCCRX(r), 0);
	}
}

#ifdef DEVICE_POLLING

static void
jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct jme_softc *sc = ifp->if_softc;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
		break;

	case POLL_DEREGISTER:
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
		break;

	case POLL_AND_CHECK_STATUS:
	case POLL_ONLY:
		status = CSR_READ_4(sc, JME_INTR_STATUS);
		for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
			jme_rxeof(sc, r, count);

		if (status & INTR_RXQ_DESC_EMPTY) {
			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		jme_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		break;
	}
}

#endif	/* DEVICE_POLLING */

static int
jme_rxring_dma_alloc(struct jme_softc *sc, bus_addr_t lowaddr, int ring)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	bus_addr_t busaddr;
	int error;

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE(sc),	/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE(sc),	/* maxsegsize */
	    0,				/* flags */
	    &rdata->jme_rx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate %dth Rx ring DMA tag.\n", ring);
		return error;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(rdata->jme_rx_ring_tag,
				 (void **)&rdata->jme_rx_ring,
				 BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &rdata->jme_rx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for "
		    "%dth Rx ring.\n", ring);
		bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
		rdata->jme_rx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
				rdata->jme_rx_ring, JME_RX_RING_SIZE(sc),
				jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for %dth Rx ring.\n",
		    ring);
		bus_dmamem_free(rdata->jme_rx_ring_tag, rdata->jme_rx_ring,
				rdata->jme_rx_ring_map);
		bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
		rdata->jme_rx_ring_tag = NULL;
		return error;
	}
	rdata->jme_rx_ring_paddr = busaddr;
	return 0;
}

static int
jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
{
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	int i, error;

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", ring);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, 0,
				  &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n", ring);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, 0,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, ring);

			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}

static void
jme_rx_intr(struct jme_softc *sc, uint32_t status)
{
	int r;

	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		if (status & jme_rx_status[r].jme_coal)
			jme_rxeof(sc, r, -1);