/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
 */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
/* Define JME_SHOW_ERRORS to enable reporting of Rx errors. */
#undef	JME_SHOW_ERRORS

#define	JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);
#ifdef DEVICE_POLLING
static void	jme_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	jme_intr(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *);

static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *, int);
static void	jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
static void	jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
				  bus_size_t, int);
static int	jme_init_rx_ring(struct jme_softc *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);

static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);

static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
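
/*
 * Note: both tunables can be set from loader.conf(5) before the driver
 * attaches, e.g. (illustrative values only):
 *	hw.jme.rx_desc_count="512"
 *	hw.jme.tx_desc_count="384"
 * jme_attach() rounds the values up to JME_NDESC_ALIGN and clamps them
 * to JME_NDESC_MAX.
 */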
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else if (sc->jme_phyaddr != phy) {
		return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else if (sc->jme_phyaddr != phy) {
		return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}
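
/*
 * Both SMI accessors above share one handshake: the command is posted
 * to JME_SMI with SMI_OP_EXECUTE set, then the register is polled
 * (DELAY(1) per iteration, at most JME_PHY_TIMEOUT times) until the
 * hardware clears SMI_OP_EXECUTE to signal completion.
 */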
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has the side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first Tx/Rx descriptor
	 * address, so the driver must reset its internal
	 * producer/consumer pointers and reclaim any allocated
	 * resources.  Note that just saving the JME_TXNDA and JME_RXNDA
	 * registers before stopping the MAC and restoring them
	 * afterwards is not sufficient to guarantee a correct MAC
	 * state, because stopping MAC operation can take a while and
	 * the hardware may update JME_TXNDA/JME_RXNDA during the stop
	 * operation.
	 */
	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}
static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
		    JME_EEPROM_PAGE_BAR1) == fup) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
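
	/*
	 * Each descriptor scanned above is JME_EEPROM_DESC_BYTES long:
	 * a function/page byte, a register offset and one data byte.
	 * The six descriptors targeting JME_PAR0..JME_PAR0+5 rebuild
	 * the station address one byte per match.
	 */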
	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xffff;

	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
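
/*
 * Note the layout implied above: PAR0/PAR1 hold the station address
 * least-significant byte first, with PAR0 carrying bytes 0-3 and the
 * low 16 bits of PAR1 carrying bytes 4-5, hence the byte-wise shifts.
 */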
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
		sc->jme_rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
	if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_tx_desc_cnt = JME_NDESC_MAX;

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access would require a
	 * different BAR, it is a waste of time to use it; JMC250 maps
	 * the entire memory space with 16K.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return (ENXIO);
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->jme_irq_rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Extract capabilities
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
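
	/*
	 * Example of the decode above (illustrative): an FM field of 2
	 * and an ECO field of 3 combine to rev = (2 << 4) | 3 = 0x23;
	 * the revision checks below (JME_REV1_A2, JME_REV2) compare
	 * against this combined value.
	 */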
	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x\n", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
				      GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has a fixed PHY address, whereas the FPGA
	 * version requires PHY probing to find the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = JME_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
			}
			/* XXX should we clear JME_WA_EXTFIFO? */
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
	    &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return (0);
fail:
	jme_detach(dev);
	return (error);
}
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
		    sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc, 1);

	return (0);
}
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
	    0, "TX desc count");

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descriptors was set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned to JME_NDESC_ALIGN (16 currently).
	 */
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}
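
/*
 * Worked example of the clamp in jme_sysctl_node() (illustrative
 * numbers): with 384 Tx descriptors, coal_max = 384 / 6 = 64, so any
 * configured tx_coal_pkt above 64 is lowered to 64; with 256 Rx
 * descriptors, rx_coal_pkt is capped at 256 / 4 = 64.
 */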
static void
jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	*((bus_addr_t *)arg) = segs->ds_addr;
}

static void
jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
		  bus_size_t mapsz __unused, int error)
{
	struct jme_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t busaddr, lowaddr;
	int error, i;

	sc->jme_cdata.jme_txdesc =
	    kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);
	sc->jme_cdata.jme_rxdesc =
	    kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
		    M_DEVBUF, M_WAITOK | M_ZERO);

	lowaddr = sc->jme_lowaddr;
again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE(sc),	/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE(sc),	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
				sc->jme_rdata.jme_tx_ring,
				sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr = busaddr;

	/*
	 * Create DMA stuffs for RX ring
	 */

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE(sc),	/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE(sc),	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
				sc->jme_rdata.jme_rx_ring,
				sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr = busaddr;

	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		bus_addr_t rx_ring_end, tx_ring_end;

		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
		tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
			      JME_TX_RING_SIZE(sc);
		rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
			      JME_RX_RING_SIZE(sc);
		if ((JME_ADDR_HI(tx_ring_end) !=
		     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
		    (JME_ADDR_HI(rx_ring_end) !=
		     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
			device_printf(sc->jme_dev, "4GB boundary crossed, "
			    "switching to 32bit DMA address mode.\n");
			jme_dma_free(sc, 0);
			/* Limit DMA address space to 32bit and try again. */
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
			goto again;
		}
	}
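
	/*
	 * The retry above exists because each ring base is later
	 * programmed as one 64-bit value (JME_TXDBA_HI/LO and
	 * JME_RXDBA_HI/LO) whose upper 32 bits apply to the whole
	 * ring; a ring straddling a 4GB boundary could not be
	 * expressed that way, so the rings are simply reallocated
	 * from 32-bit addressable memory instead.
	 */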
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_ssb_map);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
		return error;
	}

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
				sc->jme_rdata.jme_ssb_block,
				sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr = busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_rx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Rx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
					   sc->jme_cdata.jme_rx_sparemap);
			bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}

	return (0);
}
static void
jme_dma_free(struct jme_softc *sc, int detach)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_rdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_cdata.jme_rx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
		    sc->jme_rdata.jme_rx_ring,
		    sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
		    sc->jme_cdata.jme_rx_sparemap);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_rdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (detach) {
		if (sc->jme_cdata.jme_txdesc != NULL) {
			kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
			sc->jme_cdata.jme_txdesc = NULL;
		}
		if (sc->jme_cdata.jme_rxdesc != NULL) {
			kfree(sc->jme_cdata.jme_rxdesc, M_DEVBUF);
			sc->jme_cdata.jme_rxdesc = NULL;
		}
	}
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}
/*
 * Unlike other ethernet controllers, the JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note that we reset the link speed to 10/100Mbps with
 * auto-negotiation, but we don't know whether that operation will
 * succeed or not, as we have no control after powering off.  If the
 * renegotiation fails, WOL may not work.  Running at 1Gbps draws more
 * power than the 375mA at 3.3V specified in the PCI specification,
 * and that would result in power to the ethernet controller being
 * shut down completely.
 *
 * TODO
 *  Save the current negotiated media speed/duplex/flow-control to the
 *  softc and restore the same link again after resuming.  PHY handling
 *  such as power down/resetting to 100Mbps may be better handled in
 *  the PHY driver's suspend method.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			pause("jmelnk", hz);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	jme_stop(sc);
	jme_setwol(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int pmc;

	lwkt_serialize_enter(ifp->if_serializer);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));
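
	/*
	 * At this point maxsegs is the ring space still usable by this
	 * frame: the free descriptors minus the JME_TXD_RSVD reserve
	 * and, in 64-bit mode, minus the extra symbol descriptor,
	 * clamped to JME_MAXTXSEGS per dmamap load.
	 */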
	ctx.nsegs = maxsegs;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
				     *m_head, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;

		ctx.nsegs = maxsegs;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
					     txd->tx_dmamap, *m_head,
					     jme_dmamap_buf_cb, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
				  "could not load defragged TX mbuf\n");
			error = EFBIG;
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
					  txd->tx_dmamap);
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error) {
		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
		return (error);
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is set up here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is set up here,
		 * is an effective TX desc carrying the first segment of
		 * the frame.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
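	/*
	 * Descriptor accounting: in 64-bit mode i is 0 and tx_ndesc
	 * starts at 1 (the symbol descriptor consumed above); in
	 * 32-bit mode i is 1 and tx_ndesc starts at 0 (the head
	 * descriptor already carries segment 0 and is counted through
	 * ctx.nsegs).  After the loop below, tx_ndesc += ctx.nsegs
	 * therefore covers every ring slot this frame occupies.
	 */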
	for (; i < ctx.nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += ctx.nsegs;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);

	return (0);
}
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	/*
	 * Reading TXCSR takes a very long time under heavy load, so
	 * cache the TXCSR value and write the ORed value with the kick
	 * command to TXCSR.  This saves one register access cycle.
	 */
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
	    TXCSR_TXQ_N_START(TXCSR_TXQ0));
	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = JME_TX_TIMEOUT;
}
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but the availability
			 * of Tx checksum offload should be checked
			 * against the new MTU size, as the FIFO is only
			 * 2K deep.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if (IFCAP_TXCSUM & ifp->if_capabilities) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) &&
		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
			   TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
			   TXMAC_FRAME_BURST);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;

		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
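
	/*
	 * Writing the COAL/COAL_TO bits back together with the matching
	 * COMP bit both acknowledges the interrupt and restarts the
	 * packet-completion coalescing counter/timer for that queue.
	 */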
	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rxeof(sc);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes a very long time under
			 * heavy load, so cache the RXCSR value and write
			 * the ORed value with the kick command to RXCSR.
			 * This saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;

		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map,
			BUS_DMASYNC_PREWRITE);
}
static __inline void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

		desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
		desc->buflen = htole32(MCLBYTES);
		JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
	}
}
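
/*
 * Note on jme_discard_rxbufs(): handing a descriptor back to the
 * hardware only requires restoring ownership (JME_RD_OWN) and a
 * full-cluster buffer length; the buffer address already programmed
 * into the descriptor is reused as-is.
 */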
/* Receive a frame. */
static void
jme_rxpkt(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
			      __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= sc->jme_rx_desc_cnt;
		return;
	}

	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, cons, nsegs - count);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * the multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				     (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = sc->jme_cdata.jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for the 10 bytes of auto padding that
			 * are used to align the IP header on a 32bit
			 * boundary.  Also note that the CRC bytes are
			 * automatically removed by the hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;
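
			/*
			 * Length fixup example (illustrative): for a
			 * 3-segment frame the head mbuf ends up with
			 * MCLBYTES - JME_RX_PAD_BYTES bytes, the middle
			 * mbuf keeps its full MCLBYTES, and the tail
			 * mbuf takes whatever of jme_rxlen remains.
			 */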
			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			ifp->if_input(ifp, m);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);
		}
	}

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= sc->jme_rx_desc_cnt;
}
static void
jme_rxeof(struct jme_softc *sc)
{
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
			sc->jme_cdata.jme_rx_ring_map,
			BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (;;) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check number of segments against received bytes.
		 * A non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors.  I'm not
		 * sure whether this check is really needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
				  "and packet size(%d) mismatch\n",
				  nsegs, pktlen);
			break;
		}

		/* Received a frame. */
		jme_rxpkt(sc);
		prog++;
	}

	if (prog > 0) {
		bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
				sc->jme_cdata.jme_rx_ring_map,
				BUS_DMASYNC_PREWRITE);
	}
}
2333 struct jme_softc *sc = xsc;
2334 struct ifnet *ifp = &sc->arpcom.ac_if;
2335 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2337 lwkt_serialize_enter(ifp->if_serializer);
2340 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2342 lwkt_serialize_exit(ifp->if_serializer);
2346 jme_reset(struct jme_softc *sc)
2349 /* Stop receiver, transmitter. */
2353 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2355 CSR_WRITE_4(sc, JME_GHC, 0);
2361 struct jme_softc *sc = xsc;
2362 struct ifnet *ifp = &sc->arpcom.ac_if;
2363 struct mii_data *mii;
2364 uint8_t eaddr[ETHER_ADDR_LEN];
2369 ASSERT_SERIALIZED(ifp->if_serializer);
2372 * Cancel any pending I/O.
2377 * Reset the chip to a known state.
sc->jme_txd_spare =
    howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2383 KKASSERT(sc->jme_txd_spare >= 1);
2386 * If we use 64bit address mode for transmitting, each Tx request
2387 * needs one more symbol descriptor.
2389 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2390 sc->jme_txd_spare += 1;
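/*
 * For a standard 1500 byte MTU this works out to
 * howmany(1500 + 18, MCLBYTES) == 1 spare descriptor, plus the
 * extra symbol descriptor when 64bit addressing is in use.
 */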
2392 /* Init descriptors. */
2393 error = jme_init_rx_ring(sc);
2395 device_printf(sc->jme_dev,
2396 "%s: initialization failed: no memory for Rx buffers.\n",
2401 jme_init_tx_ring(sc);
2403 /* Initialize shadow status block. */
2406 /* Reprogram the station address. */
2407 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2408 CSR_WRITE_4(sc, JME_PAR0,
2409 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2410 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
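/*
 * PAR0 takes the first four bytes of the station address in
 * little-endian order; PAR1 takes the remaining two.
 */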
2413 * Configure Tx queue.
2414 * Tx priority queue weight value : 0
2415 * Tx FIFO threshold for processing next packet : 16QW
2416 * Maximum Tx DMA length : 512
2417 * Allow Tx DMA burst.
2419 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2420 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2421 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2422 sc->jme_txcsr |= sc->jme_tx_dma_size;
2423 sc->jme_txcsr |= TXCSR_DMA_BURST;
2424 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2426 /* Set Tx descriptor counter. */
2427 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2429 /* Set Tx ring address to the hardware. */
2430 paddr = JME_TX_RING_ADDR(sc, 0);
2431 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2432 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2434 /* Configure TxMAC parameters. */
2435 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2436 reg |= TXMAC_THRESH_1_PKT;
2437 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2438 CSR_WRITE_4(sc, JME_TXMAC, reg);
2441 * Configure Rx queue.
2442 * FIFO full threshold for transmitting Tx pause packet : 128T
2443 * FIFO threshold for processing next packet : 128QW
2445 * Max Rx DMA length : 128
2446 * Rx descriptor retry : 32
2447 * Rx descriptor retry time gap : 256ns
2448 * Don't receive runt/bad frame.
2450 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2452 * Since Rx FIFO size is 4K bytes, receiving frames larger
2453 * than 4K bytes will suffer from Rx FIFO overruns. So
2454 * decrease FIFO threshold to reduce the FIFO overruns for
2455 * frames larger than 4000 bytes.
2456 * For best performance of standard MTU sized frames use
2457 * maximum allowable FIFO threshold, 128QW.
if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
    JME_RX_FIFO_SIZE)
	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
else
	sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2464 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2465 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2466 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2467 /* XXX TODO DROP_BAD */
2468 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2470 /* Set Rx descriptor counter. */
2471 CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2473 /* Set Rx ring address to the hardware. */
2474 paddr = JME_RX_RING_ADDR(sc, 0);
2475 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2476 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2478 /* Clear receive filter. */
2479 CSR_WRITE_4(sc, JME_RXMAC, 0);
2481 /* Set up the receive filter. */
* Disable all WOL bits as WOL can interfere with normal Rx
* operation.  Also clear WOL detection status bits.
2489 reg = CSR_READ_4(sc, JME_PMCS);
2490 reg &= ~PMCS_WOL_ENB_MASK;
2491 CSR_WRITE_4(sc, JME_PMCS, reg);
* Pad 10 bytes right before the received frame.  This greatly
* helps Rx performance on strict-alignment architectures as
* there is no need to copy the frame to align the payload.
2498 reg = CSR_READ_4(sc, JME_RXMAC);
2499 reg |= RXMAC_PAD_10BYTES;
2501 if (ifp->if_capenable & IFCAP_RXCSUM)
2502 reg |= RXMAC_CSUM_ENB;
2503 CSR_WRITE_4(sc, JME_RXMAC, reg);
/* Configure general purpose register 0. */
2506 reg = CSR_READ_4(sc, JME_GPREG0);
2507 reg &= ~GPREG0_PCC_UNIT_MASK;
/* Set PCC timer resolution to microsecond units. */
2509 reg |= GPREG0_PCC_UNIT_US;
* Disable all shadow register posting as we have to read the
* JME_INTR_STATUS register in jme_intr.  Also it seems
* hard to synchronize the interrupt status between the
* hardware and software with shadow posting, due to the
* requirements of bus_dmamap_sync(9).
2517 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2518 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2519 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2520 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2521 /* Disable posting of DW0. */
2522 reg &= ~GPREG0_POST_DW0_ENB;
2523 /* Clear PME message. */
2524 reg &= ~GPREG0_PME_ENB;
2525 /* Set PHY address. */
2526 reg &= ~GPREG0_PHY_ADDR_MASK;
2527 reg |= sc->jme_phyaddr;
2528 CSR_WRITE_4(sc, JME_GPREG0, reg);
2530 /* Configure Tx queue 0 packet completion coalescing. */
2531 jme_set_tx_coal(sc);
2533 /* Configure Rx queue 0 packet completion coalescing. */
2534 jme_set_rx_coal(sc);
2536 /* Configure shadow status block but don't enable posting. */
2537 paddr = sc->jme_rdata.jme_ssb_block_paddr;
2538 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2539 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2541 /* Disable Timer 1 and Timer 2. */
2542 CSR_WRITE_4(sc, JME_TIMER1, 0);
2543 CSR_WRITE_4(sc, JME_TIMER2, 0);
2545 /* Configure retry transmit period, retry limit value. */
CSR_WRITE_4(sc, JME_TXTRHD,
    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
     TXTRHD_RT_PERIOD_MASK) |
    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
     TXTRHD_RT_LIMIT_MASK));
2553 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2555 #ifdef DEVICE_POLLING
2556 if (!(ifp->if_flags & IFF_POLLING))
2558 /* Initialize the interrupt mask. */
2559 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2560 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
* Enabling the Tx/Rx DMA engines and Rx queue processing is
* done after detection of a valid link in jme_miibus_statchg.
2566 sc->jme_flags &= ~JME_FLAG_LINK;
2568 /* Set the current media. */
2569 mii = device_get_softc(sc->jme_miibus);
2572 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2574 ifp->if_flags |= IFF_RUNNING;
2575 ifp->if_flags &= ~IFF_OACTIVE;
2579 jme_stop(struct jme_softc *sc)
2581 struct ifnet *ifp = &sc->arpcom.ac_if;
2582 struct jme_txdesc *txd;
2583 struct jme_rxdesc *rxd;
2586 ASSERT_SERIALIZED(ifp->if_serializer);
2589 * Mark the interface down and cancel the watchdog timer.
2591 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2594 callout_stop(&sc->jme_tick_ch);
2595 sc->jme_flags &= ~JME_FLAG_LINK;
2598 * Disable interrupts.
2600 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2601 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2603 /* Disable updating shadow status block. */
2604 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2605 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2607 /* Stop receiver, transmitter. */
2612 /* Reclaim Rx/Tx buffers that have been completed. */
2614 if (sc->jme_cdata.jme_rxhead != NULL)
2615 m_freem(sc->jme_cdata.jme_rxhead);
2616 JME_RXCHAIN_RESET(sc);
* Free partially finished RX segments
2623 if (sc->jme_cdata.jme_rxhead != NULL)
2624 m_freem(sc->jme_cdata.jme_rxhead);
2625 JME_RXCHAIN_RESET(sc);
2628 * Free RX and TX mbufs still in the queues.
2630 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2631 rxd = &sc->jme_cdata.jme_rxdesc[i];
2632 if (rxd->rx_m != NULL) {
2633 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2639 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2640 txd = &sc->jme_cdata.jme_txdesc[i];
2641 if (txd->tx_m != NULL) {
2642 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2652 jme_stop_tx(struct jme_softc *sc)
2657 reg = CSR_READ_4(sc, JME_TXCSR);
2658 if ((reg & TXCSR_TX_ENB) == 0)
2660 reg &= ~TXCSR_TX_ENB;
2661 CSR_WRITE_4(sc, JME_TXCSR, reg);
2662 for (i = JME_TIMEOUT; i > 0; i--) {
2664 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2668 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2672 jme_stop_rx(struct jme_softc *sc)
2677 reg = CSR_READ_4(sc, JME_RXCSR);
2678 if ((reg & RXCSR_RX_ENB) == 0)
2680 reg &= ~RXCSR_RX_ENB;
2681 CSR_WRITE_4(sc, JME_RXCSR, reg);
2682 for (i = JME_TIMEOUT; i > 0; i--) {
2684 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2692 jme_init_tx_ring(struct jme_softc *sc)
2694 struct jme_ring_data *rd;
2695 struct jme_txdesc *txd;
2698 sc->jme_cdata.jme_tx_prod = 0;
2699 sc->jme_cdata.jme_tx_cons = 0;
2700 sc->jme_cdata.jme_tx_cnt = 0;
2702 rd = &sc->jme_rdata;
2703 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2704 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2705 txd = &sc->jme_cdata.jme_txdesc[i];
2707 txd->tx_desc = &rd->jme_tx_ring[i];
2711 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2712 sc->jme_cdata.jme_tx_ring_map,
2713 BUS_DMASYNC_PREWRITE);
2717 jme_init_ssb(struct jme_softc *sc)
2719 struct jme_ring_data *rd;
2721 rd = &sc->jme_rdata;
2722 bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2723 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2724 BUS_DMASYNC_PREWRITE);
2728 jme_init_rx_ring(struct jme_softc *sc)
2730 struct jme_ring_data *rd;
2731 struct jme_rxdesc *rxd;
2734 KKASSERT(sc->jme_cdata.jme_rxhead == NULL &&
2735 sc->jme_cdata.jme_rxtail == NULL &&
2736 sc->jme_cdata.jme_rxlen == 0);
2737 sc->jme_cdata.jme_rx_cons = 0;
2739 rd = &sc->jme_rdata;
2740 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE(sc));
2741 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2744 rxd = &sc->jme_cdata.jme_rxdesc[i];
2746 rxd->rx_desc = &rd->jme_rx_ring[i];
2747 error = jme_newbuf(sc, rxd, 1);
2752 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2753 sc->jme_cdata.jme_rx_ring_map,
2754 BUS_DMASYNC_PREWRITE);
2759 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
2761 struct jme_desc *desc;
2763 struct jme_dmamap_ctx ctx;
2764 bus_dma_segment_t segs;
2768 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
* The JMC250 has a 64bit boundary alignment limitation, so jme(4)
* takes advantage of the hardware's 10 byte padding feature in
* order not to copy the entire frame to align the IP header on a
* 32bit boundary.
2777 m->m_len = m->m_pkthdr.len = MCLBYTES;
2781 error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_rx_tag,
2782 sc->jme_cdata.jme_rx_sparemap,
2783 m, jme_dmamap_buf_cb, &ctx,
2785 if (error || ctx.nsegs == 0) {
2787 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2788 sc->jme_cdata.jme_rx_sparemap);
2790 if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
2795 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2799 if (rxd->rx_m != NULL) {
2800 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
2801 BUS_DMASYNC_POSTREAD);
2802 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
2804 map = rxd->rx_dmamap;
2805 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2806 sc->jme_cdata.jme_rx_sparemap = map;
2809 desc = rxd->rx_desc;
2810 desc->buflen = htole32(segs.ds_len);
2811 desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2812 desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2813 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
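/*
 * Setting JME_RD_OWN hands the descriptor to the hardware;
 * jme_rxeof() treats it as pending until the chip clears the
 * bit again.  JME_RD_INTR asks for a completion interrupt
 * (subject to coalescing) and JME_RD_64BIT marks addr_hi as
 * valid.
 */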
2819 jme_set_vlan(struct jme_softc *sc)
2821 struct ifnet *ifp = &sc->arpcom.ac_if;
2824 ASSERT_SERIALIZED(ifp->if_serializer);
2826 reg = CSR_READ_4(sc, JME_RXMAC);
2827 reg &= ~RXMAC_VLAN_ENB;
2828 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2829 reg |= RXMAC_VLAN_ENB;
2830 CSR_WRITE_4(sc, JME_RXMAC, reg);
2834 jme_set_filter(struct jme_softc *sc)
2836 struct ifnet *ifp = &sc->arpcom.ac_if;
2837 struct ifmultiaddr *ifma;
2842 ASSERT_SERIALIZED(ifp->if_serializer);
2844 rxcfg = CSR_READ_4(sc, JME_RXMAC);
2845 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2849 * Always accept frames destined to our station address.
2850 * Always accept broadcast frames.
2852 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2854 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2855 if (ifp->if_flags & IFF_PROMISC)
2856 rxcfg |= RXMAC_PROMISC;
2857 if (ifp->if_flags & IFF_ALLMULTI)
2858 rxcfg |= RXMAC_ALLMULTI;
2859 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2860 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2861 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2866 * Set up the multicast address filter by passing all multicast
2867 * addresses through a CRC generator, and then using the low-order
2868 * 6 bits as an index into the 64 bit multicast hash table. The
2869 * high order bits select the register, while the rest of the bits
2870 * select the bit within the register.
2872 rxcfg |= RXMAC_MULTICAST;
2873 bzero(mchash, sizeof(mchash));
2875 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2876 if (ifma->ifma_addr->sa_family != AF_LINK)
2878 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2879 ifma->ifma_addr), ETHER_ADDR_LEN);
/* Just want the 6 least significant bits. */
crc &= 0x3f;

/* Set the corresponding bit in the hash table. */
mchash[crc >> 5] |= 1 << (crc & 0x1f);
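/*
 * Of the 6 bit hash value, bit 5 selects MAR0 vs. MAR1 and the
 * low 5 bits select the bit within that register; e.g. a hash
 * of 0x2a sets bit 10 of MAR1.
 */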
2888 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2889 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2890 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2894 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2896 struct jme_softc *sc = arg1;
2897 struct ifnet *ifp = &sc->arpcom.ac_if;
2900 lwkt_serialize_enter(ifp->if_serializer);
2902 v = sc->jme_tx_coal_to;
2903 error = sysctl_handle_int(oidp, &v, 0, req);
2904 if (error || req->newptr == NULL)
2907 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2912 if (v != sc->jme_tx_coal_to) {
2913 sc->jme_tx_coal_to = v;
2914 if (ifp->if_flags & IFF_RUNNING)
2915 jme_set_tx_coal(sc);
2918 lwkt_serialize_exit(ifp->if_serializer);
2923 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2925 struct jme_softc *sc = arg1;
2926 struct ifnet *ifp = &sc->arpcom.ac_if;
2929 lwkt_serialize_enter(ifp->if_serializer);
2931 v = sc->jme_tx_coal_pkt;
2932 error = sysctl_handle_int(oidp, &v, 0, req);
2933 if (error || req->newptr == NULL)
2936 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2941 if (v != sc->jme_tx_coal_pkt) {
2942 sc->jme_tx_coal_pkt = v;
2943 if (ifp->if_flags & IFF_RUNNING)
2944 jme_set_tx_coal(sc);
2947 lwkt_serialize_exit(ifp->if_serializer);
2952 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2954 struct jme_softc *sc = arg1;
2955 struct ifnet *ifp = &sc->arpcom.ac_if;
2958 lwkt_serialize_enter(ifp->if_serializer);
2960 v = sc->jme_rx_coal_to;
2961 error = sysctl_handle_int(oidp, &v, 0, req);
2962 if (error || req->newptr == NULL)
2965 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2970 if (v != sc->jme_rx_coal_to) {
2971 sc->jme_rx_coal_to = v;
2972 if (ifp->if_flags & IFF_RUNNING)
2973 jme_set_rx_coal(sc);
2976 lwkt_serialize_exit(ifp->if_serializer);
2981 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2983 struct jme_softc *sc = arg1;
2984 struct ifnet *ifp = &sc->arpcom.ac_if;
2987 lwkt_serialize_enter(ifp->if_serializer);
2989 v = sc->jme_rx_coal_pkt;
2990 error = sysctl_handle_int(oidp, &v, 0, req);
2991 if (error || req->newptr == NULL)
2994 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
2999 if (v != sc->jme_rx_coal_pkt) {
3000 sc->jme_rx_coal_pkt = v;
3001 if (ifp->if_flags & IFF_RUNNING)
3002 jme_set_rx_coal(sc);
3005 lwkt_serialize_exit(ifp->if_serializer);
3010 jme_set_tx_coal(struct jme_softc *sc)
reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
    PCCTX_COAL_TO_MASK;
3016 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3017 PCCTX_COAL_PKT_MASK;
3018 reg |= PCCTX_COAL_TXQ0;
3019 CSR_WRITE_4(sc, JME_PCCTX, reg);
3023 jme_set_rx_coal(struct jme_softc *sc)
reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
    PCCRX_COAL_TO_MASK;
3029 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3030 PCCRX_COAL_PKT_MASK;
3031 CSR_WRITE_4(sc, JME_PCCRX0, reg);
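/*
 * Since jme_init() selects GPREG0_PCC_UNIT_US, the timeout
 * programmed above counts in microseconds; e.g. with
 * jme_rx_coal_to = 100 and jme_rx_coal_pkt = 16 the chip should
 * post an Rx interrupt once 16 packets have arrived or 100us
 * have elapsed, whichever comes first.
 */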
3034 #ifdef DEVICE_POLLING
3037 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3039 struct jme_softc *sc = ifp->if_softc;
3042 ASSERT_SERIALIZED(ifp->if_serializer);
3046 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3049 case POLL_DEREGISTER:
3050 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3053 case POLL_AND_CHECK_STATUS:
3055 status = CSR_READ_4(sc, JME_INTR_STATUS);
3058 if (status & INTR_RXQ_DESC_EMPTY) {
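/*
 * INTR_RXQ_DESC_EMPTY means the Rx queue stalled because it
 * ran out of free descriptors; after the ring has been
 * replenished, acknowledge the condition and restart the queue
 * by rewriting RXCSR with RXCSR_RX_ENB | RXCSR_RXQ_START below.
 */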
3059 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3060 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3061 RXCSR_RX_ENB | RXCSR_RXQ_START);
3065 if (!ifq_is_empty(&ifp->if_snd))
3071 #endif /* DEVICE_POLLING */