2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
30 #include "opt_polling.h"
34 #include <sys/param.h>
35 #include <sys/endian.h>
36 #include <sys/kernel.h>
38 #include <sys/interrupt.h>
39 #include <sys/malloc.h>
42 #include <sys/serialize.h>
43 #include <sys/serialize2.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
48 #include <net/ethernet.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_media.h>
54 #include <net/ifq_var.h>
55 #include <net/toeplitz.h>
56 #include <net/toeplitz2.h>
57 #include <net/vlan/if_vlan_var.h>
58 #include <net/vlan/if_vlan_ether.h>
60 #include <netinet/in.h>
62 #include <dev/netif/mii_layer/miivar.h>
63 #include <dev/netif/mii_layer/jmphyreg.h>
65 #include <bus/pci/pcireg.h>
66 #include <bus/pci/pcivar.h>
67 #include <bus/pci/pcidevs.h>
69 #include <dev/netif/jme/if_jmereg.h>
70 #include <dev/netif/jme/if_jmevar.h>
72 #include "miibus_if.h"
74 /* Define the following to enable printing of Rx errors. */
75 #undef JME_SHOW_ERRORS
77 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
80 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
82 if ((sc)->jme_rss_debug >= (lvl)) \
83 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
85 #else /* !JME_RSS_DEBUG */
86 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
87 #endif /* JME_RSS_DEBUG */
89 static int jme_probe(device_t);
90 static int jme_attach(device_t);
91 static int jme_detach(device_t);
92 static int jme_shutdown(device_t);
93 static int jme_suspend(device_t);
94 static int jme_resume(device_t);
96 static int jme_miibus_readreg(device_t, int, int);
97 static int jme_miibus_writereg(device_t, int, int, int);
98 static void jme_miibus_statchg(device_t);
100 static void jme_init(void *);
101 static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
102 static void jme_start(struct ifnet *);
103 static void jme_watchdog(struct ifnet *);
104 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
105 static int jme_mediachange(struct ifnet *);
106 #ifdef DEVICE_POLLING
107 static void jme_poll(struct ifnet *, enum poll_cmd, int);
109 static void jme_serialize(struct ifnet *, enum ifnet_serialize);
110 static void jme_deserialize(struct ifnet *, enum ifnet_serialize);
111 static int jme_tryserialize(struct ifnet *, enum ifnet_serialize);
113 static void jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
117 static void jme_intr(void *);
118 static void jme_txeof(struct jme_softc *);
119 static void jme_rxeof(struct jme_softc *, int);
120 static int jme_rxeof_chain(struct jme_softc *, int,
121 struct mbuf_chain *, int);
122 static void jme_rx_intr(struct jme_softc *, uint32_t);
124 static int jme_dma_alloc(struct jme_softc *);
125 static void jme_dma_free(struct jme_softc *);
126 static int jme_init_rx_ring(struct jme_softc *, int);
127 static void jme_init_tx_ring(struct jme_softc *);
128 static void jme_init_ssb(struct jme_softc *);
129 static int jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
130 static int jme_encap(struct jme_softc *, struct mbuf **);
131 static void jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
132 static int jme_rxring_dma_alloc(struct jme_softc *, int);
133 static int jme_rxbuf_dma_alloc(struct jme_softc *, int);
135 static void jme_tick(void *);
136 static void jme_stop(struct jme_softc *);
137 static void jme_reset(struct jme_softc *);
138 static void jme_set_vlan(struct jme_softc *);
139 static void jme_set_filter(struct jme_softc *);
140 static void jme_stop_tx(struct jme_softc *);
141 static void jme_stop_rx(struct jme_softc *);
142 static void jme_mac_config(struct jme_softc *);
143 static void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
144 static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
145 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
147 static void jme_setwol(struct jme_softc *);
148 static void jme_setlinkspeed(struct jme_softc *);
150 static void jme_set_tx_coal(struct jme_softc *);
151 static void jme_set_rx_coal(struct jme_softc *);
152 static void jme_enable_rss(struct jme_softc *);
153 static void jme_disable_rss(struct jme_softc *);
155 static void jme_sysctl_node(struct jme_softc *);
156 static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
157 static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
158 static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
159 static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
162 * Devices supported by this driver.
164 static const struct jme_dev {
165 uint16_t jme_vendorid;
166 uint16_t jme_deviceid;
168 const char *jme_name;
170 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
172 "JMicron Inc, JMC250 Gigabit Ethernet" },
173 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
175 "JMicron Inc, JMC260 Fast Ethernet" },
179 static device_method_t jme_methods[] = {
180 /* Device interface. */
181 DEVMETHOD(device_probe, jme_probe),
182 DEVMETHOD(device_attach, jme_attach),
183 DEVMETHOD(device_detach, jme_detach),
184 DEVMETHOD(device_shutdown, jme_shutdown),
185 DEVMETHOD(device_suspend, jme_suspend),
186 DEVMETHOD(device_resume, jme_resume),
189 DEVMETHOD(bus_print_child, bus_generic_print_child),
190 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
193 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
194 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
195 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
200 static driver_t jme_driver = {
203 sizeof(struct jme_softc)
206 static devclass_t jme_devclass;
208 DECLARE_DUMMY_MODULE(if_jme);
209 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
210 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
211 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
213 static const struct {
216 } jme_rx_status[JME_NRXRING_MAX] = {
217 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
218 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
219 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
220 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
223 static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
224 static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
225 static int jme_rx_ring_count = JME_NRXRING_DEF;
226 static int jme_msi_enable = 1;
228 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
229 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
230 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
231 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
234 * Read a PHY register on the MII of the JMC250.
237 jme_miibus_readreg(device_t dev, int phy, int reg)
239 struct jme_softc *sc = device_get_softc(dev);
243 /* For FPGA version, PHY address 0 should be ignored. */
244 if (sc->jme_caps & JME_CAP_FPGA) {
248 if (sc->jme_phyaddr != phy)
252 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
253 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
255 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
257 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
261 device_printf(sc->jme_dev, "phy read timeout: "
262 "phy %d, reg %d\n", phy, reg);
266 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
270 * Write a PHY register on the MII of the JMC250.
273 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
275 struct jme_softc *sc = device_get_softc(dev);
278 /* For FPGA version, PHY address 0 should be ignored. */
279 if (sc->jme_caps & JME_CAP_FPGA) {
283 if (sc->jme_phyaddr != phy)
287 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
288 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
289 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
291 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
293 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
297 device_printf(sc->jme_dev, "phy write timeout: "
298 "phy %d, reg %d\n", phy, reg);
305 * Callback from MII layer when media changes.
308 jme_miibus_statchg(device_t dev)
310 struct jme_softc *sc = device_get_softc(dev);
311 struct ifnet *ifp = &sc->arpcom.ac_if;
312 struct mii_data *mii;
313 struct jme_txdesc *txd;
317 ASSERT_IFNET_SERIALIZED_ALL(ifp);
319 if ((ifp->if_flags & IFF_RUNNING) == 0)
322 mii = device_get_softc(sc->jme_miibus);
324 sc->jme_flags &= ~JME_FLAG_LINK;
325 if ((mii->mii_media_status & IFM_AVALID) != 0) {
326 switch (IFM_SUBTYPE(mii->mii_media_active)) {
329 sc->jme_flags |= JME_FLAG_LINK;
332 if (sc->jme_caps & JME_CAP_FASTETH)
334 sc->jme_flags |= JME_FLAG_LINK;
342 * Disabling the Rx/Tx MACs has the side effect of resetting
343 * the JME_TXNDA/JME_RXNDA registers to the first Tx/Rx
344 * descriptor address, so the driver should reset its
345 * internal producer/consumer pointers and reclaim any
346 * allocated resources.  Note, just saving the value of
347 * the JME_TXNDA and JME_RXNDA registers before stopping the MAC
348 * and restoring them afterwards is not
349 * sufficient to ensure a correct MAC state, because
350 * stopping the MAC can take a while and the hardware
351 * might have updated the JME_TXNDA/JME_RXNDA registers
352 * during the stop operation.
355 /* Disable interrupts */
356 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
359 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
361 callout_stop(&sc->jme_tick_ch);
363 /* Stop receiver/transmitter. */
367 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
368 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
371 if (rdata->jme_rxhead != NULL)
372 m_freem(rdata->jme_rxhead);
373 JME_RXCHAIN_RESET(sc, r);
376 * Reuse the configured Rx descriptors and reset the
377 * producer/consumer index.
379 rdata->jme_rx_cons = 0;
383 if (sc->jme_cdata.jme_tx_cnt != 0) {
384 /* Remove queued packets for transmit. */
385 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
386 txd = &sc->jme_cdata.jme_txdesc[i];
387 if (txd->tx_m != NULL) {
389 sc->jme_cdata.jme_tx_tag,
398 jme_init_tx_ring(sc);
400 /* Initialize shadow status block. */
403 /* Program MAC with resolved speed/duplex/flow-control. */
404 if (sc->jme_flags & JME_FLAG_LINK) {
407 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
409 /* Set Tx ring address to the hardware. */
410 paddr = sc->jme_cdata.jme_tx_ring_paddr;
411 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
412 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
414 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
415 CSR_WRITE_4(sc, JME_RXCSR,
416 sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
418 /* Set Rx ring address to the hardware. */
419 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
420 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
421 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
424 /* Restart receiver/transmitter. */
425 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
427 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
430 ifp->if_flags |= IFF_RUNNING;
431 ifp->if_flags &= ~IFF_OACTIVE;
432 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
434 #ifdef DEVICE_POLLING
435 if (!(ifp->if_flags & IFF_POLLING))
437 /* Reenable interrupts. */
438 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
442 * Get the current interface media status.
445 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
447 struct jme_softc *sc = ifp->if_softc;
448 struct mii_data *mii = device_get_softc(sc->jme_miibus);
450 ASSERT_IFNET_SERIALIZED_ALL(ifp);
453 ifmr->ifm_status = mii->mii_media_status;
454 ifmr->ifm_active = mii->mii_media_active;
458 * Set hardware to newly-selected media.
461 jme_mediachange(struct ifnet *ifp)
463 struct jme_softc *sc = ifp->if_softc;
464 struct mii_data *mii = device_get_softc(sc->jme_miibus);
467 ASSERT_IFNET_SERIALIZED_ALL(ifp);
469 if (mii->mii_instance != 0) {
470 struct mii_softc *miisc;
472 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
473 mii_phy_reset(miisc);
475 error = mii_mediachg(mii);
481 jme_probe(device_t dev)
483 const struct jme_dev *sp;
486 vid = pci_get_vendor(dev);
487 did = pci_get_device(dev);
488 for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
489 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
490 struct jme_softc *sc = device_get_softc(dev);
492 sc->jme_caps = sp->jme_caps;
493 device_set_desc(dev, sp->jme_name);
501 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
507 for (i = JME_TIMEOUT; i > 0; i--) {
508 reg = CSR_READ_4(sc, JME_SMBCSR);
509 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
515 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
519 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
520 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
521 for (i = JME_TIMEOUT; i > 0; i--) {
523 reg = CSR_READ_4(sc, JME_SMBINTF);
524 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
529 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
533 reg = CSR_READ_4(sc, JME_SMBINTF);
534 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
540 jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
542 uint8_t fup, reg, val;
547 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
548 fup != JME_EEPROM_SIG0)
550 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
551 fup != JME_EEPROM_SIG1)
555 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
557 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
558 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
559 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
561 if (reg >= JME_PAR0 &&
562 reg < JME_PAR0 + ETHER_ADDR_LEN) {
563 if (jme_eeprom_read_byte(sc, offset + 2,
566 eaddr[reg - JME_PAR0] = val;
570 /* Check for the end of EEPROM descriptor. */
571 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
573 /* Try next eeprom descriptor. */
574 offset += JME_EEPROM_DESC_BYTES;
575 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
577 if (match == ETHER_ADDR_LEN)
584 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
588 /* Read station address. */
589 par0 = CSR_READ_4(sc, JME_PAR0);
590 par1 = CSR_READ_4(sc, JME_PAR1);
592 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
593 device_printf(sc->jme_dev,
594 "generating fake ethernet address.\n");
595 par0 = karc4random();
596 /* Set OUI to JMicron. */
600 eaddr[3] = (par0 >> 16) & 0xff;
601 eaddr[4] = (par0 >> 8) & 0xff;
602 eaddr[5] = par0 & 0xff;
604 eaddr[0] = (par0 >> 0) & 0xFF;
605 eaddr[1] = (par0 >> 8) & 0xFF;
606 eaddr[2] = (par0 >> 16) & 0xFF;
607 eaddr[3] = (par0 >> 24) & 0xFF;
608 eaddr[4] = (par1 >> 0) & 0xFF;
609 eaddr[5] = (par1 >> 8) & 0xFF;
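/*
 * Illustrative example (values made up): with PAR0 == 0x67452301 and
 * PAR1 == 0x0000ab89, the extraction above yields the station address
 * 01:23:45:67:89:ab, i.e. PAR0 holds the first four address bytes in
 * little-endian order and the low 16 bits of PAR1 hold the last two.
 */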
614 jme_attach(device_t dev)
616 struct jme_softc *sc = device_get_softc(dev);
617 struct ifnet *ifp = &sc->arpcom.ac_if;
620 uint8_t pcie_ptr, rev;
622 uint8_t eaddr[ETHER_ADDR_LEN];
625 lwkt_serialize_init(&sc->jme_serialize);
626 lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
627 for (i = 0; i < JME_NRXRING_MAX; ++i) {
629 &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
632 sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
633 if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
634 sc->jme_rx_desc_cnt = JME_NDESC_MAX;
636 sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
637 if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
638 sc->jme_tx_desc_cnt = JME_NDESC_MAX;
641 * Calculate rx rings based on ncpus2
643 sc->jme_rx_ring_cnt = jme_rx_ring_count;
644 if (sc->jme_rx_ring_cnt <= 0)
645 sc->jme_rx_ring_cnt = JME_NRXRING_1;
646 if (sc->jme_rx_ring_cnt > ncpus2)
647 sc->jme_rx_ring_cnt = ncpus2;
649 if (sc->jme_rx_ring_cnt >= JME_NRXRING_4)
650 sc->jme_rx_ring_cnt = JME_NRXRING_4;
651 else if (sc->jme_rx_ring_cnt >= JME_NRXRING_2)
652 sc->jme_rx_ring_cnt = JME_NRXRING_2;
653 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
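/*
 * Example (illustrative): with hw.jme.rx_ring_count set to 4 on a system
 * where ncpus2 == 2, the clamping above leaves jme_rx_ring_cnt at 2
 * (JME_NRXRING_2); on a uniprocessor machine it falls back to a single
 * ring (JME_NRXRING_1).
 */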
656 sc->jme_serialize_arr[i++] = &sc->jme_serialize;
657 sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
658 for (j = 0; j < sc->jme_rx_ring_cnt; ++j) {
659 sc->jme_serialize_arr[i++] =
660 &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
662 KKASSERT(i <= JME_NSERIALIZE);
663 sc->jme_serialize_cnt = i;
666 sc->jme_lowaddr = BUS_SPACE_MAXADDR;
668 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
670 callout_init(&sc->jme_tick_ch);
673 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
676 irq = pci_read_config(dev, PCIR_INTLINE, 4);
677 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
679 device_printf(dev, "chip is in D%d power mode "
680 "-- setting to D0\n", pci_get_powerstate(dev));
682 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
684 pci_write_config(dev, PCIR_INTLINE, irq, 4);
685 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
687 #endif /* !BURN_BRIDGE */
689 /* Enable bus mastering */
690 pci_enable_busmaster(dev);
695 * JMC250 supports both memory mapped and I/O register space
696 * access.  Because I/O register access has to use different
697 * BARs to access the registers, it's a waste of time to use I/O
698 * register space access.  JMC250 uses 16K to map the entire memory
701 sc->jme_mem_rid = JME_PCIR_BAR;
702 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
703 &sc->jme_mem_rid, RF_ACTIVE);
704 if (sc->jme_mem_res == NULL) {
705 device_printf(dev, "can't allocate IO memory\n");
708 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
709 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
714 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
715 &sc->jme_irq_rid, &irq_flags);
717 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
718 &sc->jme_irq_rid, irq_flags);
719 if (sc->jme_irq_res == NULL) {
720 device_printf(dev, "can't allocate irq\n");
728 reg = CSR_READ_4(sc, JME_CHIPMODE);
729 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
731 sc->jme_caps |= JME_CAP_FPGA;
733 device_printf(dev, "FPGA revision: 0x%04x\n",
734 (reg & CHIPMODE_FPGA_REV_MASK) >>
735 CHIPMODE_FPGA_REV_SHIFT);
739 /* NOTE: FM revision is put in the upper 4 bits */
740 rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
741 rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
743 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
745 did = pci_get_device(dev);
747 case PCI_PRODUCT_JMICRON_JMC250:
748 if (rev == JME_REV1_A2)
749 sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
752 case PCI_PRODUCT_JMICRON_JMC260:
754 sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
758 panic("unknown device id 0x%04x\n", did);
760 if (rev >= JME_REV2) {
761 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
762 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
763 GHC_TXMAC_CLKSRC_1000;
766 /* Reset the ethernet controller. */
769 /* Get station address. */
770 reg = CSR_READ_4(sc, JME_SMBCSR);
771 if (reg & SMBCSR_EEPROM_PRESENT)
772 error = jme_eeprom_macaddr(sc, eaddr);
773 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
774 if (error != 0 && (bootverbose)) {
775 device_printf(dev, "ethernet hardware address "
776 "not found in EEPROM.\n");
778 jme_reg_macaddr(sc, eaddr);
783 * The integrated JR0211 has a fixed PHY address whereas the FPGA
784 * version requires PHY probing to get the correct PHY address.
786 if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
787 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
788 GPREG0_PHY_ADDR_MASK;
790 device_printf(dev, "PHY is at address %d.\n",
797 /* Set max allowable DMA size. */
798 pcie_ptr = pci_get_pciecap_ptr(dev);
802 sc->jme_caps |= JME_CAP_PCIE;
803 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
805 device_printf(dev, "Read request size : %d bytes.\n",
806 128 << ((ctrl >> 12) & 0x07));
807 device_printf(dev, "TLP payload size : %d bytes.\n",
808 128 << ((ctrl >> 5) & 0x07));
810 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
811 case PCIEM_DEVCTL_MAX_READRQ_128:
812 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
814 case PCIEM_DEVCTL_MAX_READRQ_256:
815 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
818 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
821 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
823 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
824 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
828 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
829 sc->jme_caps |= JME_CAP_PMCAP;
837 /* Allocate DMA stuffs */
838 error = jme_dma_alloc(sc);
843 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
844 ifp->if_init = jme_init;
845 ifp->if_ioctl = jme_ioctl;
846 ifp->if_start = jme_start;
847 #ifdef DEVICE_POLLING
848 ifp->if_poll = jme_poll;
850 ifp->if_watchdog = jme_watchdog;
851 ifp->if_serialize = jme_serialize;
852 ifp->if_deserialize = jme_deserialize;
853 ifp->if_tryserialize = jme_tryserialize;
855 ifp->if_serialize_assert = jme_serialize_assert;
857 ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
858 ifq_set_ready(&ifp->if_snd);
860 /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
861 ifp->if_capabilities = IFCAP_HWCSUM |
863 IFCAP_VLAN_HWTAGGING;
864 if (sc->jme_rx_ring_cnt > JME_NRXRING_MIN)
865 ifp->if_capabilities |= IFCAP_RSS;
866 ifp->if_capenable = ifp->if_capabilities;
869 * Disable TXCSUM by default to improve bulk data
870 * transmit performance (+20Mbps improvement).
872 ifp->if_capenable &= ~IFCAP_TXCSUM;
874 if (ifp->if_capenable & IFCAP_TXCSUM)
875 ifp->if_hwassist = JME_CSUM_FEATURES;
877 /* Set up MII bus. */
878 error = mii_phy_probe(dev, &sc->jme_miibus,
879 jme_mediachange, jme_mediastatus);
881 device_printf(dev, "no PHY found!\n");
886 * Save PHYADDR for FPGA mode PHY.
888 if (sc->jme_caps & JME_CAP_FPGA) {
889 struct mii_data *mii = device_get_softc(sc->jme_miibus);
891 if (mii->mii_instance != 0) {
892 struct mii_softc *miisc;
894 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
895 if (miisc->mii_phy != 0) {
896 sc->jme_phyaddr = miisc->mii_phy;
900 if (sc->jme_phyaddr != 0) {
901 device_printf(sc->jme_dev,
902 "FPGA PHY is at %d\n", sc->jme_phyaddr);
904 jme_miibus_writereg(dev, sc->jme_phyaddr,
905 JMPHY_CONF, JMPHY_CONF_DEFFIFO);
907 /* XXX should we clear JME_WA_EXTFIFO */
912 ether_ifattach(ifp, eaddr, NULL);
914 /* Tell the upper layer(s) we support long frames. */
915 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
917 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
918 &sc->jme_irq_handle, &sc->jme_serialize);
920 device_printf(dev, "could not set up interrupt handler.\n");
925 ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
926 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
934 jme_detach(device_t dev)
936 struct jme_softc *sc = device_get_softc(dev);
938 if (device_is_attached(dev)) {
939 struct ifnet *ifp = &sc->arpcom.ac_if;
941 ifnet_serialize_all(ifp);
943 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
944 ifnet_deserialize_all(ifp);
949 if (sc->jme_sysctl_tree != NULL)
950 sysctl_ctx_free(&sc->jme_sysctl_ctx);
952 if (sc->jme_miibus != NULL)
953 device_delete_child(dev, sc->jme_miibus);
954 bus_generic_detach(dev);
956 if (sc->jme_irq_res != NULL) {
957 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
960 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
961 pci_release_msi(dev);
963 if (sc->jme_mem_res != NULL) {
964 bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
974 jme_sysctl_node(struct jme_softc *sc)
978 char rx_ring_pkt[32];
982 sysctl_ctx_init(&sc->jme_sysctl_ctx);
983 sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
984 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
985 device_get_nameunit(sc->jme_dev),
987 if (sc->jme_sysctl_tree == NULL) {
988 device_printf(sc->jme_dev, "can't add sysctl node\n");
992 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
993 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
994 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
995 sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
997 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
998 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
999 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1000 sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
1002 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1003 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1004 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1005 sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
1007 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1008 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1009 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1010 sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
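/*
 * Illustrative only: once the device is attached, the coalescing knobs
 * above can be changed at runtime with sysctl(8), e.g.
 * "sysctl hw.jme0.rx_coal_pkt=64" (the "jme0" node name comes from the
 * device_get_nameunit() node created above; the value is just an example).
 */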
1012 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1013 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1014 "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
1015 0, "RX desc count");
1016 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1017 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1018 "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
1019 0, "TX desc count");
1020 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1021 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1022 "rx_ring_count", CTLFLAG_RD, &sc->jme_rx_ring_cnt,
1023 0, "RX ring count");
1024 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1025 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1026 "rx_ring_inuse", CTLFLAG_RD, &sc->jme_rx_ring_inuse,
1027 0, "RX ring in use");
1028 #ifdef JME_RSS_DEBUG
1029 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1030 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1031 "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
1032 0, "RSS debug level");
1033 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1034 ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
1035 SYSCTL_ADD_UINT(&sc->jme_sysctl_ctx,
1036 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1037 rx_ring_pkt, CTLFLAG_RW,
1038 &sc->jme_rx_ring_pkt[r],
1044 * Set default coalesce values
1046 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1047 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1048 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1049 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1052 * Adjust the coalesce values, in case the number of TX/RX
1053 * descs is set to small values by users.
1055 * NOTE: coal_max will not be zero, since the number of descs
1056 * must be aligned to JME_NDESC_ALIGN (16 currently)
1058 coal_max = sc->jme_tx_desc_cnt / 6;
1059 if (coal_max < sc->jme_tx_coal_pkt)
1060 sc->jme_tx_coal_pkt = coal_max;
1062 coal_max = sc->jme_rx_desc_cnt / 4;
1063 if (coal_max < sc->jme_rx_coal_pkt)
1064 sc->jme_rx_coal_pkt = coal_max;
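/*
 * Worked example (illustrative): with jme_tx_desc_cnt == 512 and
 * jme_rx_desc_cnt == 512, coal_max is 512 / 6 == 85 for TX and
 * 512 / 4 == 128 for RX, so the default coalescing packet counts are
 * only lowered when they exceed those limits.
 */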
1068 jme_dma_alloc(struct jme_softc *sc)
1070 struct jme_txdesc *txd;
1074 sc->jme_cdata.jme_txdesc =
1075 kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
1076 M_DEVBUF, M_WAITOK | M_ZERO);
1077 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1078 sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
1079 kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
1080 M_DEVBUF, M_WAITOK | M_ZERO);
1083 /* Create parent ring tag. */
1084 error = bus_dma_tag_create(NULL,/* parent */
1085 1, JME_RING_BOUNDARY, /* algnmnt, boundary */
1086 sc->jme_lowaddr, /* lowaddr */
1087 BUS_SPACE_MAXADDR, /* highaddr */
1088 NULL, NULL, /* filter, filterarg */
1089 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1091 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1093 &sc->jme_cdata.jme_ring_tag);
1095 device_printf(sc->jme_dev,
1096 "could not create parent ring DMA tag.\n");
1101 * Create DMA stuffs for TX ring
1103 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1104 JME_TX_RING_ALIGN, 0,
1105 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1106 JME_TX_RING_SIZE(sc),
1107 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1109 device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
1112 sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1113 sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1114 sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1115 sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
1118 * Create DMA stuffs for RX rings
1120 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1121 error = jme_rxring_dma_alloc(sc, i);
1126 /* Create parent buffer tag. */
1127 error = bus_dma_tag_create(NULL,/* parent */
1128 1, 0, /* algnmnt, boundary */
1129 sc->jme_lowaddr, /* lowaddr */
1130 BUS_SPACE_MAXADDR, /* highaddr */
1131 NULL, NULL, /* filter, filterarg */
1132 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1134 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1136 &sc->jme_cdata.jme_buffer_tag);
1138 device_printf(sc->jme_dev,
1139 "could not create parent buffer DMA tag.\n");
1144 * Create DMA stuffs for shadow status block
1146 error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
1147 JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1148 JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1150 device_printf(sc->jme_dev,
1151 "could not create shadow status block.\n");
1154 sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1155 sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1156 sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1157 sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
1160 * Create DMA stuffs for TX buffers
1163 /* Create tag for Tx buffers. */
1164 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1165 1, 0, /* algnmnt, boundary */
1166 BUS_SPACE_MAXADDR, /* lowaddr */
1167 BUS_SPACE_MAXADDR, /* highaddr */
1168 NULL, NULL, /* filter, filterarg */
1169 JME_JUMBO_FRAMELEN, /* maxsize */
1170 JME_MAXTXSEGS, /* nsegments */
1171 JME_MAXSEGSIZE, /* maxsegsize */
1172 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
1173 &sc->jme_cdata.jme_tx_tag);
1175 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1179 /* Create DMA maps for Tx buffers. */
1180 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1181 txd = &sc->jme_cdata.jme_txdesc[i];
1182 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1183 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1188 device_printf(sc->jme_dev,
1189 "could not create %dth Tx dmamap.\n", i);
1191 for (j = 0; j < i; ++j) {
1192 txd = &sc->jme_cdata.jme_txdesc[j];
1193 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1196 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1197 sc->jme_cdata.jme_tx_tag = NULL;
1203 * Create DMA stuffs for RX buffers
1205 for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
1206 error = jme_rxbuf_dma_alloc(sc, i);
1214 jme_dma_free(struct jme_softc *sc)
1216 struct jme_txdesc *txd;
1217 struct jme_rxdesc *rxd;
1218 struct jme_rxdata *rdata;
1222 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1223 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1224 sc->jme_cdata.jme_tx_ring_map);
1225 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1226 sc->jme_cdata.jme_tx_ring,
1227 sc->jme_cdata.jme_tx_ring_map);
1228 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1229 sc->jme_cdata.jme_tx_ring_tag = NULL;
1233 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1234 rdata = &sc->jme_cdata.jme_rx_data[r];
1235 if (rdata->jme_rx_ring_tag != NULL) {
1236 bus_dmamap_unload(rdata->jme_rx_ring_tag,
1237 rdata->jme_rx_ring_map);
1238 bus_dmamem_free(rdata->jme_rx_ring_tag,
1240 rdata->jme_rx_ring_map);
1241 bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1242 rdata->jme_rx_ring_tag = NULL;
1247 if (sc->jme_cdata.jme_tx_tag != NULL) {
1248 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
1249 txd = &sc->jme_cdata.jme_txdesc[i];
1250 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1253 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1254 sc->jme_cdata.jme_tx_tag = NULL;
1258 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1259 rdata = &sc->jme_cdata.jme_rx_data[r];
1260 if (rdata->jme_rx_tag != NULL) {
1261 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
1262 rxd = &rdata->jme_rxdesc[i];
1263 bus_dmamap_destroy(rdata->jme_rx_tag,
1266 bus_dmamap_destroy(rdata->jme_rx_tag,
1267 rdata->jme_rx_sparemap);
1268 bus_dma_tag_destroy(rdata->jme_rx_tag);
1269 rdata->jme_rx_tag = NULL;
1273 /* Shadow status block. */
1274 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1275 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1276 sc->jme_cdata.jme_ssb_map);
1277 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1278 sc->jme_cdata.jme_ssb_block,
1279 sc->jme_cdata.jme_ssb_map);
1280 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1281 sc->jme_cdata.jme_ssb_tag = NULL;
1284 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1285 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1286 sc->jme_cdata.jme_buffer_tag = NULL;
1288 if (sc->jme_cdata.jme_ring_tag != NULL) {
1289 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1290 sc->jme_cdata.jme_ring_tag = NULL;
1293 if (sc->jme_cdata.jme_txdesc != NULL) {
1294 kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1295 sc->jme_cdata.jme_txdesc = NULL;
1297 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
1298 rdata = &sc->jme_cdata.jme_rx_data[r];
1299 if (rdata->jme_rxdesc != NULL) {
1300 kfree(rdata->jme_rxdesc, M_DEVBUF);
1301 rdata->jme_rxdesc = NULL;
1307 * Make sure the interface is stopped at reboot time.
1310 jme_shutdown(device_t dev)
1312 return jme_suspend(dev);
1317 * Unlike other ethernet controllers, JMC250 requires
1318 * explicitly resetting the link speed to 10/100Mbps, as a gigabit
1319 * link will consume more power than 375mA.
1320 * Note, we reset the link speed to 10/100Mbps with
1321 * auto-negotiation, but we don't know whether that operation
1322 * will succeed or not, as we have no control after powering
1323 * off.  If the renegotiation fails, WOL may not work.  Running
1324 * at 1Gbps draws more power than the 375mA at 3.3V that is
1325 * specified in the PCI specification, and that would result in
1326 * power to the ethernet controller being shut down completely.
1329 * Save the current negotiated media speed/duplex/flow-control
1330 * to the softc and restore the same link again after resuming.
1331 * PHY handling such as powering down/resetting to 100Mbps
1332 * may be better handled in the suspend method of the PHY driver.
1335 jme_setlinkspeed(struct jme_softc *sc)
1337 struct mii_data *mii;
1340 JME_LOCK_ASSERT(sc);
1342 mii = device_get_softc(sc->jme_miibus);
1345 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1346 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1356 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1357 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1358 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1359 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1360 BMCR_AUTOEN | BMCR_STARTNEG);
1363 /* Poll link state until jme(4) gets a 10/100 link. */
1364 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1366 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1367 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1377 pause("jmelnk", hz);
1380 if (i == MII_ANEGTICKS_GIGE)
1381 device_printf(sc->jme_dev, "establishing link failed, "
1382 "WOL may not work!");
1385 * No link; force the MAC to a 100Mbps, full-duplex link.
1386 * This is the last resort and may or may not work.
1388 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1389 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1394 jme_setwol(struct jme_softc *sc)
1396 struct ifnet *ifp = &sc->arpcom.ac_if;
1401 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1402 /* No PME capability, PHY power down. */
1403 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1404 MII_BMCR, BMCR_PDOWN);
1408 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1409 pmcs = CSR_READ_4(sc, JME_PMCS);
1410 pmcs &= ~PMCS_WOL_ENB_MASK;
1411 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1412 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1413 /* Enable PME message. */
1414 gpr |= GPREG0_PME_ENB;
1415 /* For gigabit controllers, reset link speed to 10/100. */
1416 if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1417 jme_setlinkspeed(sc);
1420 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1421 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1424 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1425 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1426 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1427 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1428 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1429 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1430 /* No WOL, PHY power down. */
1431 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1432 MII_BMCR, BMCR_PDOWN);
1438 jme_suspend(device_t dev)
1440 struct jme_softc *sc = device_get_softc(dev);
1441 struct ifnet *ifp = &sc->arpcom.ac_if;
1443 ifnet_serialize_all(ifp);
1448 ifnet_deserialize_all(ifp);
1454 jme_resume(device_t dev)
1456 struct jme_softc *sc = device_get_softc(dev);
1457 struct ifnet *ifp = &sc->arpcom.ac_if;
1462 ifnet_serialize_all(ifp);
1465 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1468 pmstat = pci_read_config(sc->jme_dev,
1469 pmc + PCIR_POWER_STATUS, 2);
1470 /* Disable PME and clear PME status. */
1471 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1472 pci_write_config(sc->jme_dev,
1473 pmc + PCIR_POWER_STATUS, pmstat, 2);
1477 if (ifp->if_flags & IFF_UP)
1480 ifnet_deserialize_all(ifp);
1486 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1488 struct jme_txdesc *txd;
1489 struct jme_desc *desc;
1491 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1493 int error, i, prod, symbol_desc;
1494 uint32_t cflags, flag64;
1496 M_ASSERTPKTHDR((*m_head));
1498 prod = sc->jme_cdata.jme_tx_prod;
1499 txd = &sc->jme_cdata.jme_txdesc[prod];
1501 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1506 maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
1507 (JME_TXD_RSVD + symbol_desc);
1508 if (maxsegs > JME_MAXTXSEGS)
1509 maxsegs = JME_MAXTXSEGS;
1510 KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
1511 ("not enough segments %d\n", maxsegs));
1513 error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1514 txd->tx_dmamap, m_head,
1515 txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1519 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1520 BUS_DMASYNC_PREWRITE);
1525 /* Configure checksum offload. */
1526 if (m->m_pkthdr.csum_flags & CSUM_IP)
1527 cflags |= JME_TD_IPCSUM;
1528 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1529 cflags |= JME_TD_TCPCSUM;
1530 if (m->m_pkthdr.csum_flags & CSUM_UDP)
1531 cflags |= JME_TD_UDPCSUM;
1533 /* Configure VLAN. */
1534 if (m->m_flags & M_VLANTAG) {
1535 cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1536 cflags |= JME_TD_VLAN_TAG;
1539 desc = &sc->jme_cdata.jme_tx_ring[prod];
1540 desc->flags = htole32(cflags);
1541 desc->addr_hi = htole32(m->m_pkthdr.len);
1542 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1544 * Use the 64-bit TX desc chain format.
1546 * The first TX desc of the chain, which is set up here,
1547 * is just a symbol TX desc carrying no payload.
1549 flag64 = JME_TD_64BIT;
1553 /* No effective TX desc is consumed */
1557 * Use the 32-bit TX desc chain format.
1559 * The first TX desc of the chain, which is set up here,
1560 * is an effective TX desc carrying the first segment of
1564 desc->buflen = htole32(txsegs[0].ds_len);
1565 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1567 /* One effective TX desc is consumed */
1570 sc->jme_cdata.jme_tx_cnt++;
1571 KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
1572 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1573 JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1575 txd->tx_ndesc = 1 - i;
1576 for (; i < nsegs; i++) {
1577 desc = &sc->jme_cdata.jme_tx_ring[prod];
1578 desc->flags = htole32(JME_TD_OWN | flag64);
1579 desc->buflen = htole32(txsegs[i].ds_len);
1580 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1581 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1583 sc->jme_cdata.jme_tx_cnt++;
1584 KKASSERT(sc->jme_cdata.jme_tx_cnt <=
1585 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
1586 JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
1589 /* Update producer index. */
1590 sc->jme_cdata.jme_tx_prod = prod;
1592 * Finally request an interrupt and give ownership of the first
1593 * descriptor to the hardware.
1595 desc = txd->tx_desc;
1596 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1599 txd->tx_ndesc += nsegs;
1609 jme_start(struct ifnet *ifp)
1611 struct jme_softc *sc = ifp->if_softc;
1612 struct mbuf *m_head;
1615 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
1617 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1618 ifq_purge(&ifp->if_snd);
1622 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1625 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1628 while (!ifq_is_empty(&ifp->if_snd)) {
1630 * Check number of available TX descs, always
1631 * leave JME_TXD_RSVD free TX descs.
1633 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1634 sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
1635 ifp->if_flags |= IFF_OACTIVE;
1639 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1644 * Pack the data into the transmit ring. If we
1645 * don't have room, set the OACTIVE flag and wait
1646 * for the NIC to drain the ring.
1648 if (jme_encap(sc, &m_head)) {
1649 KKASSERT(m_head == NULL);
1651 ifp->if_flags |= IFF_OACTIVE;
1657 * If there's a BPF listener, bounce a copy of this frame
1660 ETHER_BPF_MTAP(ifp, m_head);
1665 * Reading TXCSR takes a very long time under heavy load,
1666 * so cache the TXCSR value and write the ORed value with
1667 * the kick command to the TXCSR.  This saves one register
1670 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1671 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1672 /* Set a timeout in case the chip goes out to lunch. */
1673 ifp->if_timer = JME_TX_TIMEOUT;
1678 jme_watchdog(struct ifnet *ifp)
1680 struct jme_softc *sc = ifp->if_softc;
1682 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1684 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1685 if_printf(ifp, "watchdog timeout (missed link)\n");
1692 if (sc->jme_cdata.jme_tx_cnt == 0) {
1693 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1695 if (!ifq_is_empty(&ifp->if_snd))
1700 if_printf(ifp, "watchdog timeout\n");
1703 if (!ifq_is_empty(&ifp->if_snd))
1708 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1710 struct jme_softc *sc = ifp->if_softc;
1711 struct mii_data *mii = device_get_softc(sc->jme_miibus);
1712 struct ifreq *ifr = (struct ifreq *)data;
1713 int error = 0, mask;
1715 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1719 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1720 (!(sc->jme_caps & JME_CAP_JUMBO) &&
1721 ifr->ifr_mtu > JME_MAX_MTU)) {
1726 if (ifp->if_mtu != ifr->ifr_mtu) {
1728 * No special configuration is required when the interface
1729 * MTU is changed, but the availability of Tx checksum
1730 * offload should be checked against the new MTU size as
1731 * the FIFO size is just 2K.
1733 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1734 ifp->if_capenable &= ~IFCAP_TXCSUM;
1735 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1737 ifp->if_mtu = ifr->ifr_mtu;
1738 if (ifp->if_flags & IFF_RUNNING)
1744 if (ifp->if_flags & IFF_UP) {
1745 if (ifp->if_flags & IFF_RUNNING) {
1746 if ((ifp->if_flags ^ sc->jme_if_flags) &
1747 (IFF_PROMISC | IFF_ALLMULTI))
1753 if (ifp->if_flags & IFF_RUNNING)
1756 sc->jme_if_flags = ifp->if_flags;
1761 if (ifp->if_flags & IFF_RUNNING)
1767 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1771 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1773 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1774 ifp->if_capenable ^= IFCAP_TXCSUM;
1775 if (IFCAP_TXCSUM & ifp->if_capenable)
1776 ifp->if_hwassist |= JME_CSUM_FEATURES;
1778 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1780 if (mask & IFCAP_RXCSUM) {
1783 ifp->if_capenable ^= IFCAP_RXCSUM;
1784 reg = CSR_READ_4(sc, JME_RXMAC);
1785 reg &= ~RXMAC_CSUM_ENB;
1786 if (ifp->if_capenable & IFCAP_RXCSUM)
1787 reg |= RXMAC_CSUM_ENB;
1788 CSR_WRITE_4(sc, JME_RXMAC, reg);
1791 if (mask & IFCAP_VLAN_HWTAGGING) {
1792 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1796 if (mask & IFCAP_RSS) {
1797 ifp->if_capenable ^= IFCAP_RSS;
1798 if (ifp->if_flags & IFF_RUNNING)
1804 error = ether_ioctl(ifp, cmd, data);
1811 jme_mac_config(struct jme_softc *sc)
1813 struct mii_data *mii;
1814 uint32_t ghc, rxmac, txmac, txpause, gp1;
1815 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
1817 mii = device_get_softc(sc->jme_miibus);
1819 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1821 CSR_WRITE_4(sc, JME_GHC, 0);
1823 rxmac = CSR_READ_4(sc, JME_RXMAC);
1824 rxmac &= ~RXMAC_FC_ENB;
1825 txmac = CSR_READ_4(sc, JME_TXMAC);
1826 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1827 txpause = CSR_READ_4(sc, JME_TXPFC);
1828 txpause &= ~TXPFC_PAUSE_ENB;
1829 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1830 ghc |= GHC_FULL_DUPLEX;
1831 rxmac &= ~RXMAC_COLL_DET_ENB;
1832 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1833 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1836 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1837 txpause |= TXPFC_PAUSE_ENB;
1838 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1839 rxmac |= RXMAC_FC_ENB;
1841 /* Disable retry transmit timer/retry limit. */
1842 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1843 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1845 rxmac |= RXMAC_COLL_DET_ENB;
1846 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1847 /* Enable retry transmit timer/retry limit. */
1848 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1849 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1853 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1855 gp1 = CSR_READ_4(sc, JME_GPREG1);
1856 gp1 &= ~GPREG1_WA_HDX;
1858 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1861 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1863 ghc |= GHC_SPEED_10 | sc->jme_clksrc;
1865 gp1 |= GPREG1_WA_HDX;
1869 ghc |= GHC_SPEED_100 | sc->jme_clksrc;
1871 gp1 |= GPREG1_WA_HDX;
1874 * Use extended FIFO depth to work around CRC errors
1875 * emitted by chips before JMC250B
1877 phyconf = JMPHY_CONF_EXTFIFO;
1881 if (sc->jme_caps & JME_CAP_FASTETH)
1884 ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
1886 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1892 CSR_WRITE_4(sc, JME_GHC, ghc);
1893 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1894 CSR_WRITE_4(sc, JME_TXMAC, txmac);
1895 CSR_WRITE_4(sc, JME_TXPFC, txpause);
1897 if (sc->jme_workaround & JME_WA_EXTFIFO) {
1898 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1899 JMPHY_CONF, phyconf);
1901 if (sc->jme_workaround & JME_WA_HDX)
1902 CSR_WRITE_4(sc, JME_GPREG1, gp1);
1908 struct jme_softc *sc = xsc;
1909 struct ifnet *ifp = &sc->arpcom.ac_if;
1913 ASSERT_SERIALIZED(&sc->jme_serialize);
1915 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
1916 if (status == 0 || status == 0xFFFFFFFF)
1919 /* Disable interrupts. */
1920 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
1922 status = CSR_READ_4(sc, JME_INTR_STATUS);
1923 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
1926 /* Reset PCC counter/timer and Ack interrupts. */
1927 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
1929 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
1930 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
1932 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
1933 if (status & jme_rx_status[r].jme_coal) {
1934 status |= jme_rx_status[r].jme_coal |
1935 jme_rx_status[r].jme_comp;
1939 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
1941 if (ifp->if_flags & IFF_RUNNING) {
1942 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
1943 jme_rx_intr(sc, status);
1945 if (status & INTR_RXQ_DESC_EMPTY) {
1947 * Notify the hardware of the availability of new Rx buffers.
1948 * Reading RXCSR takes a very long time under heavy
1949 * load, so cache the RXCSR value and write the ORed
1950 * value with the kick command to the RXCSR.  This
1951 * saves one register access cycle.
1953 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
1954 RXCSR_RX_ENB | RXCSR_RXQ_START);
1957 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
1958 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
1960 if (!ifq_is_empty(&ifp->if_snd))
1962 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
1966 /* Reenable interrupts. */
1967 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
1971 jme_txeof(struct jme_softc *sc)
1973 struct ifnet *ifp = &sc->arpcom.ac_if;
1974 struct jme_txdesc *txd;
1978 cons = sc->jme_cdata.jme_tx_cons;
1979 if (cons == sc->jme_cdata.jme_tx_prod)
1983 * Go through our Tx list and free mbufs for those
1984 * frames which have been transmitted.
1986 while (cons != sc->jme_cdata.jme_tx_prod) {
1987 txd = &sc->jme_cdata.jme_txdesc[cons];
1988 KASSERT(txd->tx_m != NULL,
1989 ("%s: freeing NULL mbuf!\n", __func__));
1991 status = le32toh(txd->tx_desc->flags);
1992 if ((status & JME_TD_OWN) == JME_TD_OWN)
1995 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
1999 if (status & JME_TD_COLLISION) {
2000 ifp->if_collisions +=
2001 le32toh(txd->tx_desc->buflen) &
2002 JME_TD_BUF_LEN_MASK;
2007 * Only the first descriptor of a multi-descriptor
2008 * transmission is updated, so the driver has to skip the entire
2009 * chain of buffers for the transmitted frame.  In other
2010 * words, the JME_TD_OWN bit is valid only at the first
2011 * descriptor of a multi-descriptor transmission.
2013 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2014 sc->jme_cdata.jme_tx_ring[cons].flags = 0;
2015 JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
2018 /* Reclaim transferred mbufs. */
2019 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2022 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2023 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2024 ("%s: Active Tx desc counter was garbled\n", __func__));
2027 sc->jme_cdata.jme_tx_cons = cons;
2029 if (sc->jme_cdata.jme_tx_cnt == 0)
2032 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
2033 sc->jme_tx_desc_cnt - JME_TXD_RSVD)
2034 ifp->if_flags &= ~IFF_OACTIVE;
2037 static __inline void
2038 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2040 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2043 for (i = 0; i < count; ++i) {
2044 struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2046 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2047 desc->buflen = htole32(MCLBYTES);
2048 JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2052 static __inline struct pktinfo *
2053 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2055 if (flags & JME_RD_IPV4)
2056 pi->pi_netisr = NETISR_IP;
2057 else if (flags & JME_RD_IPV6)
2058 pi->pi_netisr = NETISR_IPV6;
2063 pi->pi_l3proto = IPPROTO_UNKNOWN;
2065 if (flags & JME_RD_MORE_FRAG)
2066 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2067 else if (flags & JME_RD_TCP)
2068 pi->pi_l3proto = IPPROTO_TCP;
2069 else if (flags & JME_RD_UDP)
2070 pi->pi_l3proto = IPPROTO_UDP;
2076 /* Receive a frame. */
2078 jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
2080 struct ifnet *ifp = &sc->arpcom.ac_if;
2081 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2082 struct jme_desc *desc;
2083 struct jme_rxdesc *rxd;
2084 struct mbuf *mp, *m;
2085 uint32_t flags, status, hash, hashinfo;
2086 int cons, count, nsegs;
2088 cons = rdata->jme_rx_cons;
2089 desc = &rdata->jme_rx_ring[cons];
2090 flags = le32toh(desc->flags);
2091 status = le32toh(desc->buflen);
2092 hash = le32toh(desc->addr_hi);
2093 hashinfo = le32toh(desc->addr_lo);
2094 nsegs = JME_RX_NSEGS(status);
2096 JME_RSS_DPRINTF(sc, 15, "ring%d, flags 0x%08x, "
2097 "hash 0x%08x, hash info 0x%08x\n",
2098 ring, flags, hash, hashinfo);
2100 if (status & JME_RX_ERR_STAT) {
2102 jme_discard_rxbufs(sc, ring, cons, nsegs);
2103 #ifdef JME_SHOW_ERRORS
2104 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2105 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2107 rdata->jme_rx_cons += nsegs;
2108 rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2112 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2113 for (count = 0; count < nsegs; count++,
2114 JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
2115 rxd = &rdata->jme_rxdesc[cons];
2118 /* Add a new receive buffer to the ring. */
2119 if (jme_newbuf(sc, ring, rxd, 0) != 0) {
2122 jme_discard_rxbufs(sc, ring, cons, nsegs - count);
2123 if (rdata->jme_rxhead != NULL) {
2124 m_freem(rdata->jme_rxhead);
2125 JME_RXCHAIN_RESET(sc, ring);
2131 * Assume we've received a full-sized frame.
2132 * The actual size is fixed up when we encounter the end of
2133 * a multi-segmented frame.
2135 mp->m_len = MCLBYTES;
2137 /* Chain received mbufs. */
2138 if (rdata->jme_rxhead == NULL) {
2139 rdata->jme_rxhead = mp;
2140 rdata->jme_rxtail = mp;
2143 * Receive processor can receive a maximum frame
2144 * size of 65535 bytes.
2146 rdata->jme_rxtail->m_next = mp;
2147 rdata->jme_rxtail = mp;
2150 if (count == nsegs - 1) {
2151 struct pktinfo pi0, *pi;
2153 /* Last desc. for this frame. */
2154 m = rdata->jme_rxhead;
2155 m->m_pkthdr.len = rdata->jme_rxlen;
2157 /* Set first mbuf size. */
2158 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2159 /* Set last mbuf size. */
2160 mp->m_len = rdata->jme_rxlen -
2161 ((MCLBYTES - JME_RX_PAD_BYTES) +
2162 (MCLBYTES * (nsegs - 2)));
2164 m->m_len = rdata->jme_rxlen;
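/*
 * Illustrative example of the size math above: for a frame whose
 * jme_rxlen is 5000 bytes spread over nsegs == 3 clusters, the first
 * mbuf carries MCLBYTES - JME_RX_PAD_BYTES (2048 - 10 == 2038) bytes,
 * the middle mbuf a full 2048-byte cluster, and the last mbuf the
 * remaining 5000 - (2038 + 2048) == 914 bytes.
 */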
2166 m->m_pkthdr.rcvif = ifp;
2169 * Account for the 10-byte auto padding which is used
2170 * to align the IP header on a 32-bit boundary.  Also note,
2171 * the CRC bytes are automatically removed by the
2174 m->m_data += JME_RX_PAD_BYTES;
2176 /* Set checksum information. */
2177 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2178 (flags & JME_RD_IPV4)) {
2179 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2180 if (flags & JME_RD_IPCSUM)
2181 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2182 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2183 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2184 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2185 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2186 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2187 m->m_pkthdr.csum_flags |=
2188 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2189 m->m_pkthdr.csum_data = 0xffff;
2193 /* Check for VLAN tagged packets. */
2194 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2195 (flags & JME_RD_VLAN_TAG)) {
2196 m->m_pkthdr.ether_vlantag =
2197 flags & JME_RD_VLAN_MASK;
2198 m->m_flags |= M_VLANTAG;
2203 if (ifp->if_capenable & IFCAP_RSS)
2204 pi = jme_pktinfo(&pi0, flags);
2209 (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
2210 m->m_flags |= M_HASH;
2211 m->m_pkthdr.hash = toeplitz_hash(hash);
2214 #ifdef JME_RSS_DEBUG
2216 JME_RSS_DPRINTF(sc, 10,
2217 "isr %d flags %08x, l3 %d %s\n",
2218 pi->pi_netisr, pi->pi_flags,
2220 (m->m_flags & M_HASH) ? "hash" : "");
2225 ether_input_chain(ifp, m, pi, chain);
2227 /* Reset mbuf chains. */
2228 JME_RXCHAIN_RESET(sc, ring);
2229 #ifdef JME_RSS_DEBUG
2230 sc->jme_rx_ring_pkt[ring]++;
2235 rdata->jme_rx_cons += nsegs;
2236 rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
2240 jme_rxeof_chain(struct jme_softc *sc, int ring, struct mbuf_chain *chain,
2243 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2244 struct jme_desc *desc;
2245 int nsegs, prog, pktlen;
2249 #ifdef DEVICE_POLLING
2250 if (count >= 0 && count-- == 0)
2253 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2254 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2256 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2260 * Check the number of segments against the received bytes.
2261 * A non-matching value would indicate that the hardware
2262 * is still trying to update the Rx descriptors.  I'm not
2263 * sure whether this check is needed.
2265 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2266 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2267 if (nsegs != howmany(pktlen, MCLBYTES)) {
2268 if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2269 "and packet size(%d) mismach\n",
2274 /* Received a frame. */
2275 jme_rxpkt(sc, ring, chain);
2282 jme_rxeof(struct jme_softc *sc, int ring)
2284 struct mbuf_chain chain[MAXCPU];
2286 ether_input_chain_init(chain);
2287 if (jme_rxeof_chain(sc, ring, chain, -1))
2288 ether_input_dispatch(chain);
2294 struct jme_softc *sc = xsc;
2295 struct ifnet *ifp = &sc->arpcom.ac_if;
2296 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2298 ifnet_serialize_all(ifp);
2301 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2303 ifnet_deserialize_all(ifp);
2307 jme_reset(struct jme_softc *sc)
2311 /* Make sure that TX and RX are stopped */
2316 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2320 * Hold reset bit before stop reset
2323 /* Disable TXMAC and TXOFL clock sources */
2324 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2325 /* Disable RXMAC clock source */
2326 val = CSR_READ_4(sc, JME_GPREG1);
2327 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2329 CSR_READ_4(sc, JME_GHC);
2332 CSR_WRITE_4(sc, JME_GHC, 0);
2334 CSR_READ_4(sc, JME_GHC);
2337 * Clear reset bit after stop reset
2340 /* Enable TXMAC and TXOFL clock sources */
2341 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2342 /* Enable RXMAC clock source */
2343 val = CSR_READ_4(sc, JME_GPREG1);
2344 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2346 CSR_READ_4(sc, JME_GHC);
2348 /* Disable TXMAC and TXOFL clock sources */
2349 CSR_WRITE_4(sc, JME_GHC, 0);
2350 /* Disable RXMAC clock source */
2351 val = CSR_READ_4(sc, JME_GPREG1);
2352 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2354 CSR_READ_4(sc, JME_GHC);
2356 /* Enable TX and RX */
2357 val = CSR_READ_4(sc, JME_TXCSR);
2358 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2359 val = CSR_READ_4(sc, JME_RXCSR);
2360 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2362 CSR_READ_4(sc, JME_TXCSR);
2363 CSR_READ_4(sc, JME_RXCSR);
2365 /* Enable TXMAC and TXOFL clock sources */
2366 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2367 /* Enable RXMAC clock source */
2368 val = CSR_READ_4(sc, JME_GPREG1);
2369 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2371 CSR_READ_4(sc, JME_GHC);
2373 /* Stop TX and RX */
2381 struct jme_softc *sc = xsc;
2382 struct ifnet *ifp = &sc->arpcom.ac_if;
2383 struct mii_data *mii;
2384 uint8_t eaddr[ETHER_ADDR_LEN];
2389 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2392 * Cancel any pending I/O.
2397 * Reset the chip to a known state.
2402 howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2403 KKASSERT(sc->jme_txd_spare >= 1);
2406 * If we use 64-bit address mode for transmitting, each Tx request
2407 * needs one more symbol descriptor.
2409 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2410 sc->jme_txd_spare += 1;
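/*
 * Example (illustrative only): with the standard 1500-byte MTU,
 * howmany(1500 + sizeof(struct ether_vlan_header), MCLBYTES) =
 * howmany(1518, 2048) = 1, so one spare Tx descriptor is reserved,
 * plus one more when 64-bit DMA addressing is in use.
 */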
2412 if (ifp->if_capenable & IFCAP_RSS)
2415 jme_disable_rss(sc);
2417 /* Init RX descriptors */
2418 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2419 error = jme_init_rx_ring(sc, r);
2421 if_printf(ifp, "initialization failed: "
2422 "no memory for %dth RX ring.\n", r);
2428 /* Init TX descriptors */
2429 jme_init_tx_ring(sc);
2431 /* Initialize shadow status block. */
2434 /* Reprogram the station address. */
2435 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2436 CSR_WRITE_4(sc, JME_PAR0,
2437 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2438 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
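/*
 * Example (illustrative only): for station address 00:11:22:33:44:55,
 * eaddr[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, so PAR0 is written
 * with 0x33221100 and PAR1 with 0x00005544; the address is packed
 * little-endian across the two registers.
 */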
2441 * Configure Tx queue.
2442 * Tx priority queue weight value : 0
2443 * Tx FIFO threshold for processing next packet : 16QW
2444 * Maximum Tx DMA length : 512
2445 * Allow Tx DMA burst.
2447 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2448 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2449 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2450 sc->jme_txcsr |= sc->jme_tx_dma_size;
2451 sc->jme_txcsr |= TXCSR_DMA_BURST;
2452 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2454 /* Set Tx descriptor counter. */
2455 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2457 /* Set Tx ring address to the hardware. */
2458 paddr = sc->jme_cdata.jme_tx_ring_paddr;
2459 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2460 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2462 /* Configure TxMAC parameters. */
2463 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2464 reg |= TXMAC_THRESH_1_PKT;
2465 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2466 CSR_WRITE_4(sc, JME_TXMAC, reg);
2469 * Configure Rx queue.
2470 * FIFO full threshold for transmitting Tx pause packet : 128T
2471 * FIFO threshold for processing next packet : 128QW
2473 * Max Rx DMA length : 128
2474 * Rx descriptor retry : 32
2475 * Rx descriptor retry time gap : 256ns
2476 * Don't receive runt/bad frame.
2478 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2481 * Since Rx FIFO size is 4K bytes, receiving frames larger
2482 * than 4K bytes will suffer from Rx FIFO overruns. So
2483 * decrease FIFO threshold to reduce the FIFO overruns for
2484 * frames larger than 4000 bytes.
2485 * For best performance of standard MTU sized frames use
2486 * maximum allowable FIFO threshold, 128QW.
2488 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2490 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2492 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
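/*
 * Example of the sizing above (illustrative): with the default
 * 1500-byte MTU the largest frame is 1500 + 14 (header) + 4 (VLAN
 * tag) + 4 (CRC) = 1522 bytes, well under the 4K Rx FIFO, so the
 * 128QW threshold is used; only larger MTUs fall back to 16QW.
 */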
2494 /* Improve PCI Express compatibility */
2495 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2497 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2498 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2499 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2500 /* XXX TODO DROP_BAD */
2502 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2503 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2505 /* Set Rx descriptor counter. */
2506 CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2508 /* Set Rx ring address to the hardware. */
2509 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2510 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2511 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2514 /* Clear receive filter. */
2515 CSR_WRITE_4(sc, JME_RXMAC, 0);
2517 /* Set up the receive filter. */
2522 * Disable all WOL bits, as WOL can interfere with normal Rx
2523 * operation. Also clear WOL detection status bits.
2525 reg = CSR_READ_4(sc, JME_PMCS);
2526 reg &= ~PMCS_WOL_ENB_MASK;
2527 CSR_WRITE_4(sc, JME_PMCS, reg);
2530 * Pad 10 bytes right before the received frame. This greatly
2531 * helps Rx performance on strict-alignment architectures, as
2532 * the driver does not need to copy the frame to align the payload.
2534 reg = CSR_READ_4(sc, JME_RXMAC);
2535 reg |= RXMAC_PAD_10BYTES;
2537 if (ifp->if_capenable & IFCAP_RXCSUM)
2538 reg |= RXMAC_CSUM_ENB;
2539 CSR_WRITE_4(sc, JME_RXMAC, reg);
2541 /* Configure general purpose reg0 */
2542 reg = CSR_READ_4(sc, JME_GPREG0);
2543 reg &= ~GPREG0_PCC_UNIT_MASK;
2544 /* Set PCC timer resolution to micro-seconds unit. */
2545 reg |= GPREG0_PCC_UNIT_US;
2547 * Disable all shadow register posting as we have to read
2548 * JME_INTR_STATUS register in jme_intr. Also it seems
2549 * that it's hard to synchronize interrupt status between
2550 * hardware and software with shadow posting due to
2551 * requirements of bus_dmamap_sync(9).
2553 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2554 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2555 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2556 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2557 /* Disable posting of DW0. */
2558 reg &= ~GPREG0_POST_DW0_ENB;
2559 /* Clear PME message. */
2560 reg &= ~GPREG0_PME_ENB;
2561 /* Set PHY address. */
2562 reg &= ~GPREG0_PHY_ADDR_MASK;
2563 reg |= sc->jme_phyaddr;
2564 CSR_WRITE_4(sc, JME_GPREG0, reg);
2566 /* Configure Tx queue 0 packet completion coalescing. */
2567 jme_set_tx_coal(sc);
2569 /* Configure Rx queue 0 packet completion coalescing. */
2570 jme_set_rx_coal(sc);
2572 /* Configure shadow status block but don't enable posting. */
2573 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2574 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2575 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2577 /* Disable Timer 1 and Timer 2. */
2578 CSR_WRITE_4(sc, JME_TIMER1, 0);
2579 CSR_WRITE_4(sc, JME_TIMER2, 0);
2581 /* Configure retry transmit period, retry limit value. */
2582 CSR_WRITE_4(sc, JME_TXTRHD,
2583 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2584 TXTRHD_RT_PERIOD_MASK) |
2585 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2586 TXTRHD_RT_LIMIT_MASK));
2588 #ifdef DEVICE_POLLING
2589 if (!(ifp->if_flags & IFF_POLLING))
2591 /* Initialize the interrupt mask. */
2592 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2593 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2596 * Enabling Tx/Rx DMA engines and Rx queue processing is
2597 * done after detection of valid link in jme_miibus_statchg.
2599 sc->jme_flags &= ~JME_FLAG_LINK;
2601 /* Set the current media. */
2602 mii = device_get_softc(sc->jme_miibus);
2605 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2607 ifp->if_flags |= IFF_RUNNING;
2608 ifp->if_flags &= ~IFF_OACTIVE;
2612 jme_stop(struct jme_softc *sc)
2614 struct ifnet *ifp = &sc->arpcom.ac_if;
2615 struct jme_txdesc *txd;
2616 struct jme_rxdesc *rxd;
2617 struct jme_rxdata *rdata;
2620 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2623 * Mark the interface down and cancel the watchdog timer.
2625 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2628 callout_stop(&sc->jme_tick_ch);
2629 sc->jme_flags &= ~JME_FLAG_LINK;
2632 * Disable interrupts.
2634 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2635 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2637 /* Disable updating shadow status block. */
2638 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2639 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2641 /* Stop receiver, transmitter. */
2646 * Free partially finished RX segments
2648 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2649 rdata = &sc->jme_cdata.jme_rx_data[r];
2650 if (rdata->jme_rxhead != NULL)
2651 m_freem(rdata->jme_rxhead);
2652 JME_RXCHAIN_RESET(sc, r);
2656 * Free RX and TX mbufs still in the queues.
2658 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2659 rdata = &sc->jme_cdata.jme_rx_data[r];
2660 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2661 rxd = &rdata->jme_rxdesc[i];
2662 if (rxd->rx_m != NULL) {
2663 bus_dmamap_unload(rdata->jme_rx_tag,
2670 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2671 txd = &sc->jme_cdata.jme_txdesc[i];
2672 if (txd->tx_m != NULL) {
2673 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2683 jme_stop_tx(struct jme_softc *sc)
2688 reg = CSR_READ_4(sc, JME_TXCSR);
2689 if ((reg & TXCSR_TX_ENB) == 0)
2691 reg &= ~TXCSR_TX_ENB;
2692 CSR_WRITE_4(sc, JME_TXCSR, reg);
2693 for (i = JME_TIMEOUT; i > 0; i--) {
2695 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2699 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2703 jme_stop_rx(struct jme_softc *sc)
2708 reg = CSR_READ_4(sc, JME_RXCSR);
2709 if ((reg & RXCSR_RX_ENB) == 0)
2711 reg &= ~RXCSR_RX_ENB;
2712 CSR_WRITE_4(sc, JME_RXCSR, reg);
2713 for (i = JME_TIMEOUT; i > 0; i--) {
2715 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2719 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2723 jme_init_tx_ring(struct jme_softc *sc)
2725 struct jme_chain_data *cd;
2726 struct jme_txdesc *txd;
2729 sc->jme_cdata.jme_tx_prod = 0;
2730 sc->jme_cdata.jme_tx_cons = 0;
2731 sc->jme_cdata.jme_tx_cnt = 0;
2733 cd = &sc->jme_cdata;
2734 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2735 for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2736 txd = &sc->jme_cdata.jme_txdesc[i];
2738 txd->tx_desc = &cd->jme_tx_ring[i];
2744 jme_init_ssb(struct jme_softc *sc)
2746 struct jme_chain_data *cd;
2748 cd = &sc->jme_cdata;
2749 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2753 jme_init_rx_ring(struct jme_softc *sc, int ring)
2755 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2756 struct jme_rxdesc *rxd;
2759 KKASSERT(rdata->jme_rxhead == NULL &&
2760 rdata->jme_rxtail == NULL &&
2761 rdata->jme_rxlen == 0);
2762 rdata->jme_rx_cons = 0;
2764 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2765 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2768 rxd = &rdata->jme_rxdesc[i];
2770 rxd->rx_desc = &rdata->jme_rx_ring[i];
2771 error = jme_newbuf(sc, ring, rxd, 1);
2779 jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
2781 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2782 struct jme_desc *desc;
2784 bus_dma_segment_t segs;
2788 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2792 * The JMC250 has a 64-bit boundary alignment limitation, so jme(4)
2793 * takes advantage of the hardware's 10-byte padding feature
2794 * in order not to copy the entire frame to align the IP header on
2797 m->m_len = m->m_pkthdr.len = MCLBYTES;
2799 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
2800 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
2805 if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
2809 if (rxd->rx_m != NULL) {
2810 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
2811 BUS_DMASYNC_POSTREAD);
2812 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
2814 map = rxd->rx_dmamap;
2815 rxd->rx_dmamap = rdata->jme_rx_sparemap;
2816 rdata->jme_rx_sparemap = map;
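/*
 * The new mbuf was loaded into the spare map above, so swapping maps
 * here publishes the new mapping; had the load failed, the
 * descriptor's previous mbuf and mapping would have stayed intact.
 */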
2819 desc = rxd->rx_desc;
2820 desc->buflen = htole32(segs.ds_len);
2821 desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
2822 desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
2823 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
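/*
 * Setting JME_RD_OWN hands the descriptor back to the hardware;
 * jme_rxeof_chain() stops at a descriptor while OWN is still set.
 * JME_RD_INTR and JME_RD_64BIT presumably request a completion
 * interrupt and select the 64-bit descriptor format, respectively.
 */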
2829 jme_set_vlan(struct jme_softc *sc)
2831 struct ifnet *ifp = &sc->arpcom.ac_if;
2834 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2836 reg = CSR_READ_4(sc, JME_RXMAC);
2837 reg &= ~RXMAC_VLAN_ENB;
2838 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2839 reg |= RXMAC_VLAN_ENB;
2840 CSR_WRITE_4(sc, JME_RXMAC, reg);
2844 jme_set_filter(struct jme_softc *sc)
2846 struct ifnet *ifp = &sc->arpcom.ac_if;
2847 struct ifmultiaddr *ifma;
2852 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2854 rxcfg = CSR_READ_4(sc, JME_RXMAC);
2855 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2859 * Always accept frames destined to our station address.
2860 * Always accept broadcast frames.
2862 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2864 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2865 if (ifp->if_flags & IFF_PROMISC)
2866 rxcfg |= RXMAC_PROMISC;
2867 if (ifp->if_flags & IFF_ALLMULTI)
2868 rxcfg |= RXMAC_ALLMULTI;
2869 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2870 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2871 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2876 * Set up the multicast address filter by passing all multicast
2877 * addresses through a CRC generator, and then using the low-order
2878 * 6 bits as an index into the 64 bit multicast hash table. The
2879 * high order bits select the register, while the rest of the bits
2880 * select the bit within the register.
2882 rxcfg |= RXMAC_MULTICAST;
2883 bzero(mchash, sizeof(mchash));
2885 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2886 if (ifma->ifma_addr->sa_family != AF_LINK)
2888 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2889 ifma->ifma_addr), ETHER_ADDR_LEN);
2891 /* Just want the 6 least significant bits. */
2894 /* Set the corresponding bit in the hash table. */
2895 mchash[crc >> 5] |= 1 << (crc & 0x1f);
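/*
 * Worked example (illustrative): if the low 6 bits of the big-endian
 * CRC are 0x2d (binary 101101), then crc >> 5 = 1 and crc & 0x1f = 13,
 * so bit 13 of mchash[1] (register MAR1) is set.
 */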
2898 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2899 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2900 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2904 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2906 struct jme_softc *sc = arg1;
2907 struct ifnet *ifp = &sc->arpcom.ac_if;
2910 ifnet_serialize_all(ifp);
2912 v = sc->jme_tx_coal_to;
2913 error = sysctl_handle_int(oidp, &v, 0, req);
2914 if (error || req->newptr == NULL)
2917 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2922 if (v != sc->jme_tx_coal_to) {
2923 sc->jme_tx_coal_to = v;
2924 if (ifp->if_flags & IFF_RUNNING)
2925 jme_set_tx_coal(sc);
2928 ifnet_deserialize_all(ifp);
2933 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2935 struct jme_softc *sc = arg1;
2936 struct ifnet *ifp = &sc->arpcom.ac_if;
2939 ifnet_serialize_all(ifp);
2941 v = sc->jme_tx_coal_pkt;
2942 error = sysctl_handle_int(oidp, &v, 0, req);
2943 if (error || req->newptr == NULL)
2946 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2951 if (v != sc->jme_tx_coal_pkt) {
2952 sc->jme_tx_coal_pkt = v;
2953 if (ifp->if_flags & IFF_RUNNING)
2954 jme_set_tx_coal(sc);
2957 ifnet_deserialize_all(ifp);
2962 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2964 struct jme_softc *sc = arg1;
2965 struct ifnet *ifp = &sc->arpcom.ac_if;
2968 ifnet_serialize_all(ifp);
2970 v = sc->jme_rx_coal_to;
2971 error = sysctl_handle_int(oidp, &v, 0, req);
2972 if (error || req->newptr == NULL)
2975 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2980 if (v != sc->jme_rx_coal_to) {
2981 sc->jme_rx_coal_to = v;
2982 if (ifp->if_flags & IFF_RUNNING)
2983 jme_set_rx_coal(sc);
2986 ifnet_deserialize_all(ifp);
2991 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2993 struct jme_softc *sc = arg1;
2994 struct ifnet *ifp = &sc->arpcom.ac_if;
2997 ifnet_serialize_all(ifp);
2999 v = sc->jme_rx_coal_pkt;
3000 error = sysctl_handle_int(oidp, &v, 0, req);
3001 if (error || req->newptr == NULL)
3004 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3009 if (v != sc->jme_rx_coal_pkt) {
3010 sc->jme_rx_coal_pkt = v;
3011 if (ifp->if_flags & IFF_RUNNING)
3012 jme_set_rx_coal(sc);
3015 ifnet_deserialize_all(ifp);
3020 jme_set_tx_coal(struct jme_softc *sc)
3024 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3026 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3027 PCCTX_COAL_PKT_MASK;
3028 reg |= PCCTX_COAL_TXQ0;
3029 CSR_WRITE_4(sc, JME_PCCTX, reg);
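/*
 * Note: this register packs the two coalescing thresholds for Tx
 * queue 0: a timeout (in microseconds, per the PCC unit selected in
 * jme_init) and a packet count; presumably the completion interrupt
 * fires when either threshold is reached.
 */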
3033 jme_set_rx_coal(struct jme_softc *sc)
3038 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3040 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3041 PCCRX_COAL_PKT_MASK;
3042 for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3043 if (r < sc->jme_rx_ring_inuse)
3044 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3046 CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3050 #ifdef DEVICE_POLLING
3053 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3055 struct jme_softc *sc = ifp->if_softc;
3056 struct mbuf_chain chain[MAXCPU];
3060 ASSERT_SERIALIZED(&sc->jme_serialize);
3064 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3067 case POLL_DEREGISTER:
3068 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3071 case POLL_AND_CHECK_STATUS:
3073 status = CSR_READ_4(sc, JME_INTR_STATUS);
3075 ether_input_chain_init(chain);
3076 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3077 struct jme_rxdata *rdata =
3078 &sc->jme_cdata.jme_rx_data[r];
3080 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3081 prog += jme_rxeof_chain(sc, r, chain, count);
3082 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3085 ether_input_dispatch(chain);
3087 if (status & INTR_RXQ_DESC_EMPTY) {
3088 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3089 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3090 RXCSR_RX_ENB | RXCSR_RXQ_START);
3093 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3095 if (!ifq_is_empty(&ifp->if_snd))
3097 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3102 #endif /* DEVICE_POLLING */
3105 jme_rxring_dma_alloc(struct jme_softc *sc, int ring)
3107 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3111 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
3112 JME_RX_RING_ALIGN, 0,
3113 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3114 JME_RX_RING_SIZE(sc),
3115 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3117 device_printf(sc->jme_dev,
3118 "could not allocate %dth Rx ring.\n", ring);
3121 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3122 rdata->jme_rx_ring_map = dmem.dmem_map;
3123 rdata->jme_rx_ring = dmem.dmem_addr;
3124 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3130 jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
3132 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
3135 /* Create tag for Rx buffers. */
3136 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
3137 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3138 BUS_SPACE_MAXADDR, /* lowaddr */
3139 BUS_SPACE_MAXADDR, /* highaddr */
3140 NULL, NULL, /* filter, filterarg */
3141 MCLBYTES, /* maxsize */
3143 MCLBYTES, /* maxsegsize */
3144 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3145 &rdata->jme_rx_tag);
3147 device_printf(sc->jme_dev,
3148 "could not create %dth Rx DMA tag.\n", ring);
3152 /* Create DMA maps for Rx buffers. */
3153 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3154 &rdata->jme_rx_sparemap);
3156 device_printf(sc->jme_dev,
3157 "could not create %dth spare Rx dmamap.\n", ring);
3158 bus_dma_tag_destroy(rdata->jme_rx_tag);
3159 rdata->jme_rx_tag = NULL;
3162 for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
3163 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3165 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3170 device_printf(sc->jme_dev,
3171 "could not create %dth Rx dmamap "
3172 "for %dth RX ring.\n", i, ring);
3174 for (j = 0; j < i; ++j) {
3175 rxd = &rdata->jme_rxdesc[j];
3176 bus_dmamap_destroy(rdata->jme_rx_tag,
3179 bus_dmamap_destroy(rdata->jme_rx_tag,
3180 rdata->jme_rx_sparemap);
3181 bus_dma_tag_destroy(rdata->jme_rx_tag);
3182 rdata->jme_rx_tag = NULL;
3190 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3192 struct mbuf_chain chain[MAXCPU];
3195 ether_input_chain_init(chain);
3196 for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3197 if (status & jme_rx_status[r].jme_coal) {
3198 struct jme_rxdata *rdata =
3199 &sc->jme_cdata.jme_rx_data[r];
3201 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3202 prog += jme_rxeof_chain(sc, r, chain, -1);
3203 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3207 ether_input_dispatch(chain);
3211 jme_enable_rss(struct jme_softc *sc)
3214 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3217 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
3219 KASSERT(sc->jme_rx_ring_inuse == JME_NRXRING_2 ||
3220 sc->jme_rx_ring_inuse == JME_NRXRING_4,
3221 ("%s: invalid # of RX rings (%d)\n",
3222 sc->arpcom.ac_if.if_xname, sc->jme_rx_ring_inuse));
3224 rssc = RSSC_HASH_64_ENTRY;
3225 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3226 rssc |= sc->jme_rx_ring_inuse >> 1;
3227 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3228 CSR_WRITE_4(sc, JME_RSSC, rssc);
3230 toeplitz_get_key(key, sizeof(key));
3231 for (i = 0; i < RSSKEY_NREGS; ++i) {
3234 keyreg = RSSKEY_REGVAL(key, i);
3235 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3237 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3241 * Create the redirect table in the following fashion:
3242 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3245 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3248 q = i % sc->jme_rx_ring_inuse;
3249 ind |= q << (i * 8);
3251 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3253 for (i = 0; i < RSSTBL_NREGS; ++i)
3254 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
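/*
 * Worked example (illustrative): with 4 Rx rings in use and one byte
 * per table entry, ind = 0x03020100; replicating it across all
 * RSSTBL_NREGS registers yields a table where entry
 * (hash & rdr_table_mask) maps to ring (hash % 4), matching the
 * relation described in the comment above.
 */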
3258 jme_disable_rss(struct jme_softc *sc)
3260 sc->jme_rx_ring_inuse = JME_NRXRING_1;
3261 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3265 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3267 struct jme_softc *sc = ifp->if_softc;
3270 case IFNET_SERIALIZE_ALL:
3271 lwkt_serialize_array_enter(sc->jme_serialize_arr,
3272 sc->jme_serialize_cnt, 0);
3275 case IFNET_SERIALIZE_MAIN:
3276 lwkt_serialize_enter(&sc->jme_serialize);
3279 case IFNET_SERIALIZE_TX:
3280 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3283 case IFNET_SERIALIZE_RX(0):
3284 lwkt_serialize_enter(
3285 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3288 case IFNET_SERIALIZE_RX(1):
3289 lwkt_serialize_enter(
3290 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3293 case IFNET_SERIALIZE_RX(2):
3294 lwkt_serialize_enter(
3295 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3298 case IFNET_SERIALIZE_RX(3):
3299 lwkt_serialize_enter(
3300 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3304 panic("%s unsupported serialize type\n", ifp->if_xname);
3309 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3311 struct jme_softc *sc = ifp->if_softc;
3314 case IFNET_SERIALIZE_ALL:
3315 lwkt_serialize_array_exit(sc->jme_serialize_arr,
3316 sc->jme_serialize_cnt, 0);
3319 case IFNET_SERIALIZE_MAIN:
3320 lwkt_serialize_exit(&sc->jme_serialize);
3323 case IFNET_SERIALIZE_TX:
3324 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3327 case IFNET_SERIALIZE_RX(0):
3328 lwkt_serialize_exit(
3329 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3332 case IFNET_SERIALIZE_RX(1):
3333 lwkt_serialize_exit(
3334 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3337 case IFNET_SERIALIZE_RX(2):
3338 lwkt_serialize_exit(
3339 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3342 case IFNET_SERIALIZE_RX(3):
3343 lwkt_serialize_exit(
3344 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3348 panic("%s unsupported serialize type\n", ifp->if_xname);
3353 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3355 struct jme_softc *sc = ifp->if_softc;
3358 case IFNET_SERIALIZE_ALL:
3359 return lwkt_serialize_array_try(sc->jme_serialize_arr,
3360 sc->jme_serialize_cnt, 0);
3362 case IFNET_SERIALIZE_MAIN:
3363 return lwkt_serialize_try(&sc->jme_serialize);
3365 case IFNET_SERIALIZE_TX:
3366 return lwkt_serialize_try(&sc->jme_cdata.jme_tx_serialize);
3368 case IFNET_SERIALIZE_RX(0):
3369 return lwkt_serialize_try(
3370 &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3372 case IFNET_SERIALIZE_RX(1):
3373 return lwkt_serialize_try(
3374 &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3376 case IFNET_SERIALIZE_RX(2):
3377 return lwkt_serialize_try(
3378 &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3380 case IFNET_SERIALIZE_RX(3):
3381 return lwkt_serialize_try(
3382 &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3385 panic("%s unsupported serialize type\n", ifp->if_xname);
3392 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3393 boolean_t serialized)
3395 struct jme_softc *sc = ifp->if_softc;
3396 struct jme_rxdata *rdata;
3400 case IFNET_SERIALIZE_ALL:
3402 for (i = 0; i < sc->jme_serialize_cnt; ++i)
3403 ASSERT_SERIALIZED(sc->jme_serialize_arr[i]);
3405 for (i = 0; i < sc->jme_serialize_cnt; ++i)
3406 ASSERT_NOT_SERIALIZED(sc->jme_serialize_arr[i]);
3410 case IFNET_SERIALIZE_MAIN:
3412 ASSERT_SERIALIZED(&sc->jme_serialize);
3414 ASSERT_NOT_SERIALIZED(&sc->jme_serialize);
3417 case IFNET_SERIALIZE_TX:
3419 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3421 ASSERT_NOT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3424 case IFNET_SERIALIZE_RX(0):
3425 rdata = &sc->jme_cdata.jme_rx_data[0];
3427 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3429 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3432 case IFNET_SERIALIZE_RX(1):
3433 rdata = &sc->jme_cdata.jme_rx_data[1];
3435 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3437 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3440 case IFNET_SERIALIZE_RX(2):
3441 rdata = &sc->jme_cdata.jme_rx_data[2];
3443 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3445 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3448 case IFNET_SERIALIZE_RX(3):
3449 rdata = &sc->jme_cdata.jme_rx_data[3];
3451 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3453 ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3457 panic("%s unsupported serialize type\n", ifp->if_xname);
3461 #endif /* INVARIANTS */