/*
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"
#define JME_TICK_CPUID		0	/* DO NOT CHANGE THIS */

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#ifdef JME_RSS_DEBUG
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *, struct ifaltq_subque *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);

static void	jme_npoll(struct ifnet *, struct ifpoll_info *);
static void	jme_npoll_status(struct ifnet *);
static void	jme_npoll_rx(struct ifnet *, void *, int);
static void	jme_npoll_tx(struct ifnet *, void *, int);

static void	jme_serialize(struct ifnet *, enum ifnet_serialize);
static void	jme_deserialize(struct ifnet *, enum ifnet_serialize);
static int	jme_tryserialize(struct ifnet *, enum ifnet_serialize);
static void	jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);

static void	jme_intr(void *);
static void	jme_msix_tx(void *);
static void	jme_msix_rx(void *);
static void	jme_msix_status(void *);
static void	jme_txeof(struct jme_txdata *);
static void	jme_rxeof(struct jme_rxdata *, int);
static void	jme_rx_intr(struct jme_softc *, uint32_t);
static void	jme_enable_intr(struct jme_softc *);
static void	jme_disable_intr(struct jme_softc *);
static void	jme_rx_restart(struct jme_softc *, uint32_t);

static int	jme_msix_setup(device_t);
static void	jme_msix_teardown(device_t, int);
static int	jme_intr_setup(device_t);
static void	jme_intr_teardown(device_t);
static void	jme_msix_try_alloc(device_t);
static void	jme_msix_free(device_t);
static int	jme_intr_alloc(device_t);
static void	jme_intr_free(device_t);
static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static int	jme_init_rx_ring(struct jme_rxdata *);
static void	jme_init_tx_ring(struct jme_txdata *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_txdata *, struct mbuf **, int *);
static void	jme_rxpkt(struct jme_rxdata *);
static int	jme_rxring_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_alloc(struct jme_rxdata *);
static int	jme_rxbuf_dma_filter(void *, bus_addr_t);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_msinum(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);

static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);
static void	jme_enable_rss(struct jme_softc *);
static void	jme_disable_rss(struct jme_softc *);
static void	jme_serialize_skipmain(struct jme_softc *);
static void	jme_deserialize_skipmain(struct jme_softc *);
static void	jme_phy_poweron(struct jme_softc *);
static void	jme_phy_poweroff(struct jme_softc *);
static int	jme_miiext_read(struct jme_softc *, int);
static void	jme_miiext_write(struct jme_softc *, int, int);
static void	jme_phy_init(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	DEVMETHOD_END
};
static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
static const struct {
	uint32_t	jme_coal;
	uint32_t	jme_comp;
	uint32_t	jme_empty;
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 0;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
static void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
{
	struct jme_desc *desc;

	desc = rxd->rx_desc;
	desc->buflen = htole32(MCLBYTES);
	desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
	desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
}
/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	}
	if (sc->jme_phyaddr != phy)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
		    "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	}
	if (sc->jme_phyaddr != phy)
		return (0);

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
		    "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}
/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	jme_serialize_skipmain(sc);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_has_link = FALSE;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_has_link = TRUE;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_has_link = TRUE;
			break;
		default:
			break;
		}
	}
	/*
	 * Disabling Rx/Tx MACs has a side effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver should reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note, just saving the value of the
	 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
	 * and restoring them afterwards is not sufficient to ensure
	 * a correct MAC state, because stopping the MAC can take a
	 * while and the hardware might have updated JME_TXNDA/JME_RXNDA
	 * during the stop operation.
	 */
	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	if (tdata->jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(tdata->jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				IFNET_STAT_INC(ifp, oerrors, 1);
			}
		}
	}
	jme_init_tx_ring(tdata);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_has_link) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = tdata->jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);

		ifp->if_flags |= IFF_RUNNING;
		ifq_clr_oactive(&ifp->if_snd);
		callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
		    JME_TICK_CPUID);
	}

	if (!(ifp->if_flags & IFF_NPOLLING)) {
		/* Reenable interrupts. */
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	}
done:
	jme_deserialize_skipmain(sc);
}
/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}
static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;
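	/*
	 * Each EEPROM descriptor pairs a target register with a data
	 * byte; harvest the bytes written to the PAR0..PAR5 station
	 * address registers until all ETHER_ADDR_LEN bytes are seen.
	 */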
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);

	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try the next EEPROM descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);

	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");

		par0 = karc4random();

		/* Set OUI to JMicron. */
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt, coal_max;
	uint8_t eaddr[ETHER_ADDR_LEN];
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_data.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	/*
	 * Get # of RX ring descriptors
	 */
	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Get # of TX ring descriptors
	 */
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    device_getenv_int(dev, "tx_desc_count", jme_tx_desc_count);
	sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt =
	    roundup(sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt, JME_NDESC_ALIGN);
	if (sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Get # of RX rings
	 */
	sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
	    jme_rx_ring_count);
	sc->jme_cdata.jme_rx_ring_cnt =
	    if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < JME_NSERIALIZE);
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;

	KKASSERT(i < JME_NSERIALIZE);
	sc->jme_serialize_arr[i++] =
	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize;

	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		KKASSERT(i < JME_NSERIALIZE);
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}

	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;
	/*
	 * Setup TX ring specific data
	 */
	sc->jme_cdata.jme_tx_data.jme_sc = sc;

	/*
	 * Setup RX rings specific data
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);
#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);
794 * JMC250 supports both memory mapped and I/O register space
795 * access. Because I/O register access should use different
796 * BARs to access registers it's waste of time to use I/O
797 * register spce access. JMC250 uses 16K to map entire memory
800 sc->jme_mem_rid = JME_PCIR_BAR;
801 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
802 &sc->jme_mem_rid, RF_ACTIVE);
803 if (sc->jme_mem_res == NULL) {
804 device_printf(dev, "can't allocate IO memory\n");
807 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
808 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
813 error = jme_intr_alloc(dev);
	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		if (rev == JME_REV2) {
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
			sc->jme_phycom0 = 0x608a;
		} else if (rev == JME_REV2_2) {
			sc->jme_phycom0 = 0x408a;
		}
		break;

	default:
		panic("unknown device id 0x%04x", did);
	}

	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}
		sc->jme_caps |= JME_CAP_PHYPWR;

	if (rev >= JME_REV6 || rev == JME_REV5 || rev == JME_REV5_1 ||
		sc->jme_phycom0 = 0x008a;
		sc->jme_phycom1 = 0x4109;
	} else if (rev == JME_REV3_1 || rev == JME_REV3_2) {
		sc->jme_phycom0 = 0xe088;
	}

	if (rev >= JME_REV2) {
		reg = pci_read_config(dev, JME_PCI_SSCTRL, 4);
		if ((reg & SSCTRL_PHYMASK) == SSCTRL_PHYEA) {

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && bootverbose) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}
	/*
	 * Save PHY address.
	 *
	 * Integrated JR0211 has a fixed PHY address, whereas the FPGA
	 * version requires PHY probing to get the correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
		    device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->jme_npoll_rxoff = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	offset_def = sc->jme_npoll_rxoff;
	offset = device_getenv_int(dev, "npoll.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid npoll.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}
	sc->jme_npoll_txoff = offset;
#endif	/* IFPOLL_ENABLE */
	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case the number of TX/RX
	 * descs is set to a small value by the user.
	 *
	 * NOTE: coal_max will not be zero, since the number of descs
	 * must be aligned to JME_NDESC_ALIGN (16 currently)
	 */
	coal_max = sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt / 2;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;

	sc->jme_cdata.jme_tx_data.jme_tx_wreg = JME_TXWREG_NSEGS;
	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = jme_npoll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd,
	    sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_TSO |
	    IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= JME_CSUM_FEATURES;
	ifp->if_hwassist |= CSUM_TSO;
	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);
				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}
	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Setup the TX ring's CPUID */
	ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
	ifsq_set_hw_serialize(ifq_get_subq_default(&ifp->if_snd),
	    &sc->jme_cdata.jme_tx_data.jme_tx_serialize);

	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);
fail:
	jme_detach(dev);
	return (error);
}
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}
static void
jme_sysctl_node(struct jme_softc *sc)
{
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_ring_cnt,
	    0, "RX ring count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_wreg", CTLFLAG_RW,
	    &sc->jme_cdata.jme_tx_data.jme_tx_wreg, 0,
	    "# of segments before writing to hardware register");

#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_emp", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
		    "# of times the RX ring was empty");
	}
#endif	/* JME_RSS_DEBUG */

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif
}
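/*
 * Allocate all DMA resources in one pass: a parent tag for the
 * descriptor rings, coherent memory for the Tx/Rx rings and the
 * shadow status block, then per-buffer tags and maps for Tx/Rx.
 */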
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	asize = __VM_CACHELINE_ALIGN(
	    tdata->jme_tx_desc_cnt * sizeof(struct jme_txdesc));
	tdata->jme_txdesc = kmalloc_cachealign(asize, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		asize = __VM_CACHELINE_ALIGN(
		    rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc));
		rdata->jme_rxdesc = kmalloc_cachealign(asize, M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(tdata), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	tdata->jme_tx_ring_tag = dmem.dmem_tag;
	tdata->jme_tx_ring_map = dmem.dmem_map;
	tdata->jme_tx_ring = dmem.dmem_addr;
	tdata->jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &tdata->jme_tx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
		txd = &tdata->jme_txdesc[i];
		error = bus_dmamap_create(tdata->jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &tdata->jme_txdesc[j];
				bus_dmamap_destroy(tdata->jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(tdata->jme_tx_tag);
			tdata->jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (tdata->jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring_map);
		bus_dmamem_free(tdata->jme_tx_ring_tag,
		    tdata->jme_tx_ring, tdata->jme_tx_ring_map);
		bus_dma_tag_destroy(tdata->jme_tx_ring_tag);
		tdata->jme_tx_ring_tag = NULL;
	}

	/* Rx rings */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (tdata->jme_tx_tag != NULL) {
		for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
			txd = &tdata->jme_txdesc[i];
			bus_dmamap_destroy(tdata->jme_tx_tag, txd->tx_dmamap);
		}
		bus_dma_tag_destroy(tdata->jme_tx_tag);
		tdata->jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (tdata->jme_txdesc != NULL) {
		kfree(tdata->jme_txdesc, M_DEVBUF);
		tdata->jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}
/*
 * Unlike other ethernet controllers, JMC250 requires explicitly
 * resetting the link speed to 10/100Mbps, as a gigabit link will
 * consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed or not, as
 * we have no control after powering off.  If the renegotiation fails,
 * WOL may not work.  Running at 1Gbps draws more power than the 375mA
 * at 3.3V specified in the PCI specification, and that would result
 * in completely shutting down power to the ethernet controller.
 *
 * TODO
 * Save current negotiated media speed/duplex/flow-control to softc
 * and restore the same link again after resuming.  PHY handling such
 * as power down/resetting to 100Mbps may be better handled in the
 * suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}
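/*
 * Arm (or disarm) wake-on-LAN: program PMCS/GPREG0 for magic-frame
 * wakeup, drop the link to 10/100 on gigabit parts, and power the
 * PHY down when no PME capability or WOL is available.
 */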
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
	jme_setwol(sc);
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int pmc;

	ifnet_serialize_all(ifp);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}
static int
jme_tso_pullup(struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	return 0;
}
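/*
 * Map an outgoing mbuf chain into Tx descriptors.  On 64-bit capable
 * configurations the first descriptor of the chain is a "symbol"
 * descriptor carrying the checksum/TSO/VLAN flags but no payload; on
 * 32-bit configurations it also carries the first payload segment.
 */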
static int
jme_encap(struct jme_txdata *tdata, struct mbuf **m_head, int *segs_used)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = tdata->jme_tx_prod;
	txd = &tdata->jme_txdesc[prod];

	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (tdata->jme_tx_desc_cnt - tdata->jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
	    ("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(tdata->jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		return error;
	*segs_used += nsegs;

	bus_dmamap_sync(tdata->jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &tdata->jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (tdata->jme_sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	tdata->jme_tx_cnt++;
	KKASSERT(tdata->jme_tx_cnt - i < tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
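	/*
	 * Account for the head descriptor: i is 0 when it was a
	 * payload-free symbol descriptor and 1 when it carried the
	 * first segment, so 1 - i plus the nsegs added below yields
	 * the total number of descriptors this frame consumed.
	 */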
	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &tdata->jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		tdata->jme_tx_cnt++;
		KKASSERT(tdata->jme_tx_cnt <=
		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, tdata->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	tdata->jme_tx_prod = prod;

	/*
	 * Finally, request an interrupt and give ownership of the
	 * first descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
}
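/*
 * Dequeue packets and hand them to jme_encap(); the Tx doorbell
 * (the TXCSR kick) is batched via jme_tx_wreg to reduce the number
 * of register writes.
 */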
static void
jme_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct jme_softc *sc = ifp->if_softc;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(&tdata->jme_tx_serialize);

	if (!sc->jme_has_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	if (tdata->jme_tx_cnt >= JME_TX_DESC_HIWAT(tdata))
		jme_txeof(tdata);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check the number of available TX descs, always
		 * leaving JME_TXD_RSVD free TX descs.
		 */
		if (tdata->jme_tx_cnt + JME_TXD_SPARE >
		    tdata->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(tdata, &m_head, &enq)) {
			KKASSERT(m_head == NULL);
			IFNET_STAT_INC(ifp, oerrors, 1);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		if (enq >= tdata->jme_tx_wreg) {
			CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr |
			    TXCSR_TX_ENB | TXCSR_TXQ_N_START(TXCSR_TXQ0));
			enq = 0;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to TXCSR.  This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
	}
}
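/*
 * The watchdog fires when a Tx has been pending for JME_TX_TIMEOUT
 * seconds; reclaiming completed descriptors first matters because a
 * missed Tx interrupt is the most common cause.
 */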
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (!sc->jme_has_link) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		IFNET_STAT_INC(ifp, oerrors, 1);
		jme_init(sc);
		return;
	}

	jme_txeof(tdata);
	if (tdata->jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	IFNET_STAT_INC(ifp, oerrors, 1);
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but the availability
			 * of Tx checksum offload should be checked against
			 * the new MTU size, as the FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
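/*
 * Reprogram GHC/RXMAC/TXMAC/TXPFC to match the speed, duplex and
 * flow-control state the PHY resolved, and apply the half-duplex
 * and extended-FIFO chip workarounds where needed.
 */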
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);

		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;

		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
			    jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;

		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify the hardware of the availability of new
			 * Rx buffers.  Reading RXCSR takes a very long
			 * time under heavy load, so cache the RXCSR value
			 * and write the ORed value with the kick command
			 * to RXCSR.  This saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&tdata->jme_tx_serialize);
			jme_txeof(tdata);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&tdata->jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
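/*
 * Reclaim descriptors for frames the hardware has finished sending
 * and free their mbufs; see the NOTE below for the OWN-bit race.
 */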
static void
jme_txeof(struct jme_txdata *tdata)
{
	struct ifnet *ifp = &tdata->jme_sc->arpcom.ac_if;
	int cons;

	cons = tdata->jme_tx_cons;
	if (cons == tdata->jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != tdata->jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &tdata->jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * This chip will always update the TX descriptor's
		 * buflen field, and this update always happens after
		 * the OWN bit is cleared, so even when the OWN bit has
		 * been cleared by the chip we still can't be sure
		 * whether the buflen field has been updated by the
		 * chip or not.  To avoid this race, we wait for the
		 * next TX descriptor's OWN bit to be cleared by the
		 * chip before reusing this TX descriptor.
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc, tdata->jme_tx_desc_cnt);
		next_txd = &tdata->jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
		} else {
			IFNET_STAT_INC(ifp, opackets, 1);
			if (status & JME_TD_COLLISION) {
				IFNET_STAT_INC(ifp, collisions,
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK);
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip
		 * the entire chain of buffers for the transmitted
		 * frame.  In other words, the JME_TD_OWN bit is valid
		 * only at the first descriptor of a multi-descriptor
		 * transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			tdata->jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, tdata->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;

		tdata->jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(tdata->jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	tdata->jme_tx_cons = cons;

	/* 1 for symbol TX descriptor */
	if (tdata->jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	if (tdata->jme_tx_cnt + JME_TXD_SPARE <=
	    tdata->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifq_clr_oactive(&ifp->if_snd);
}
2300 static __inline void
2301 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2305 for (i = 0; i < count; ++i) {
2306 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2307 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
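/*
 * Note: JME_DESC_INC()/JME_DESC_ADD() (from if_jmevar.h) advance a
 * ring index modulo the descriptor count, roughly
 * ((x) = ((x) + (n)) % (cnt)), which is how the consumer/producer
 * indices here wrap back to 0 at the end of a ring.
 */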
2311 static __inline struct pktinfo *
2312 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2314 if (flags & JME_RD_IPV4)
2315 pi->pi_netisr = NETISR_IP;
2316 else if (flags & JME_RD_IPV6)
2317 pi->pi_netisr = NETISR_IPV6;
2322 pi->pi_l3proto = IPPROTO_UNKNOWN;
2324 if (flags & JME_RD_MORE_FRAG)
2325 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2326 else if (flags & JME_RD_TCP)
2327 pi->pi_l3proto = IPPROTO_TCP;
2328 else if (flags & JME_RD_UDP)
2329 pi->pi_l3proto = IPPROTO_UDP;
2335 /* Receive a frame. */
2337 jme_rxpkt(struct jme_rxdata *rdata)
2339 struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2340 struct jme_desc *desc;
2341 struct jme_rxdesc *rxd;
2342 struct mbuf *mp, *m;
2343 uint32_t flags, status, hash, hashinfo;
2344 int cons, count, nsegs;
2346 cons = rdata->jme_rx_cons;
2347 desc = &rdata->jme_rx_ring[cons];
2349 flags = le32toh(desc->flags);
2350 status = le32toh(desc->buflen);
2351 hash = le32toh(desc->addr_hi);
2352 hashinfo = le32toh(desc->addr_lo);
2353 nsegs = JME_RX_NSEGS(status);
2356 /* Skip the first descriptor. */
2357 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2360 * Clear the OWN bit of the following RX descriptors;
2361 * hardware will not clear the OWN bit except the first
2364 * Since the first RX descriptor is set up, i.e. OWN bit
2365 * on, before its following RX descriptors, leaving the
2366 * OWN bit on the following RX descriptors will trick
2367 * the hardware into thinking that the following RX
2368 * descriptors are ready to be used too.
2370 for (count = 1; count < nsegs; count++,
2371 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2372 rdata->jme_rx_ring[cons].flags = 0;
2374 cons = rdata->jme_rx_cons;
2377 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2378 "hash 0x%08x, hash info 0x%08x\n",
2379 rdata->jme_rx_idx, flags, hash, hashinfo);
2381 if (status & JME_RX_ERR_STAT) {
2382 IFNET_STAT_INC(ifp, ierrors, 1);
2383 jme_discard_rxbufs(rdata, cons, nsegs);
2384 #ifdef JME_SHOW_ERRORS
2385 if_printf(ifp, "%s: receive error = 0x%b\n",
2386 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2388 rdata->jme_rx_cons += nsegs;
2389 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2393 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2394 for (count = 0; count < nsegs; count++,
2395 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2396 rxd = &rdata->jme_rxdesc[cons];
2399 /* Add a new receive buffer to the ring. */
2400 if (jme_newbuf(rdata, rxd, 0) != 0) {
2401 IFNET_STAT_INC(ifp, iqdrops, 1);
2403 jme_discard_rxbufs(rdata, cons, nsegs - count);
2404 if (rdata->jme_rxhead != NULL) {
2405 m_freem(rdata->jme_rxhead);
2406 JME_RXCHAIN_RESET(rdata);
2412 * Assume we've received a full-sized frame.
2413 * The actual size is fixed up when we encounter the end of
2414 * a multi-segmented frame.
2416 mp->m_len = MCLBYTES;
2418 /* Chain received mbufs. */
2419 if (rdata->jme_rxhead == NULL) {
2420 rdata->jme_rxhead = mp;
2421 rdata->jme_rxtail = mp;
2424 * The receive processor can handle a maximum frame
2425 * size of 65535 bytes.
2427 rdata->jme_rxtail->m_next = mp;
2428 rdata->jme_rxtail = mp;
2431 if (count == nsegs - 1) {
2432 struct pktinfo pi0, *pi;
2434 /* Last desc. for this frame. */
2435 m = rdata->jme_rxhead;
2436 m->m_pkthdr.len = rdata->jme_rxlen;
2438 /* Set first mbuf size. */
2439 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2440 /* Set last mbuf size. */
2441 mp->m_len = rdata->jme_rxlen -
2442 ((MCLBYTES - JME_RX_PAD_BYTES) +
2443 (MCLBYTES * (nsegs - 2)));
2445 m->m_len = rdata->jme_rxlen;
2447 m->m_pkthdr.rcvif = ifp;
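/*
 * Worked example (a sketch assuming MCLBYTES = 2048 and
 * JME_RX_PAD_BYTES = 10): a frame occupying 5010 buffer bytes
 * (10 pad bytes included) arrives in nsegs = howmany(5010, 2048) = 3
 * clusters and jme_rxlen = 5010 - 10 = 5000, so the mbuf lengths
 * become 2038 (first, pad excluded) + 2048 (middle) + 914 (last),
 * which sum to the 5000 byte packet length.
 */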
2450 * Account for the 10 bytes of auto padding used to align
2451 * the IP header on a 32-bit boundary. Also note that the
2452 * CRC bytes are automatically removed by the hardware.
2455 m->m_data += JME_RX_PAD_BYTES;
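/*
 * With the 10 pad bytes skipped here, the 14 byte Ethernet header
 * ends 10 + 14 = 24 bytes into the cluster, so the IP header that
 * follows is naturally 32-bit aligned.
 */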
2457 /* Set checksum information. */
2458 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2459 (flags & JME_RD_IPV4)) {
2460 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2461 if (flags & JME_RD_IPCSUM)
2462 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2463 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2464 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2465 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2466 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2467 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2468 m->m_pkthdr.csum_flags |=
2469 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2470 m->m_pkthdr.csum_data = 0xffff;
2474 /* Check for VLAN tagged packets. */
2475 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2476 (flags & JME_RD_VLAN_TAG)) {
2477 m->m_pkthdr.ether_vlantag =
2478 flags & JME_RD_VLAN_MASK;
2479 m->m_flags |= M_VLANTAG;
2482 IFNET_STAT_INC(ifp, ipackets, 1);
2484 if (ifp->if_capenable & IFCAP_RSS)
2485 pi = jme_pktinfo(&pi0, flags);
2490 (hashinfo & JME_RD_HASH_FN_MASK) ==
2491 JME_RD_HASH_FN_TOEPLITZ) {
2492 m->m_flags |= (M_HASH | M_CKHASH);
2493 m->m_pkthdr.hash = toeplitz_hash(hash);
2496 #ifdef JME_RSS_DEBUG
2498 JME_RSS_DPRINTF(rdata->jme_sc, 10,
2499 "isr %d flags %08x, l3 %d %s\n",
2500 pi->pi_netisr, pi->pi_flags,
2502 (m->m_flags & M_HASH) ? "hash" : "");
2507 ether_input_pkt(ifp, m, pi);
2509 /* Reset mbuf chains. */
2510 JME_RXCHAIN_RESET(rdata);
2511 #ifdef JME_RSS_DEBUG
2512 rdata->jme_rx_pkt++;
2517 rdata->jme_rx_cons += nsegs;
2518 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2522 jme_rxeof(struct jme_rxdata *rdata, int count)
2524 struct jme_desc *desc;
2528 #ifdef IFPOLL_ENABLE
2529 if (count >= 0 && count-- == 0)
2532 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2533 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2535 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2539 * Check the number of segments against the received bytes.
2540 * A non-matching value would indicate that the hardware
2541 * is still trying to update the Rx descriptors. I'm not
2542 * sure whether this check is needed.
2544 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2545 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2546 if (nsegs != howmany(pktlen, MCLBYTES)) {
2547 if_printf(&rdata->jme_sc->arpcom.ac_if,
2548 "RX fragment count(%d) and "
2549 "packet size(%d) mismach\n", nsegs, pktlen);
2555 * The RSS hash and hash information may _not_ be set by the
2556 * hardware even if the OWN bit is cleared and the VALID bit is set.
2559 * If the RSS information has not been delivered by the hardware
2560 * yet, we MUST NOT accept this packet, let alone reuse
2561 * its RX descriptor. If this packet were accepted and its
2562 * RX descriptor reused before the hardware delivered the
2563 * RSS information, the RX buffer's address would be trashed
2564 * by the RSS information subsequently delivered by the hardware.
2566 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2567 struct jme_rxdesc *rxd;
2570 hashinfo = le32toh(desc->addr_lo);
2571 rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2574 * This test should be enough to detect pending
2575 * RSS information delivery, given:
2576 * - If the RSS hash is not calculated, the hashinfo
2577 * will be 0. However, the lower 32 bits of an RX
2578 * buffer's physical address will never be 0
2579 * (see jme_rxbuf_dma_filter).
2580 * - If the RSS hash is calculated, the lowest 4 bits
2581 * of hashinfo will be set, while the RX buffers
2582 * are at least 2K aligned.
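/*
 * Illustrative values: an RX buffer at physical address 0x12345800
 * keeps addr_lo == 0x12345800 while the writeback is pending; once
 * written back, addr_lo is either 0 (no hash) or has one of its
 * lowest 4 bits set (hash computed).  A 2K-aligned, non-zero address
 * can match neither, so the comparison below is unambiguous.
 */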
2584 if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2585 #ifdef JME_SHOW_RSSWB
2586 if_printf(&rdata->jme_sc->arpcom.ac_if,
2587 "RSS is not written back yet\n");
2593 /* Received a frame. */
2601 struct jme_softc *sc = xsc;
2602 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2604 lwkt_serialize_enter(&sc->jme_serialize);
2606 KKASSERT(mycpuid == JME_TICK_CPUID);
2608 sc->jme_in_tick = TRUE;
2610 sc->jme_in_tick = FALSE;
2612 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2614 lwkt_serialize_exit(&sc->jme_serialize);
2618 jme_reset(struct jme_softc *sc)
2622 /* Make sure that TX and RX are stopped */
2627 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2631 * Hold the reset bit before stopping the reset.
2634 /* Disable TXMAC and TXOFL clock sources */
2635 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2636 /* Disable RXMAC clock source */
2637 val = CSR_READ_4(sc, JME_GPREG1);
2638 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2640 CSR_READ_4(sc, JME_GHC);
2643 CSR_WRITE_4(sc, JME_GHC, 0);
2645 CSR_READ_4(sc, JME_GHC);
2648 * Clear the reset bit after stopping the reset.
2651 /* Enable TXMAC and TXOFL clock sources */
2652 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2653 /* Enable RXMAC clock source */
2654 val = CSR_READ_4(sc, JME_GPREG1);
2655 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2657 CSR_READ_4(sc, JME_GHC);
2659 /* Disable TXMAC and TXOFL clock sources */
2660 CSR_WRITE_4(sc, JME_GHC, 0);
2661 /* Disable RXMAC clock source */
2662 val = CSR_READ_4(sc, JME_GPREG1);
2663 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2665 CSR_READ_4(sc, JME_GHC);
2667 /* Enable TX and RX */
2668 val = CSR_READ_4(sc, JME_TXCSR);
2669 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2670 val = CSR_READ_4(sc, JME_RXCSR);
2671 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2673 CSR_READ_4(sc, JME_TXCSR);
2674 CSR_READ_4(sc, JME_RXCSR);
2676 /* Enable TXMAC and TXOFL clock sources */
2677 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2678 /* Disable RXMAC clock source */
2679 val = CSR_READ_4(sc, JME_GPREG1);
2680 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2682 CSR_READ_4(sc, JME_GHC);
2684 /* Stop TX and RX */
2692 struct jme_softc *sc = xsc;
2693 struct ifnet *ifp = &sc->arpcom.ac_if;
2694 struct mii_data *mii;
2695 uint8_t eaddr[ETHER_ADDR_LEN];
2700 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2703 * Cancel any pending I/O.
2708 * Reset the chip to a known state.
2713 * Set up the MSI/MSI-X vector to interrupt mapping.
2717 if (JME_ENABLE_HWRSS(sc))
2720 jme_disable_rss(sc);
2722 /* Init RX descriptors */
2723 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2724 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2726 if_printf(ifp, "initialization failed: "
2727 "no memory for %dth RX ring.\n", r);
2733 /* Init TX descriptors */
2734 jme_init_tx_ring(&sc->jme_cdata.jme_tx_data);
2736 /* Initialize shadow status block. */
2739 /* Reprogram the station address. */
2740 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2741 CSR_WRITE_4(sc, JME_PAR0,
2742 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2743 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2746 * Configure Tx queue.
2747 * Tx priority queue weight value : 0
2748 * Tx FIFO threshold for processing next packet : 16QW
2749 * Maximum Tx DMA length : 512
2750 * Allow Tx DMA burst.
2752 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2753 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2754 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2755 sc->jme_txcsr |= sc->jme_tx_dma_size;
2756 sc->jme_txcsr |= TXCSR_DMA_BURST;
2757 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2759 /* Set Tx descriptor counter. */
2760 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_data.jme_tx_desc_cnt);
2762 /* Program the Tx ring address into the hardware. */
2763 paddr = sc->jme_cdata.jme_tx_data.jme_tx_ring_paddr;
2764 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2765 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2767 /* Configure TxMAC parameters. */
2768 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2769 reg |= TXMAC_THRESH_1_PKT;
2770 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2771 CSR_WRITE_4(sc, JME_TXMAC, reg);
2774 * Configure Rx queue.
2775 * FIFO full threshold for transmitting Tx pause packet : 128T
2776 * FIFO threshold for processing next packet : 128QW
2778 * Max Rx DMA length : 128
2779 * Rx descriptor retry : 32
2780 * Rx descriptor retry time gap : 256ns
2781 * Don't receive runt/bad frame.
2783 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2786 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2787 * than 4K bytes will suffer from Rx FIFO overruns. So
2788 * decrease the FIFO threshold to reduce FIFO overruns for
2789 * frames larger than 4000 bytes.
2790 * For best performance with standard-MTU-sized frames, use the
2791 * maximum allowable FIFO threshold, 128QW.
2793 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2795 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2797 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
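/*
 * Example: with the standard 1500 byte MTU the frame ceiling is
 * 1500 + 14 (header) + 4 (VLAN) + 4 (CRC) = 1522 bytes, well under
 * the 4K Rx FIFO, so the 128QW threshold is used; a jumbo MTU such
 * as 9000 would select the conservative 16QW threshold instead.
 */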
2799 /* Improve PCI Express compatibility */
2800 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2802 sc->jme_rxcsr |= sc->jme_rx_dma_size;
2803 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2804 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2805 /* XXX TODO DROP_BAD */
2807 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2808 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2810 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2812 /* Set Rx descriptor counter. */
2813 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2815 /* Program the Rx ring address into the hardware. */
2816 paddr = rdata->jme_rx_ring_paddr;
2817 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2818 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2821 /* Clear receive filter. */
2822 CSR_WRITE_4(sc, JME_RXMAC, 0);
2824 /* Set up the receive filter. */
2829 * Disable all WOL bits, as WOL can interfere with normal Rx
2830 * operation. Also clear the WOL detection status bits.
2832 reg = CSR_READ_4(sc, JME_PMCS);
2833 reg &= ~PMCS_WOL_ENB_MASK;
2834 CSR_WRITE_4(sc, JME_PMCS, reg);
2837 * Insert 10 bytes of padding right before the received frame.
2838 * This greatly helps Rx performance on strict-alignment
2839 * architectures, as the frame need not be copied to align the payload.
2841 reg = CSR_READ_4(sc, JME_RXMAC);
2842 reg |= RXMAC_PAD_10BYTES;
2844 if (ifp->if_capenable & IFCAP_RXCSUM)
2845 reg |= RXMAC_CSUM_ENB;
2846 CSR_WRITE_4(sc, JME_RXMAC, reg);
2848 /* Configure general purpose reg0 */
2849 reg = CSR_READ_4(sc, JME_GPREG0);
2850 reg &= ~GPREG0_PCC_UNIT_MASK;
2851 /* Set PCC timer resolution to micro-seconds unit. */
2852 reg |= GPREG0_PCC_UNIT_US;
2854 * Disable all shadow register posting, as we have to read the
2855 * JME_INTR_STATUS register in jme_intr. It also seems
2856 * hard to synchronize the interrupt status between
2857 * hardware and software with shadow posting, due to the
2858 * requirements of bus_dmamap_sync(9).
2860 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2861 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2862 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2863 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2864 /* Disable posting of DW0. */
2865 reg &= ~GPREG0_POST_DW0_ENB;
2866 /* Clear PME message. */
2867 reg &= ~GPREG0_PME_ENB;
2868 /* Set PHY address. */
2869 reg &= ~GPREG0_PHY_ADDR_MASK;
2870 reg |= sc->jme_phyaddr;
2871 CSR_WRITE_4(sc, JME_GPREG0, reg);
2873 /* Configure Tx queue 0 packet completion coalescing. */
2874 jme_set_tx_coal(sc);
2876 /* Configure Rx queues packet completion coalescing. */
2877 jme_set_rx_coal(sc);
2879 /* Configure shadow status block but don't enable posting. */
2880 paddr = sc->jme_cdata.jme_ssb_block_paddr;
2881 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2882 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2884 /* Disable Timer 1 and Timer 2. */
2885 CSR_WRITE_4(sc, JME_TIMER1, 0);
2886 CSR_WRITE_4(sc, JME_TIMER2, 0);
2888 /* Configure retry transmit period, retry limit value. */
2889 CSR_WRITE_4(sc, JME_TXTRHD,
2890 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2891 TXTRHD_RT_PERIOD_MASK) |
2892 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2893 TXTRHD_RT_LIMIT_MASK));
2895 #ifdef IFPOLL_ENABLE
2896 if (!(ifp->if_flags & IFF_NPOLLING))
2898 /* Initialize the interrupt mask. */
2899 jme_enable_intr(sc);
2900 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2903 * Enabling the Tx/Rx DMA engines and Rx queue processing is
2904 * done after detection of a valid link in jme_miibus_statchg.
2906 sc->jme_has_link = FALSE;
2910 /* Set the current media. */
2911 mii = device_get_softc(sc->jme_miibus);
2914 callout_reset_bycpu(&sc->jme_tick_ch, hz, jme_tick, sc,
2917 ifp->if_flags |= IFF_RUNNING;
2918 ifq_clr_oactive(&ifp->if_snd);
2922 jme_stop(struct jme_softc *sc)
2924 struct ifnet *ifp = &sc->arpcom.ac_if;
2925 struct jme_txdata *tdata = &sc->jme_cdata.jme_tx_data;
2926 struct jme_txdesc *txd;
2927 struct jme_rxdesc *rxd;
2928 struct jme_rxdata *rdata;
2931 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2934 * Mark the interface down and cancel the watchdog timer.
2936 ifp->if_flags &= ~IFF_RUNNING;
2937 ifq_clr_oactive(&ifp->if_snd);
2940 callout_stop(&sc->jme_tick_ch);
2941 sc->jme_has_link = FALSE;
2944 * Disable interrupts.
2946 jme_disable_intr(sc);
2947 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2949 /* Disable updating shadow status block. */
2950 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2951 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2953 /* Stop receiver, transmitter. */
2958 * Free partially finished RX segments.
2960 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2961 rdata = &sc->jme_cdata.jme_rx_data[r];
2962 if (rdata->jme_rxhead != NULL)
2963 m_freem(rdata->jme_rxhead);
2964 JME_RXCHAIN_RESET(rdata);
2968 * Free RX and TX mbufs still in the queues.
2970 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2971 rdata = &sc->jme_cdata.jme_rx_data[r];
2972 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2973 rxd = &rdata->jme_rxdesc[i];
2974 if (rxd->rx_m != NULL) {
2975 bus_dmamap_unload(rdata->jme_rx_tag,
2982 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
2983 txd = &tdata->jme_txdesc[i];
2984 if (txd->tx_m != NULL) {
2985 bus_dmamap_unload(tdata->jme_tx_tag, txd->tx_dmamap);
2994 jme_stop_tx(struct jme_softc *sc)
2999 reg = CSR_READ_4(sc, JME_TXCSR);
3000 if ((reg & TXCSR_TX_ENB) == 0)
3002 reg &= ~TXCSR_TX_ENB;
3003 CSR_WRITE_4(sc, JME_TXCSR, reg);
3004 for (i = JME_TIMEOUT; i > 0; i--) {
3006 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3010 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3014 jme_stop_rx(struct jme_softc *sc)
3019 reg = CSR_READ_4(sc, JME_RXCSR);
3020 if ((reg & RXCSR_RX_ENB) == 0)
3022 reg &= ~RXCSR_RX_ENB;
3023 CSR_WRITE_4(sc, JME_RXCSR, reg);
3024 for (i = JME_TIMEOUT; i > 0; i--) {
3026 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3030 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3034 jme_init_tx_ring(struct jme_txdata *tdata)
3036 struct jme_txdesc *txd;
3039 tdata->jme_tx_prod = 0;
3040 tdata->jme_tx_cons = 0;
3041 tdata->jme_tx_cnt = 0;
3043 bzero(tdata->jme_tx_ring, JME_TX_RING_SIZE(tdata));
3044 for (i = 0; i < tdata->jme_tx_desc_cnt; i++) {
3045 txd = &tdata->jme_txdesc[i];
3047 txd->tx_desc = &tdata->jme_tx_ring[i];
3053 jme_init_ssb(struct jme_softc *sc)
3055 struct jme_chain_data *cd;
3057 cd = &sc->jme_cdata;
3058 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
3062 jme_init_rx_ring(struct jme_rxdata *rdata)
3064 struct jme_rxdesc *rxd;
3067 KKASSERT(rdata->jme_rxhead == NULL &&
3068 rdata->jme_rxtail == NULL &&
3069 rdata->jme_rxlen == 0);
3070 rdata->jme_rx_cons = 0;
3072 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
3073 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3076 rxd = &rdata->jme_rxdesc[i];
3078 rxd->rx_desc = &rdata->jme_rx_ring[i];
3079 error = jme_newbuf(rdata, rxd, 1);
3087 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
3090 bus_dma_segment_t segs;
3094 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3098 * The JMC250 has a 64-bit boundary alignment limitation, so jme(4)
3099 * takes advantage of the hardware's 10 byte padding feature to
3100 * avoid copying the entire frame to align the IP header on a 32-bit boundary.
3103 m->m_len = m->m_pkthdr.len = MCLBYTES;
3105 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
3106 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
3111 if_printf(&rdata->jme_sc->arpcom.ac_if,
3112 "can't load RX mbuf\n");
3117 if (rxd->rx_m != NULL) {
3118 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
3119 BUS_DMASYNC_POSTREAD);
3120 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
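/*
 * Spare map trick: the new mbuf was loaded into the spare map above,
 * so the descriptor's old map now becomes the new spare.  A load
 * failure earlier therefore never leaves the descriptor without a
 * valid map and buffer.
 */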
3122 map = rxd->rx_dmamap;
3123 rxd->rx_dmamap = rdata->jme_rx_sparemap;
3124 rdata->jme_rx_sparemap = map;
3126 rxd->rx_paddr = segs.ds_addr;
3128 jme_setup_rxdesc(rxd);
3133 jme_set_vlan(struct jme_softc *sc)
3135 struct ifnet *ifp = &sc->arpcom.ac_if;
3138 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3140 reg = CSR_READ_4(sc, JME_RXMAC);
3141 reg &= ~RXMAC_VLAN_ENB;
3142 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3143 reg |= RXMAC_VLAN_ENB;
3144 CSR_WRITE_4(sc, JME_RXMAC, reg);
3148 jme_set_filter(struct jme_softc *sc)
3150 struct ifnet *ifp = &sc->arpcom.ac_if;
3151 struct ifmultiaddr *ifma;
3156 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3158 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3159 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3163 * Always accept frames destined to our station address.
3164 * Always accept broadcast frames.
3166 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3168 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3169 if (ifp->if_flags & IFF_PROMISC)
3170 rxcfg |= RXMAC_PROMISC;
3171 if (ifp->if_flags & IFF_ALLMULTI)
3172 rxcfg |= RXMAC_ALLMULTI;
3173 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3174 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3175 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3180 * Set up the multicast address filter by passing all multicast
3181 * addresses through a CRC generator, and then using the low-order
3182 * 6 bits as an index into the 64-bit multicast hash table. The
3183 * high-order bit selects the register, while the remaining bits
3184 * select the bit within the register.
3186 rxcfg |= RXMAC_MULTICAST;
3187 bzero(mchash, sizeof(mchash));
3189 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3190 if (ifma->ifma_addr->sa_family != AF_LINK)
3192 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3193 ifma->ifma_addr), ETHER_ADDR_LEN);
3195 /* Just want the 6 least significant bits. */
3198 /* Set the corresponding bit in the hash table. */
3199 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3202 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3203 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3204 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
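/*
 * A minimal, compiled-out sketch of the bucket computation above,
 * assuming the conventional big-endian Ethernet CRC-32 that
 * ether_crc32_be(9) implements and the 6-bit index described in the
 * comment in jme_set_filter; illustration only, not driver code.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

#define ETHER_CRC_POLY_BE	0x04c11db6

static uint32_t
crc32_be(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;	/* initial value */
	size_t i;
	int bit;
	uint8_t data;

	for (i = 0; i < len; i++) {
		for (data = buf[i], bit = 0; bit < 8; bit++, data >>= 1) {
			uint32_t carry =
			    ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);

			crc <<= 1;
			if (carry)
				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
		}
	}
	return (crc);
}

/* Map a multicast address to its (register, bit) slot in MAR0/MAR1. */
static void
mchash_slot(const uint8_t eaddr[6], int *reg, int *bit)
{
	uint32_t idx = crc32_be(eaddr, 6) & 0x3f;	/* low 6 bits */

	*reg = idx >> 5;	/* 0 -> MAR0, 1 -> MAR1 */
	*bit = idx & 0x1f;	/* bit within the register */
}
#endif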
3208 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3210 struct jme_softc *sc = arg1;
3211 struct ifnet *ifp = &sc->arpcom.ac_if;
3214 ifnet_serialize_all(ifp);
3216 v = sc->jme_tx_coal_to;
3217 error = sysctl_handle_int(oidp, &v, 0, req);
3218 if (error || req->newptr == NULL)
3221 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3226 if (v != sc->jme_tx_coal_to) {
3227 sc->jme_tx_coal_to = v;
3228 if (ifp->if_flags & IFF_RUNNING)
3229 jme_set_tx_coal(sc);
3232 ifnet_deserialize_all(ifp);
3237 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3239 struct jme_softc *sc = arg1;
3240 struct ifnet *ifp = &sc->arpcom.ac_if;
3243 ifnet_serialize_all(ifp);
3245 v = sc->jme_tx_coal_pkt;
3246 error = sysctl_handle_int(oidp, &v, 0, req);
3247 if (error || req->newptr == NULL)
3250 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3255 if (v != sc->jme_tx_coal_pkt) {
3256 sc->jme_tx_coal_pkt = v;
3257 if (ifp->if_flags & IFF_RUNNING)
3258 jme_set_tx_coal(sc);
3261 ifnet_deserialize_all(ifp);
3266 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3268 struct jme_softc *sc = arg1;
3269 struct ifnet *ifp = &sc->arpcom.ac_if;
3272 ifnet_serialize_all(ifp);
3274 v = sc->jme_rx_coal_to;
3275 error = sysctl_handle_int(oidp, &v, 0, req);
3276 if (error || req->newptr == NULL)
3279 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3284 if (v != sc->jme_rx_coal_to) {
3285 sc->jme_rx_coal_to = v;
3286 if (ifp->if_flags & IFF_RUNNING)
3287 jme_set_rx_coal(sc);
3290 ifnet_deserialize_all(ifp);
3295 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3297 struct jme_softc *sc = arg1;
3298 struct ifnet *ifp = &sc->arpcom.ac_if;
3301 ifnet_serialize_all(ifp);
3303 v = sc->jme_rx_coal_pkt;
3304 error = sysctl_handle_int(oidp, &v, 0, req);
3305 if (error || req->newptr == NULL)
3308 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3313 if (v != sc->jme_rx_coal_pkt) {
3314 sc->jme_rx_coal_pkt = v;
3315 if (ifp->if_flags & IFF_RUNNING)
3316 jme_set_rx_coal(sc);
3319 ifnet_deserialize_all(ifp);
3324 jme_set_tx_coal(struct jme_softc *sc)
3328 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3330 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3331 PCCTX_COAL_PKT_MASK;
3332 reg |= PCCTX_COAL_TXQ0;
3333 CSR_WRITE_4(sc, JME_PCCTX, reg);
3337 jme_set_rx_coal(struct jme_softc *sc)
3342 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3344 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3345 PCCRX_COAL_PKT_MASK;
3346 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3347 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3350 #ifdef IFPOLL_ENABLE
3353 jme_npoll_status(struct ifnet *ifp)
3355 struct jme_softc *sc = ifp->if_softc;
3358 ASSERT_SERIALIZED(&sc->jme_serialize);
3360 status = CSR_READ_4(sc, JME_INTR_STATUS);
3361 if (status & INTR_RXQ_DESC_EMPTY) {
3362 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3363 jme_rx_restart(sc, status);
3368 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3370 struct jme_rxdata *rdata = arg;
3372 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3374 jme_rxeof(rdata, cycle);
3378 jme_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
3380 struct jme_txdata *tdata = arg;
3382 ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3385 if (!ifq_is_empty(&ifp->if_snd))
3390 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3392 struct jme_softc *sc = ifp->if_softc;
3394 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3399 info->ifpi_status.status_func = jme_npoll_status;
3400 info->ifpi_status.serializer = &sc->jme_serialize;
3402 off = sc->jme_npoll_txoff;
3403 KKASSERT(off <= ncpus2);
3404 info->ifpi_tx[off].poll_func = jme_npoll_tx;
3405 info->ifpi_tx[off].arg = &sc->jme_cdata.jme_tx_data;
3406 info->ifpi_tx[off].serializer =
3407 &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3408 ifq_set_cpuid(&ifp->if_snd, sc->jme_npoll_txoff);
3410 off = sc->jme_npoll_rxoff;
3411 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3412 struct jme_rxdata *rdata =
3413 &sc->jme_cdata.jme_rx_data[i];
3416 info->ifpi_rx[idx].poll_func = jme_npoll_rx;
3417 info->ifpi_rx[idx].arg = rdata;
3418 info->ifpi_rx[idx].serializer =
3419 &rdata->jme_rx_serialize;
3422 if (ifp->if_flags & IFF_RUNNING)
3423 jme_disable_intr(sc);
3425 ifq_set_cpuid(&ifp->if_snd, sc->jme_tx_cpuid);
3426 if (ifp->if_flags & IFF_RUNNING)
3427 jme_enable_intr(sc);
3432 jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3434 struct jme_softc *sc = (void *)arg1;
3435 struct ifnet *ifp = &sc->arpcom.ac_if;
3438 off = sc->jme_npoll_rxoff;
3439 error = sysctl_handle_int(oidp, &off, 0, req);
3440 if (error || req->newptr == NULL)
3445 ifnet_serialize_all(ifp);
3446 if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3450 sc->jme_npoll_rxoff = off;
3452 ifnet_deserialize_all(ifp);
3458 jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3460 struct jme_softc *sc = (void *)arg1;
3461 struct ifnet *ifp = &sc->arpcom.ac_if;
3464 off = sc->jme_npoll_txoff;
3465 error = sysctl_handle_int(oidp, &off, 0, req);
3466 if (error || req->newptr == NULL)
3471 ifnet_serialize_all(ifp);
3472 if (off >= ncpus2) {
3476 sc->jme_npoll_txoff = off;
3478 ifnet_deserialize_all(ifp);
3483 #endif /* IFPOLL_ENABLE */
3486 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3491 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3492 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3493 JME_RX_RING_ALIGN, 0,
3494 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3495 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3497 device_printf(rdata->jme_sc->jme_dev,
3498 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3501 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3502 rdata->jme_rx_ring_map = dmem.dmem_map;
3503 rdata->jme_rx_ring = dmem.dmem_addr;
3504 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3510 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3512 if ((paddr & 0xffffffff) == 0) {
3514 * Don't allow the lower 32 bits of an RX buffer's
3515 * physical address to be 0, else it would break the
3516 * detection of pending RSS information delivery by the
3517 * hardware on the RX path.
3525 jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
3530 lowaddr = BUS_SPACE_MAXADDR;
3531 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
3532 /* jme_rxbuf_dma_filter will be called */
3533 lowaddr = BUS_SPACE_MAXADDR_32BIT;
3536 /* Create tag for Rx buffers. */
3537 error = bus_dma_tag_create(
3538 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
3539 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
3540 lowaddr, /* lowaddr */
3541 BUS_SPACE_MAXADDR, /* highaddr */
3542 jme_rxbuf_dma_filter, NULL, /* filter, filterarg */
3543 MCLBYTES, /* maxsize */
3545 MCLBYTES, /* maxsegsize */
3546 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
3547 &rdata->jme_rx_tag);
3549 device_printf(rdata->jme_sc->jme_dev,
3550 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
3554 /* Create DMA maps for Rx buffers. */
3555 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3556 &rdata->jme_rx_sparemap);
3558 device_printf(rdata->jme_sc->jme_dev,
3559 "could not create %dth spare Rx dmamap.\n",
3561 bus_dma_tag_destroy(rdata->jme_rx_tag);
3562 rdata->jme_rx_tag = NULL;
3565 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
3566 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3568 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
3573 device_printf(rdata->jme_sc->jme_dev,
3574 "could not create %dth Rx dmamap "
3575 "for %dth RX ring.\n", i, rdata->jme_rx_idx);
3577 for (j = 0; j < i; ++j) {
3578 rxd = &rdata->jme_rxdesc[j];
3579 bus_dmamap_destroy(rdata->jme_rx_tag,
3582 bus_dmamap_destroy(rdata->jme_rx_tag,
3583 rdata->jme_rx_sparemap);
3584 bus_dma_tag_destroy(rdata->jme_rx_tag);
3585 rdata->jme_rx_tag = NULL;
3593 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3597 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3598 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3600 if (status & rdata->jme_rx_coal) {
3601 lwkt_serialize_enter(&rdata->jme_rx_serialize);
3602 jme_rxeof(rdata, -1);
3603 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3609 jme_enable_rss(struct jme_softc *sc)
3612 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3615 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3616 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3617 ("%s: invalid # of RX rings (%d)",
3618 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3620 rssc = RSSC_HASH_64_ENTRY;
3621 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3622 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3623 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3624 CSR_WRITE_4(sc, JME_RSSC, rssc);
3626 toeplitz_get_key(key, sizeof(key));
3627 for (i = 0; i < RSSKEY_NREGS; ++i) {
3630 keyreg = RSSKEY_REGVAL(key, i);
3631 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x, reg 0x%08x\n",
3632 i, keyreg, RSSKEY_REG(RSSKEY_NREGS - 1 - i));
3634 CSR_WRITE_4(sc, RSSKEY_REG(RSSKEY_NREGS - 1 - i), keyreg);
3638 * Create the redirect table in the following fashion:
3639 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3642 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3645 q = i % sc->jme_cdata.jme_rx_ring_cnt;
3646 ind |= q << (i * 8);
3648 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3650 for (i = 0; i < RSSTBL_NREGS; ++i)
3651 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
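/*
 * Worked example (assuming RSSTBL_REGSIZE = 4, i.e. four 8-bit
 * entries packed per 32-bit register): with 4 RX rings q cycles
 * through 0,1,2,3, giving ind = 0x03020100; with 2 RX rings it
 * cycles 0,1,0,1, giving ind = 0x01000100.  Replicating ind across
 * all RSSTBL registers yields rdr_table[n] = n % ring_cnt.
 */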
3655 jme_disable_rss(struct jme_softc *sc)
3657 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3661 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3663 struct jme_softc *sc = ifp->if_softc;
3665 ifnet_serialize_array_enter(sc->jme_serialize_arr,
3666 sc->jme_serialize_cnt, slz);
3670 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3672 struct jme_softc *sc = ifp->if_softc;
3674 ifnet_serialize_array_exit(sc->jme_serialize_arr,
3675 sc->jme_serialize_cnt, slz);
3679 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3681 struct jme_softc *sc = ifp->if_softc;
3683 return ifnet_serialize_array_try(sc->jme_serialize_arr,
3684 sc->jme_serialize_cnt, slz);
3690 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3691 boolean_t serialized)
3693 struct jme_softc *sc = ifp->if_softc;
3695 ifnet_serialize_array_assert(sc->jme_serialize_arr,
3696 sc->jme_serialize_cnt, slz, serialized);
3699 #endif /* INVARIANTS */
3702 jme_msix_try_alloc(device_t dev)
3704 struct jme_softc *sc = device_get_softc(dev);
3705 struct jme_msix_data *msix;
3706 int error, i, r, msix_enable, msix_count;
3707 int offset, offset_def;
3709 msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
3710 KKASSERT(msix_count <= JME_NMSIX);
3712 msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);
3715 * We leave the 1st MSI-X vector unused, so we
3716 * actually need msix_count + 1 MSI-X vectors.
3718 if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
3721 for (i = 0; i < msix_count; ++i)
3722 sc->jme_msix[i].jme_msix_rid = -1;
3727 * Set up status MSI-X
3730 msix = &sc->jme_msix[i++];
3731 msix->jme_msix_cpuid = 0;
3732 msix->jme_msix_arg = sc;
3733 msix->jme_msix_func = jme_msix_status;
3734 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3735 msix->jme_msix_intrs |=
3736 sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
3738 msix->jme_msix_serialize = &sc->jme_serialize;
3739 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
3740 device_get_nameunit(dev));
3746 offset_def = device_get_unit(dev) % ncpus2;
3747 offset = device_getenv_int(dev, "msix.txoff", offset_def);
3748 if (offset >= ncpus2) {
3749 device_printf(dev, "invalid msix.txoff %d, use %d\n",
3750 offset, offset_def);
3751 offset = offset_def;
3754 msix = &sc->jme_msix[i++];
3755 msix->jme_msix_cpuid = offset;
3756 sc->jme_tx_cpuid = msix->jme_msix_cpuid;
3757 msix->jme_msix_arg = &sc->jme_cdata.jme_tx_data;
3758 msix->jme_msix_func = jme_msix_tx;
3759 msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
3760 msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_data.jme_tx_serialize;
3761 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
3762 device_get_nameunit(dev));
3768 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
3771 offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
3772 device_get_unit(dev)) % ncpus2;
3774 offset = device_getenv_int(dev, "msix.rxoff", offset_def);
3775 if (offset >= ncpus2 ||
3776 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3777 device_printf(dev, "invalid msix.rxoff %d, use %d\n",
3778 offset, offset_def);
3779 offset = offset_def;
3783 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3784 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3786 msix = &sc->jme_msix[i++];
3787 msix->jme_msix_cpuid = r + offset;
3788 KKASSERT(msix->jme_msix_cpuid < ncpus2);
3789 msix->jme_msix_arg = rdata;
3790 msix->jme_msix_func = jme_msix_rx;
3791 msix->jme_msix_intrs = rdata->jme_rx_coal;
3792 msix->jme_msix_serialize = &rdata->jme_rx_serialize;
3793 ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
3794 "%s rx%d", device_get_nameunit(dev), r);
3797 KKASSERT(i == msix_count);
3799 error = pci_setup_msix(dev);
3803 /* Set up jme_msix_cnt early, so we can clean up on error */
3804 sc->jme_msix_cnt = msix_count;
3806 for (i = 0; i < msix_count; ++i) {
3807 msix = &sc->jme_msix[i];
3809 msix->jme_msix_vector = i + 1;
3810 error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
3811 &msix->jme_msix_rid, msix->jme_msix_cpuid);
3815 msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3816 &msix->jme_msix_rid, RF_ACTIVE);
3817 if (msix->jme_msix_res == NULL) {
3823 for (i = 0; i < JME_INTR_CNT; ++i) {
3824 uint32_t intr_mask = (1 << i);
3827 if ((JME_INTRS & intr_mask) == 0)
3830 for (x = 0; x < msix_count; ++x) {
3831 msix = &sc->jme_msix[x];
3832 if (msix->jme_msix_intrs & intr_mask) {
3835 reg = i / JME_MSINUM_FACTOR;
3836 KKASSERT(reg < JME_MSINUM_CNT);
3838 shift = (i % JME_MSINUM_FACTOR) * 4;
3840 sc->jme_msinum[reg] |=
3841 (msix->jme_msix_vector << shift);
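/*
 * Example of the packing above (assuming JME_MSINUM_FACTOR = 8,
 * i.e. eight 4-bit vector fields per 32-bit MSINUM register):
 * interrupt bit i = 10 lands in reg = 10 / 8 = 1 at
 * shift = (10 % 8) * 4 = 8, so its vector is ORed in as
 * (vector << 8).
 */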
3849 for (i = 0; i < JME_MSINUM_CNT; ++i) {
3850 device_printf(dev, "MSINUM%d: %#x\n", i,
3855 pci_enable_msix(dev);
3856 sc->jme_irq_type = PCI_INTR_TYPE_MSIX;
3864 jme_intr_alloc(device_t dev)
3866 struct jme_softc *sc = device_get_softc(dev);
3869 jme_msix_try_alloc(dev);
3871 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3872 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3873 &sc->jme_irq_rid, &irq_flags);
3875 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3876 &sc->jme_irq_rid, irq_flags);
3877 if (sc->jme_irq_res == NULL) {
3878 device_printf(dev, "can't allocate irq\n");
3881 sc->jme_tx_cpuid = rman_get_cpuid(sc->jme_irq_res);
3887 jme_msix_free(device_t dev)
3889 struct jme_softc *sc = device_get_softc(dev);
3892 KKASSERT(sc->jme_msix_cnt > 1);
3894 for (i = 0; i < sc->jme_msix_cnt; ++i) {
3895 struct jme_msix_data *msix = &sc->jme_msix[i];
3897 if (msix->jme_msix_res != NULL) {
3898 bus_release_resource(dev, SYS_RES_IRQ,
3899 msix->jme_msix_rid, msix->jme_msix_res);
3900 msix->jme_msix_res = NULL;
3902 if (msix->jme_msix_rid >= 0) {
3903 pci_release_msix_vector(dev, msix->jme_msix_rid);
3904 msix->jme_msix_rid = -1;
3907 pci_teardown_msix(dev);
3911 jme_intr_free(device_t dev)
3913 struct jme_softc *sc = device_get_softc(dev);
3915 if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3916 if (sc->jme_irq_res != NULL) {
3917 bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3920 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3921 pci_release_msi(dev);
3928 jme_msix_tx(void *xtdata)
3930 struct jme_txdata *tdata = xtdata;
3931 struct jme_softc *sc = tdata->jme_sc;
3932 struct ifnet *ifp = &sc->arpcom.ac_if;
3934 ASSERT_SERIALIZED(&tdata->jme_tx_serialize);
3936 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3938 CSR_WRITE_4(sc, JME_INTR_STATUS,
3939 INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);
3941 if (ifp->if_flags & IFF_RUNNING) {
3943 if (!ifq_is_empty(&ifp->if_snd))
3947 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
3951 jme_msix_rx(void *xrdata)
3953 struct jme_rxdata *rdata = xrdata;
3954 struct jme_softc *sc = rdata->jme_sc;
3955 struct ifnet *ifp = &sc->arpcom.ac_if;
3957 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3959 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);
3961 CSR_WRITE_4(sc, JME_INTR_STATUS,
3962 rdata->jme_rx_coal | rdata->jme_rx_comp);
3964 if (ifp->if_flags & IFF_RUNNING)
3965 jme_rxeof(rdata, -1);
3967 CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
3971 jme_msix_status(void *xsc)
3973 struct jme_softc *sc = xsc;
3974 struct ifnet *ifp = &sc->arpcom.ac_if;
3977 ASSERT_SERIALIZED(&sc->jme_serialize);
3979 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);
3981 status = CSR_READ_4(sc, JME_INTR_STATUS);
3983 if (status & INTR_RXQ_DESC_EMPTY) {
3984 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
3985 if (ifp->if_flags & IFF_RUNNING)
3986 jme_rx_restart(sc, status);
3989 CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
3993 jme_rx_restart(struct jme_softc *sc, uint32_t status)
3997 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3998 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
4000 if (status & rdata->jme_rx_empty) {
4001 lwkt_serialize_enter(&rdata->jme_rx_serialize);
4002 jme_rxeof(rdata, -1);
4003 #ifdef JME_RSS_DEBUG
4004 rdata->jme_rx_emp++;
4006 lwkt_serialize_exit(&rdata->jme_rx_serialize);
4009 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
4014 jme_set_msinum(struct jme_softc *sc)
4018 for (i = 0; i < JME_MSINUM_CNT; ++i)
4019 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
4023 jme_intr_setup(device_t dev)
4025 struct jme_softc *sc = device_get_softc(dev);
4028 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
4029 return jme_msix_setup(dev);
4031 error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
4032 jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
4034 device_printf(dev, "could not set up interrupt handler.\n");
4042 jme_intr_teardown(device_t dev)
4044 struct jme_softc *sc = device_get_softc(dev);
4046 if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
4047 jme_msix_teardown(dev, sc->jme_msix_cnt);
4049 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
4053 jme_msix_setup(device_t dev)
4055 struct jme_softc *sc = device_get_softc(dev);
4058 for (x = 0; x < sc->jme_msix_cnt; ++x) {
4059 struct jme_msix_data *msix = &sc->jme_msix[x];
4062 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
4063 INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
4064 &msix->jme_msix_handle, msix->jme_msix_serialize,
4065 msix->jme_msix_desc);
4067 device_printf(dev, "could not set up %s "
4068 "interrupt handler.\n", msix->jme_msix_desc);
4069 jme_msix_teardown(dev, x);
4077 jme_msix_teardown(device_t dev, int msix_count)
4079 struct jme_softc *sc = device_get_softc(dev);
4082 for (x = 0; x < msix_count; ++x) {
4083 struct jme_msix_data *msix = &sc->jme_msix[x];
4085 bus_teardown_intr(dev, msix->jme_msix_res,
4086 msix->jme_msix_handle);
4091 jme_serialize_skipmain(struct jme_softc *sc)
4093 lwkt_serialize_array_enter(sc->jme_serialize_arr,
4094 sc->jme_serialize_cnt, 1);
4098 jme_deserialize_skipmain(struct jme_softc *sc)
4100 lwkt_serialize_array_exit(sc->jme_serialize_arr,
4101 sc->jme_serialize_cnt, 1);
4105 jme_enable_intr(struct jme_softc *sc)
4109 for (i = 0; i < sc->jme_serialize_cnt; ++i)
4110 lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);
4112 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
4116 jme_disable_intr(struct jme_softc *sc)
4120 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
4122 for (i = 0; i < sc->jme_serialize_cnt; ++i)
4123 lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);
4127 jme_phy_poweron(struct jme_softc *sc)
4131 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
4132 bmcr &= ~BMCR_PDOWN;
4133 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
4135 if (sc->jme_caps & JME_CAP_PHYPWR) {
4138 val = CSR_READ_4(sc, JME_PHYPWR);
4139 val &= ~(PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
4140 PHYPWR_DOWN2 | PHYPWR_CLKSEL);
4141 CSR_WRITE_4(sc, JME_PHYPWR, val);
4143 val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
4144 val &= ~PE1_GPREG0_PHYBG;
4145 val |= PE1_GPREG0_ENBG;
4146 pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
4151 jme_phy_poweroff(struct jme_softc *sc)
4155 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
4157 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
4159 if (sc->jme_caps & JME_CAP_PHYPWR) {
4162 val = CSR_READ_4(sc, JME_PHYPWR);
4163 val |= PHYPWR_DOWN1SEL | PHYPWR_DOWN1SW |
4164 PHYPWR_DOWN2 | PHYPWR_CLKSEL;
4165 CSR_WRITE_4(sc, JME_PHYPWR, val);
4167 val = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
4168 val &= ~PE1_GPREG0_PHYBG;
4169 val |= PE1_GPREG0_PDD3COLD;
4170 pci_write_config(sc->jme_dev, JME_PCI_PE1, val, 4);
4175 jme_miiext_read(struct jme_softc *sc, int reg)
4179 addr = JME_MII_EXT_ADDR_RD | reg;
4180 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4181 JME_MII_EXT_ADDR, addr);
4182 return jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr,
4187 jme_miiext_write(struct jme_softc *sc, int reg, int val)
4191 addr = JME_MII_EXT_ADDR_WR | reg;
4192 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4193 JME_MII_EXT_DATA, val);
4194 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
4195 JME_MII_EXT_ADDR, addr);
4199 jme_phy_init(struct jme_softc *sc)
4204 jme_phy_poweroff(sc);
4205 jme_phy_poweron(sc);
4207 /* Enable PHY test 1 */
4208 gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
4209 gtcr &= ~GTCR_TEST_MASK;
4210 gtcr |= GTCR_TEST_1;
4211 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);
4213 val = jme_miiext_read(sc, JME_MII_EXT_COM2);
4214 val &= ~JME_MII_EXT_COM2_CALIB_MODE0;
4215 val |= JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN;
4216 jme_miiext_write(sc, JME_MII_EXT_COM2, val);
4220 val = jme_miiext_read(sc, JME_MII_EXT_COM2);
4221 val &= ~(JME_MII_EXT_COM2_CALIB_MODE0 |
4222 JME_MII_EXT_COM2_CALIB_LATCH | JME_MII_EXT_COM2_CALIB_EN);
4223 jme_miiext_write(sc, JME_MII_EXT_COM2, val);
4225 /* Disable PHY test */
4226 gtcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR);
4227 gtcr &= ~GTCR_TEST_MASK;
4228 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, gtcr);
4230 if (sc->jme_phycom0 != 0)
4231 jme_miiext_write(sc, JME_MII_EXT_COM0, sc->jme_phycom0);
4232 if (sc->jme_phycom1 != 0)
4233 jme_miiext_write(sc, JME_MII_EXT_COM1, sc->jme_phycom1);