9ea8aec5396f1741e1b34f948ac5d13a89669e85
[dragonfly.git] / sys / dev / netif / jme / if_jme.c
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29
30 #include "opt_ifpoll.h"
31 #include "opt_jme.h"
32
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_poll.h>
54 #include <net/ifq_var.h>
55 #include <net/toeplitz.h>
56 #include <net/toeplitz2.h>
57 #include <net/vlan/if_vlan_var.h>
58 #include <net/vlan/if_vlan_ether.h>
59
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62
63 #include <dev/netif/mii_layer/miivar.h>
64 #include <dev/netif/mii_layer/jmphyreg.h>
65
66 #include <bus/pci/pcireg.h>
67 #include <bus/pci/pcivar.h>
68 #include <bus/pci/pcidevs.h>
69
70 #include <dev/netif/jme/if_jmereg.h>
71 #include <dev/netif/jme/if_jmevar.h>
72
73 #include "miibus_if.h"
74
75 #define JME_TX_SERIALIZE        1
76 #define JME_RX_SERIALIZE        2
77
78 #define JME_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
79
80 #ifdef JME_RSS_DEBUG
81 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
82 do { \
83         if ((sc)->jme_rss_debug >= (lvl)) \
84                 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
85 } while (0)
86 #else   /* !JME_RSS_DEBUG */
87 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)      ((void)0)
88 #endif  /* JME_RSS_DEBUG */
89
90 static int      jme_probe(device_t);
91 static int      jme_attach(device_t);
92 static int      jme_detach(device_t);
93 static int      jme_shutdown(device_t);
94 static int      jme_suspend(device_t);
95 static int      jme_resume(device_t);
96
97 static int      jme_miibus_readreg(device_t, int, int);
98 static int      jme_miibus_writereg(device_t, int, int, int);
99 static void     jme_miibus_statchg(device_t);
100
101 static void     jme_init(void *);
102 static int      jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
103 static void     jme_start(struct ifnet *);
104 static void     jme_watchdog(struct ifnet *);
105 static void     jme_mediastatus(struct ifnet *, struct ifmediareq *);
106 static int      jme_mediachange(struct ifnet *);
107 #ifdef IFPOLL_ENABLE
108 static void     jme_npoll(struct ifnet *, struct ifpoll_info *);
109 #endif
110 static void     jme_serialize(struct ifnet *, enum ifnet_serialize);
111 static void     jme_deserialize(struct ifnet *, enum ifnet_serialize);
112 static int      jme_tryserialize(struct ifnet *, enum ifnet_serialize);
113 #ifdef INVARIANTS
114 static void     jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
115                     boolean_t);
116 #endif
117
118 static void     jme_intr(void *);
119 static void     jme_msix_tx(void *);
120 static void     jme_msix_rx(void *);
121 static void     jme_msix_status(void *);
122 static void     jme_txeof(struct jme_softc *);
123 static void     jme_rxeof(struct jme_rxdata *, int);
124 static void     jme_rx_intr(struct jme_softc *, uint32_t);
125 static void     jme_enable_intr(struct jme_softc *);
126 static void     jme_disable_intr(struct jme_softc *);
127
128 static int      jme_msix_setup(device_t);
129 static void     jme_msix_teardown(device_t, int);
130 static int      jme_intr_setup(device_t);
131 static void     jme_intr_teardown(device_t);
132 static void     jme_msix_try_alloc(device_t);
133 static void     jme_msix_free(device_t);
134 static int      jme_intr_alloc(device_t);
135 static void     jme_intr_free(device_t);
136 static int      jme_dma_alloc(struct jme_softc *);
137 static void     jme_dma_free(struct jme_softc *);
138 static int      jme_init_rx_ring(struct jme_rxdata *);
139 static void     jme_init_tx_ring(struct jme_softc *);
140 static void     jme_init_ssb(struct jme_softc *);
141 static int      jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
142 static int      jme_encap(struct jme_softc *, struct mbuf **);
143 static void     jme_rxpkt(struct jme_rxdata *);
144 static int      jme_rxring_dma_alloc(struct jme_rxdata *);
145 static int      jme_rxbuf_dma_alloc(struct jme_rxdata *);
146 static int      jme_rxbuf_dma_filter(void *, bus_addr_t);
147
148 static void     jme_tick(void *);
149 static void     jme_stop(struct jme_softc *);
150 static void     jme_reset(struct jme_softc *);
151 static void     jme_set_msinum(struct jme_softc *);
152 static void     jme_set_vlan(struct jme_softc *);
153 static void     jme_set_filter(struct jme_softc *);
154 static void     jme_stop_tx(struct jme_softc *);
155 static void     jme_stop_rx(struct jme_softc *);
156 static void     jme_mac_config(struct jme_softc *);
157 static void     jme_reg_macaddr(struct jme_softc *, uint8_t[]);
158 static int      jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
159 static int      jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
160 #ifdef notyet
161 static void     jme_setwol(struct jme_softc *);
162 static void     jme_setlinkspeed(struct jme_softc *);
163 #endif
164 static void     jme_set_tx_coal(struct jme_softc *);
165 static void     jme_set_rx_coal(struct jme_softc *);
166 static void     jme_enable_rss(struct jme_softc *);
167 static void     jme_disable_rss(struct jme_softc *);
168 static void     jme_serialize_skipmain(struct jme_softc *);
169 static void     jme_deserialize_skipmain(struct jme_softc *);
170
171 static void     jme_sysctl_node(struct jme_softc *);
172 static int      jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
173 static int      jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
174 static int      jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
175 static int      jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
176 #ifdef IFPOLL_ENABLE
177 static int      jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
178 static int      jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
179 #endif
180
/*
 * Devices supported by this driver.
 *
 * Each entry maps a PCI vendor/device ID pair to the capability flags
 * recorded in the softc when jme_probe() matches the device, plus a
 * human-readable description.  The table is terminated by an all-zero
 * sentinel entry (jme_name == NULL).
 */
static const struct jme_dev {
        uint16_t        jme_vendorid;   /* PCI vendor ID to match */
        uint16_t        jme_deviceid;   /* PCI device ID to match */
        uint32_t        jme_caps;       /* initial JME_CAP_* flags */
        const char      *jme_name;      /* device description string */
} jme_devs[] = {
        { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
            JME_CAP_JUMBO,
            "JMicron Inc, JMC250 Gigabit Ethernet" },
        { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
            JME_CAP_FASTETH,
            "JMicron Inc, JMC260 Fast Ethernet" },
        { 0, 0, 0, NULL }
};
198
199 static device_method_t jme_methods[] = {
200         /* Device interface. */
201         DEVMETHOD(device_probe,         jme_probe),
202         DEVMETHOD(device_attach,        jme_attach),
203         DEVMETHOD(device_detach,        jme_detach),
204         DEVMETHOD(device_shutdown,      jme_shutdown),
205         DEVMETHOD(device_suspend,       jme_suspend),
206         DEVMETHOD(device_resume,        jme_resume),
207
208         /* Bus interface. */
209         DEVMETHOD(bus_print_child,      bus_generic_print_child),
210         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
211
212         /* MII interface. */
213         DEVMETHOD(miibus_readreg,       jme_miibus_readreg),
214         DEVMETHOD(miibus_writereg,      jme_miibus_writereg),
215         DEVMETHOD(miibus_statchg,       jme_miibus_statchg),
216
217         { NULL, NULL }
218 };
219
220 static driver_t jme_driver = {
221         "jme",
222         jme_methods,
223         sizeof(struct jme_softc)
224 };
225
226 static devclass_t jme_devclass;
227
228 DECLARE_DUMMY_MODULE(if_jme);
229 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
230 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
231 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
232
/*
 * Per-RX-ring interrupt status bits, indexed by ring number.
 * Used to recognize and acknowledge coalescing, completion and
 * descriptor-empty interrupt conditions for each of the (up to)
 * JME_NRXRING_MAX receive queues.
 */
static const struct {
        uint32_t        jme_coal;       /* coalescing + coalescing-timeout */
        uint32_t        jme_comp;       /* completion */
        uint32_t        jme_empty;      /* descriptor ring empty */
} jme_rx_status[JME_NRXRING_MAX] = {
        { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
          INTR_RXQ0_DESC_EMPTY },
        { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
          INTR_RXQ1_DESC_EMPTY },
        { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
          INTR_RXQ2_DESC_EMPTY },
        { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
          INTR_RXQ3_DESC_EMPTY }
};
247
248 static int      jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
249 static int      jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
250 static int      jme_rx_ring_count = 0;
251 static int      jme_msi_enable = 1;
252 static int      jme_msix_enable = 1;
253
254 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
255 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
256 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
257 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
258 TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
259
260 static __inline void
261 jme_setup_rxdesc(struct jme_rxdesc *rxd)
262 {
263         struct jme_desc *desc;
264
265         desc = rxd->rx_desc;
266         desc->buflen = htole32(MCLBYTES);
267         desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
268         desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
269         desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
270 }
271
272 /*
273  *      Read a PHY register on the MII of the JMC250.
274  */
275 static int
276 jme_miibus_readreg(device_t dev, int phy, int reg)
277 {
278         struct jme_softc *sc = device_get_softc(dev);
279         uint32_t val;
280         int i;
281
282         /* For FPGA version, PHY address 0 should be ignored. */
283         if (sc->jme_caps & JME_CAP_FPGA) {
284                 if (phy == 0)
285                         return (0);
286         } else {
287                 if (sc->jme_phyaddr != phy)
288                         return (0);
289         }
290
291         CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
292             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
293
294         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
295                 DELAY(1);
296                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
297                         break;
298         }
299         if (i == 0) {
300                 device_printf(sc->jme_dev, "phy read timeout: "
301                               "phy %d, reg %d\n", phy, reg);
302                 return (0);
303         }
304
305         return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
306 }
307
308 /*
309  *      Write a PHY register on the MII of the JMC250.
310  */
311 static int
312 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
313 {
314         struct jme_softc *sc = device_get_softc(dev);
315         int i;
316
317         /* For FPGA version, PHY address 0 should be ignored. */
318         if (sc->jme_caps & JME_CAP_FPGA) {
319                 if (phy == 0)
320                         return (0);
321         } else {
322                 if (sc->jme_phyaddr != phy)
323                         return (0);
324         }
325
326         CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
327             ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
328             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
329
330         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
331                 DELAY(1);
332                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
333                         break;
334         }
335         if (i == 0) {
336                 device_printf(sc->jme_dev, "phy write timeout: "
337                               "phy %d, reg %d\n", phy, reg);
338         }
339
340         return (0);
341 }
342
/*
 *      Callback from MII layer when media changes.
 *
 *      Re-resolves link state and then performs a full MAC
 *      stop/reprogram/restart cycle: the hardware ring pointers and
 *      the driver's producer/consumer indices must be resynchronized
 *      after a link transition (see the long comment below for why a
 *      simple save/restore of JME_TXNDA/JME_RXNDA is insufficient).
 */
static void
jme_miibus_statchg(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii;
        struct jme_txdesc *txd;
        bus_addr_t paddr;
        int i, r;

        /*
         * When invoked from jme_tick() the main serializer is already
         * held, so only the remaining (tx/rx) serializers are taken
         * here; either way the whole ifnet ends up serialized.
         */
        if (sc->jme_in_tick)
                jme_serialize_skipmain(sc);
        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        if ((ifp->if_flags & IFF_RUNNING) == 0)
                goto done;

        mii = device_get_softc(sc->jme_miibus);

        /*
         * Determine link state.  1000baseT only counts as link on
         * gigabit-capable parts (i.e. not on JME_CAP_FASTETH chips).
         */
        sc->jme_has_link = FALSE;
        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->jme_has_link = TRUE;
                        break;
                case IFM_1000_T:
                        if (sc->jme_caps & JME_CAP_FASTETH)
                                break;
                        sc->jme_has_link = TRUE;
                        break;
                default:
                        break;
                }
        }

        /*
         * Disabling Rx/Tx MACs have a side-effect of resetting
         * JME_TXNDA/JME_RXNDA register to the first address of
         * Tx/Rx descriptor address. So driver should reset its
         * internal producer/consumer pointer and reclaim any
         * allocated resources.  Note, just saving the value of
         * JME_TXNDA and JME_RXNDA registers before stopping MAC
         * and restoring JME_TXNDA/JME_RXNDA register is not
         * sufficient to make sure correct MAC state because
         * stopping MAC operation can take a while and hardware
         * might have updated JME_TXNDA/JME_RXNDA registers
         * during the stop operation.
         */

        /* Disable interrupts */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

        /* Stop driver */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;
        callout_stop(&sc->jme_tick_ch);

        /* Stop receiver/transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);

        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

                /* Drain any completed frames and free a partial chain. */
                jme_rxeof(rdata, -1);
                if (rdata->jme_rxhead != NULL)
                        m_freem(rdata->jme_rxhead);
                JME_RXCHAIN_RESET(rdata);

                /*
                 * Reuse configured Rx descriptors and reset
                 * producer/consumer index.
                 */
                rdata->jme_rx_cons = 0;
        }
        if (JME_ENABLE_HWRSS(sc))
                jme_enable_rss(sc);
        else
                jme_disable_rss(sc);

        /* Reclaim completed Tx descriptors, then drop anything queued. */
        jme_txeof(sc);
        if (sc->jme_cdata.jme_tx_cnt != 0) {
                /* Remove queued packets for transmit. */
                for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
                        txd = &sc->jme_cdata.jme_txdesc[i];
                        if (txd->tx_m != NULL) {
                                bus_dmamap_unload(
                                    sc->jme_cdata.jme_tx_tag,
                                    txd->tx_dmamap);
                                m_freem(txd->tx_m);
                                txd->tx_m = NULL;
                                txd->tx_ndesc = 0;
                                /* Dropped on link change: count as error. */
                                ifp->if_oerrors++;
                        }
                }
        }
        jme_init_tx_ring(sc);

        /* Initialize shadow status block. */
        jme_init_ssb(sc);

        /* Program MAC with resolved speed/duplex/flow-control. */
        if (sc->jme_has_link) {
                jme_mac_config(sc);

                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

                /* Set Tx ring address to the hardware. */
                paddr = sc->jme_cdata.jme_tx_ring_paddr;
                CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
                CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

                for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                        CSR_WRITE_4(sc, JME_RXCSR,
                            sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

                        /* Set Rx ring address to the hardware. */
                        paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
                        CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
                        CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
                }

                /* Restart receiver/transmitter. */
                CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
                    RXCSR_RXQ_START);
                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
        }

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

        /* NOTE: the #ifdef guards only the condition; the CSR write is
         * the body of the `if` when IFPOLL_ENABLE is defined. */
#ifdef IFPOLL_ENABLE
        if (!(ifp->if_flags & IFF_NPOLLING))
#endif
        /* Reenable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

done:
        if (sc->jme_in_tick)
                jme_deserialize_skipmain(sc);
}
489
490 /*
491  *      Get the current interface media status.
492  */
493 static void
494 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
495 {
496         struct jme_softc *sc = ifp->if_softc;
497         struct mii_data *mii = device_get_softc(sc->jme_miibus);
498
499         ASSERT_IFNET_SERIALIZED_ALL(ifp);
500
501         mii_pollstat(mii);
502         ifmr->ifm_status = mii->mii_media_status;
503         ifmr->ifm_active = mii->mii_media_active;
504 }
505
506 /*
507  *      Set hardware to newly-selected media.
508  */
509 static int
510 jme_mediachange(struct ifnet *ifp)
511 {
512         struct jme_softc *sc = ifp->if_softc;
513         struct mii_data *mii = device_get_softc(sc->jme_miibus);
514         int error;
515
516         ASSERT_IFNET_SERIALIZED_ALL(ifp);
517
518         if (mii->mii_instance != 0) {
519                 struct mii_softc *miisc;
520
521                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
522                         mii_phy_reset(miisc);
523         }
524         error = mii_mediachg(mii);
525
526         return (error);
527 }
528
529 static int
530 jme_probe(device_t dev)
531 {
532         const struct jme_dev *sp;
533         uint16_t vid, did;
534
535         vid = pci_get_vendor(dev);
536         did = pci_get_device(dev);
537         for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
538                 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
539                         struct jme_softc *sc = device_get_softc(dev);
540
541                         sc->jme_caps = sp->jme_caps;
542                         device_set_desc(dev, sp->jme_name);
543                         return (0);
544                 }
545         }
546         return (ENXIO);
547 }
548
549 static int
550 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
551 {
552         uint32_t reg;
553         int i;
554
555         *val = 0;
556         for (i = JME_TIMEOUT; i > 0; i--) {
557                 reg = CSR_READ_4(sc, JME_SMBCSR);
558                 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
559                         break;
560                 DELAY(1);
561         }
562
563         if (i == 0) {
564                 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
565                 return (ETIMEDOUT);
566         }
567
568         reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
569         CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
570         for (i = JME_TIMEOUT; i > 0; i--) {
571                 DELAY(1);
572                 reg = CSR_READ_4(sc, JME_SMBINTF);
573                 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
574                         break;
575         }
576
577         if (i == 0) {
578                 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
579                 return (ETIMEDOUT);
580         }
581
582         reg = CSR_READ_4(sc, JME_SMBINTF);
583         *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
584
585         return (0);
586 }
587
/*
 * Try to recover the station address from EEPROM descriptors.
 *
 * The EEPROM begins with a two-byte signature followed by a list of
 * descriptors.  Each descriptor that targets function 0 / page BAR1
 * and writes into one of the JME_PAR0..JME_PAR0+5 register offsets
 * contributes one byte of the MAC address.  Returns 0 once all
 * ETHER_ADDR_LEN bytes have been collected into eaddr[], ENOENT if
 * the signature is missing or the address is incomplete.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
        uint8_t fup, reg, val;
        uint32_t offset;
        int match;

        offset = 0;
        /* Verify the two-byte EEPROM signature. */
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG0)
                return (ENOENT);
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG1)
                return (ENOENT);
        match = 0;
        do {
                if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
                        break;
                /* Only function-0 / page-BAR1 descriptors are relevant. */
                if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
                    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
                        if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
                                break;
                        /* Descriptor writes a byte of the PAR registers? */
                        if (reg >= JME_PAR0 &&
                            reg < JME_PAR0 + ETHER_ADDR_LEN) {
                                if (jme_eeprom_read_byte(sc, offset + 2,
                                    &val) != 0)
                                        break;
                                eaddr[reg - JME_PAR0] = val;
                                match++;
                        }
                }
                /* Check for the end of EEPROM descriptor. */
                if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
                        break;
                /* Try next eeprom descriptor. */
                offset += JME_EEPROM_DESC_BYTES;
        } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

        if (match == ETHER_ADDR_LEN)
                return (0);

        return (ENOENT);
}
631
632 static void
633 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
634 {
635         uint32_t par0, par1;
636
637         /* Read station address. */
638         par0 = CSR_READ_4(sc, JME_PAR0);
639         par1 = CSR_READ_4(sc, JME_PAR1);
640         par1 &= 0xFFFF;
641         if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
642                 device_printf(sc->jme_dev,
643                     "generating fake ethernet address.\n");
644                 par0 = karc4random();
645                 /* Set OUI to JMicron. */
646                 eaddr[0] = 0x00;
647                 eaddr[1] = 0x1B;
648                 eaddr[2] = 0x8C;
649                 eaddr[3] = (par0 >> 16) & 0xff;
650                 eaddr[4] = (par0 >> 8) & 0xff;
651                 eaddr[5] = par0 & 0xff;
652         } else {
653                 eaddr[0] = (par0 >> 0) & 0xFF;
654                 eaddr[1] = (par0 >> 8) & 0xFF;
655                 eaddr[2] = (par0 >> 16) & 0xFF;
656                 eaddr[3] = (par0 >> 24) & 0xFF;
657                 eaddr[4] = (par1 >> 0) & 0xFF;
658                 eaddr[5] = (par1 >> 8) & 0xFF;
659         }
660 }
661
662 static int
663 jme_attach(device_t dev)
664 {
665         struct jme_softc *sc = device_get_softc(dev);
666         struct ifnet *ifp = &sc->arpcom.ac_if;
667         uint32_t reg;
668         uint16_t did;
669         uint8_t pcie_ptr, rev;
670         int error = 0, i, j, rx_desc_cnt, coal_max;
671         uint8_t eaddr[ETHER_ADDR_LEN];
672 #ifdef IFPOLL_ENABLE
673         int offset, offset_def;
674 #endif
675
676         lwkt_serialize_init(&sc->jme_serialize);
677         lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
678         for (i = 0; i < JME_NRXRING_MAX; ++i) {
679                 lwkt_serialize_init(
680                     &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
681         }
682
683         rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
684             jme_rx_desc_count);
685         rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
686         if (rx_desc_cnt > JME_NDESC_MAX)
687                 rx_desc_cnt = JME_NDESC_MAX;
688
689         sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
690             jme_tx_desc_count);
691         sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
692             JME_NDESC_ALIGN);
693         if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
694                 sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
695
696         /*
697          * Calculate rx rings
698          */
699         sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
700             jme_rx_ring_count);
701         sc->jme_cdata.jme_rx_ring_cnt =
702             if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
703
704         i = 0;
705         sc->jme_serialize_arr[i++] = &sc->jme_serialize;
706
707         KKASSERT(i == JME_TX_SERIALIZE);
708         sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
709
710         KKASSERT(i == JME_RX_SERIALIZE);
711         for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
712                 sc->jme_serialize_arr[i++] =
713                     &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
714         }
715         KKASSERT(i <= JME_NSERIALIZE);
716         sc->jme_serialize_cnt = i;
717
718         sc->jme_cdata.jme_sc = sc;
719         for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
720                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
721
722                 rdata->jme_sc = sc;
723                 rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
724                 rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
725                 rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
726                 rdata->jme_rx_idx = i;
727                 rdata->jme_rx_desc_cnt = rx_desc_cnt;
728         }
729
730         sc->jme_dev = dev;
731         sc->jme_lowaddr = BUS_SPACE_MAXADDR;
732
733         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
734
735         callout_init(&sc->jme_tick_ch);
736
737 #ifndef BURN_BRIDGES
738         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
739                 uint32_t irq, mem;
740
741                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
742                 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
743
744                 device_printf(dev, "chip is in D%d power mode "
745                     "-- setting to D0\n", pci_get_powerstate(dev));
746
747                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
748
749                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
750                 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
751         }
752 #endif  /* !BURN_BRIDGE */
753
754         /* Enable bus mastering */
755         pci_enable_busmaster(dev);
756
757         /*
758          * Allocate IO memory
759          *
760          * JMC250 supports both memory mapped and I/O register space
761          * access.  Because I/O register access should use different
762          * BARs to access registers it's waste of time to use I/O
763          * register spce access.  JMC250 uses 16K to map entire memory
764          * space.
765          */
766         sc->jme_mem_rid = JME_PCIR_BAR;
767         sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
768                                                  &sc->jme_mem_rid, RF_ACTIVE);
769         if (sc->jme_mem_res == NULL) {
770                 device_printf(dev, "can't allocate IO memory\n");
771                 return ENXIO;
772         }
773         sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
774         sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
775
776         /*
777          * Allocate IRQ
778          */
779         error = jme_intr_alloc(dev);
780         if (error)
781                 goto fail;
782
783         /*
784          * Extract revisions
785          */
786         reg = CSR_READ_4(sc, JME_CHIPMODE);
787         if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
788             CHIPMODE_NOT_FPGA) {
789                 sc->jme_caps |= JME_CAP_FPGA;
790                 if (bootverbose) {
791                         device_printf(dev, "FPGA revision: 0x%04x\n",
792                                       (reg & CHIPMODE_FPGA_REV_MASK) >>
793                                       CHIPMODE_FPGA_REV_SHIFT);
794                 }
795         }
796
797         /* NOTE: FM revision is put in the upper 4 bits */
798         rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
799         rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
800         if (bootverbose)
801                 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
802
803         did = pci_get_device(dev);
804         switch (did) {
805         case PCI_PRODUCT_JMICRON_JMC250:
806                 if (rev == JME_REV1_A2)
807                         sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
808                 break;
809
810         case PCI_PRODUCT_JMICRON_JMC260:
811                 if (rev == JME_REV2)
812                         sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
813                 break;
814
815         default:
816                 panic("unknown device id 0x%04x", did);
817         }
818         if (rev >= JME_REV2) {
819                 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
820                 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
821                                       GHC_TXMAC_CLKSRC_1000;
822         }
823
824         /* Reset the ethernet controller. */
825         jme_reset(sc);
826
827         /* Map MSI/MSI-X vectors */
828         jme_set_msinum(sc);
829
830         /* Get station address. */
831         reg = CSR_READ_4(sc, JME_SMBCSR);
832         if (reg & SMBCSR_EEPROM_PRESENT)
833                 error = jme_eeprom_macaddr(sc, eaddr);
834         if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
835                 if (error != 0 && (bootverbose)) {
836                         device_printf(dev, "ethernet hardware address "
837                                       "not found in EEPROM.\n");
838                 }
839                 jme_reg_macaddr(sc, eaddr);
840         }
841
842         /*
843          * Save PHY address.
844          * Integrated JR0211 has fixed PHY address whereas FPGA version
845          * requires PHY probing to get correct PHY address.
846          */
847         if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
848                 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
849                     GPREG0_PHY_ADDR_MASK;
850                 if (bootverbose) {
851                         device_printf(dev, "PHY is at address %d.\n",
852                             sc->jme_phyaddr);
853                 }
854         } else {
855                 sc->jme_phyaddr = 0;
856         }
857
858         /* Set max allowable DMA size. */
859         pcie_ptr = pci_get_pciecap_ptr(dev);
860         if (pcie_ptr != 0) {
861                 uint16_t ctrl;
862
863                 sc->jme_caps |= JME_CAP_PCIE;
864                 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
865                 if (bootverbose) {
866                         device_printf(dev, "Read request size : %d bytes.\n",
867                             128 << ((ctrl >> 12) & 0x07));
868                         device_printf(dev, "TLP payload size : %d bytes.\n",
869                             128 << ((ctrl >> 5) & 0x07));
870                 }
871                 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
872                 case PCIEM_DEVCTL_MAX_READRQ_128:
873                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
874                         break;
875                 case PCIEM_DEVCTL_MAX_READRQ_256:
876                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
877                         break;
878                 default:
879                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
880                         break;
881                 }
882                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
883         } else {
884                 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
885                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
886         }
887
888 #ifdef notyet
889         if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
890                 sc->jme_caps |= JME_CAP_PMCAP;
891 #endif
892
893 #ifdef IFPOLL_ENABLE
894         /*
895          * NPOLLING RX CPU offset
896          */
897         if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
898                 offset = 0;
899         } else {
900                 offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
901                     device_get_unit(dev)) % ncpus2;
902                 offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
903                 if (offset >= ncpus2 ||
904                     offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
905                         device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
906                             offset, offset_def);
907                         offset = offset_def;
908                 }
909         }
910         sc->jme_npoll_rxoff = offset;
911
912         /*
913          * NPOLLING TX CPU offset
914          */
915         offset_def = sc->jme_npoll_rxoff;
916         offset = device_getenv_int(dev, "npoll.txoff", offset_def);
917         if (offset >= ncpus2) {
918                 device_printf(dev, "invalid npoll.txoff %d, use %d\n",
919                     offset, offset_def);
920                 offset = offset_def;
921         }
922         sc->jme_npoll_txoff = offset;
923 #endif
924
925         /*
926          * Set default coalesce valves
927          */
928         sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
929         sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
930         sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
931         sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
932
933         /*
934          * Adjust coalesce valves, in case that the number of TX/RX
935          * descs are set to small values by users.
936          *
937          * NOTE: coal_max will not be zero, since number of descs
938          * must aligned by JME_NDESC_ALIGN (16 currently)
939          */
940         coal_max = sc->jme_cdata.jme_tx_desc_cnt / 2;
941         if (coal_max < sc->jme_tx_coal_pkt)
942                 sc->jme_tx_coal_pkt = coal_max;
943
944         coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
945         if (coal_max < sc->jme_rx_coal_pkt)
946                 sc->jme_rx_coal_pkt = coal_max;
947
948         /*
949          * Create sysctl tree
950          */
951         jme_sysctl_node(sc);
952
953         /* Allocate DMA stuffs */
954         error = jme_dma_alloc(sc);
955         if (error)
956                 goto fail;
957
958         ifp->if_softc = sc;
959         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
960         ifp->if_init = jme_init;
961         ifp->if_ioctl = jme_ioctl;
962         ifp->if_start = jme_start;
963 #ifdef IFPOLL_ENABLE
964         ifp->if_npoll = jme_npoll;
965 #endif
966         ifp->if_watchdog = jme_watchdog;
967         ifp->if_serialize = jme_serialize;
968         ifp->if_deserialize = jme_deserialize;
969         ifp->if_tryserialize = jme_tryserialize;
970 #ifdef INVARIANTS
971         ifp->if_serialize_assert = jme_serialize_assert;
972 #endif
973         ifq_set_maxlen(&ifp->if_snd,
974             sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
975         ifq_set_ready(&ifp->if_snd);
976
977         /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
978         ifp->if_capabilities = IFCAP_HWCSUM |
979                                IFCAP_TSO |
980                                IFCAP_VLAN_MTU |
981                                IFCAP_VLAN_HWTAGGING;
982         if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
983                 ifp->if_capabilities |= IFCAP_RSS;
984         ifp->if_capenable = ifp->if_capabilities;
985
986         /*
987          * Disable TXCSUM by default to improve bulk data
988          * transmit performance (+20Mbps improvement).
989          */
990         ifp->if_capenable &= ~IFCAP_TXCSUM;
991
992         if (ifp->if_capenable & IFCAP_TXCSUM)
993                 ifp->if_hwassist |= JME_CSUM_FEATURES;
994         ifp->if_hwassist |= CSUM_TSO;
995
996         /* Set up MII bus. */
997         error = mii_phy_probe(dev, &sc->jme_miibus,
998                               jme_mediachange, jme_mediastatus);
999         if (error) {
1000                 device_printf(dev, "no PHY found!\n");
1001                 goto fail;
1002         }
1003
1004         /*
1005          * Save PHYADDR for FPGA mode PHY.
1006          */
1007         if (sc->jme_caps & JME_CAP_FPGA) {
1008                 struct mii_data *mii = device_get_softc(sc->jme_miibus);
1009
1010                 if (mii->mii_instance != 0) {
1011                         struct mii_softc *miisc;
1012
1013                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
1014                                 if (miisc->mii_phy != 0) {
1015                                         sc->jme_phyaddr = miisc->mii_phy;
1016                                         break;
1017                                 }
1018                         }
1019                         if (sc->jme_phyaddr != 0) {
1020                                 device_printf(sc->jme_dev,
1021                                     "FPGA PHY is at %d\n", sc->jme_phyaddr);
1022                                 /* vendor magic. */
1023                                 jme_miibus_writereg(dev, sc->jme_phyaddr,
1024                                     JMPHY_CONF, JMPHY_CONF_DEFFIFO);
1025
1026                                 /* XXX should we clear JME_WA_EXTFIFO */
1027                         }
1028                 }
1029         }
1030
1031         ether_ifattach(ifp, eaddr, NULL);
1032
1033         /* Tell the upper layer(s) we support long frames. */
1034         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1035
1036         error = jme_intr_setup(dev);
1037         if (error) {
1038                 ether_ifdetach(ifp);
1039                 goto fail;
1040         }
1041
1042         return 0;
1043 fail:
1044         jme_detach(dev);
1045         return (error);
1046 }
1047
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	/*
	 * Device detach: tear down in roughly the reverse order of attach.
	 * This is also the error path of jme_attach() (its "fail:" label),
	 * so every step must tolerate partially initialized state.
	 */
	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/*
		 * Stop the chip and remove the interrupt handler(s) with
		 * all ifnet serializers held, then unhook from the network
		 * stack.
		 */
		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* Tree is only non-NULL if jme_sysctl_node() succeeded. */
	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	/* Delete the miibus (PHY) child before generic bus teardown. */
	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	/* Release MSI/MSI-X or legacy interrupt resources. */
	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	/* Free descriptor rings, buffers, DMA maps and tags. */
	jme_dma_free(sc);

	return (0);
}
1082
/*
 * Create the per-device sysctl tree (under hw.<nameunit>): interrupt
 * coalescing tunables, descriptor/ring counts and, when compiled in,
 * RSS debug counters and NPOLLING CPU offsets.  Failure to create the
 * root node is logged but non-fatal.
 */
static void
jme_sysctl_node(struct jme_softc *sc)
{
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(sc->jme_dev),
				CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	/* Runtime-tunable TX/RX interrupt coalescing knobs. */
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	/* Read-only descriptor/ring geometry, fixed at attach time. */
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_tx_desc_cnt,
		       0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_rx_ring_cnt,
		       0, "RX ring count");

#ifdef JME_RSS_DEBUG
	/* Per-RX-ring debug counters: packets received and ring-empty events. */
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
		       0, "RSS debug level");
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_emp", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
		    "# of time RX ring empty");
	}
#endif

#ifdef IFPOLL_ENABLE
	/* NPOLLING RX/TX CPU offsets, validated in jme_sysctl_npoll_*off(). */
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif
}
1172
/*
 * Allocate all DMA resources for the controller: the software TX/RX
 * descriptor arrays, the parent ring/buffer DMA tags, the TX ring,
 * the per-ring RX rings, the shadow status block, and the per-descriptor
 * TX/RX buffer DMA maps.
 *
 * Returns 0 on success or a bus_dma error code.  Error paths return
 * without unwinding earlier allocations (except the TX map loop, which
 * cleans up after itself); the caller recovers via jme_dma_free(),
 * reached through jme_attach()'s fail path -> jme_detach().
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	/* Software descriptor bookkeeping arrays (M_WAITOK: cannot fail). */
	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rxdesc =
		kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			/*
			 * Destroy the maps created so far and the TX tag,
			 * and NULL the tag so jme_dma_free() does not try
			 * to destroy these maps a second time.
			 */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
1321
/*
 * Release everything allocated by jme_dma_alloc() (and the per-ring
 * helpers it calls).  Safe to call with partially initialized DMA
 * state: each teardown step is guarded by a NULL check, and every tag
 * pointer is cleared after being destroyed so repeated calls are
 * harmless.  Buffer maps are destroyed before their parent tags.
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers: one map per software TX descriptor. */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers: per-descriptor maps plus the spare map. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags, after all of their children are gone. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	/* Software descriptor bookkeeping arrays. */
	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
1414
1415 /*
1416  *      Make sure the interface is stopped at reboot time.
1417  */
1418 static int
1419 jme_shutdown(device_t dev)
1420 {
1421         return jme_suspend(dev);
1422 }
1423
1424 #ifdef notyet
1425 /*
1426  * Unlike other ethernet controllers, JMC250 requires
1427  * explicit resetting link speed to 10/100Mbps as gigabit
1428  * link will cunsume more power than 375mA.
1429  * Note, we reset the link speed to 10/100Mbps with
1430  * auto-negotiation but we don't know whether that operation
1431  * would succeed or not as we have no control after powering
1432  * off. If the renegotiation fail WOL may not work. Running
1433  * at 1Gbps draws more power than 375mA at 3.3V which is
1434  * specified in PCI specification and that would result in
1435  * complete shutdowning power to ethernet controller.
1436  *
1437  * TODO
1438  *  Save current negotiated media speed/duplex/flow-control
1439  *  to softc and restore the same link again after resuming.
1440  *  PHY handling such as power down/resetting to 100Mbps
1441  *  may be better handled in suspend method in phy driver.
1442  */
1443 static void
1444 jme_setlinkspeed(struct jme_softc *sc)
1445 {
1446         struct mii_data *mii;
1447         int aneg, i;
1448
1449         JME_LOCK_ASSERT(sc);
1450
1451         mii = device_get_softc(sc->jme_miibus);
1452         mii_pollstat(mii);
1453         aneg = 0;
1454         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1455                 switch IFM_SUBTYPE(mii->mii_media_active) {
1456                 case IFM_10_T:
1457                 case IFM_100_TX:
1458                         return;
1459                 case IFM_1000_T:
1460                         aneg++;
1461                 default:
1462                         break;
1463                 }
1464         }
1465         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1466         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1467             ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1468         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1469             BMCR_AUTOEN | BMCR_STARTNEG);
1470         DELAY(1000);
1471         if (aneg != 0) {
1472                 /* Poll link state until jme(4) get a 10/100 link. */
1473                 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1474                         mii_pollstat(mii);
1475                         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1476                                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1477                                 case IFM_10_T:
1478                                 case IFM_100_TX:
1479                                         jme_mac_config(sc);
1480                                         return;
1481                                 default:
1482                                         break;
1483                                 }
1484                         }
1485                         JME_UNLOCK(sc);
1486                         pause("jmelnk", hz);
1487                         JME_LOCK(sc);
1488                 }
1489                 if (i == MII_ANEGTICKS_GIGE)
1490                         device_printf(sc->jme_dev, "establishing link failed, "
1491                             "WOL may not work!");
1492         }
1493         /*
1494          * No link, force MAC to have 100Mbps, full-duplex link.
1495          * This is the last resort and may/may not work.
1496          */
1497         mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1498         mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1499         jme_mac_config(sc);
1500 }
1501
/*
 * Program Wake On LAN before the chip is powered down.  If the device
 * has no PME capability, or WOL is not enabled on the interface, the
 * PHY is simply powered down to save energy instead.
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	/* pci_find_extcap() returns 0 when the capability is present. */
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	/* Start from the current chip state with WOL events masked off. */
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1544 #endif
1545
/*
 * Suspend method: stop the chip with all ifnet serializers held.
 * Also used by jme_shutdown() to quiesce the NIC at reboot time.
 * WOL arming (jme_setwol()) is currently compiled out ("notyet").
 */
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}
1561
1562 static int
1563 jme_resume(device_t dev)
1564 {
1565         struct jme_softc *sc = device_get_softc(dev);
1566         struct ifnet *ifp = &sc->arpcom.ac_if;
1567 #ifdef notyet
1568         int pmc;
1569 #endif
1570
1571         ifnet_serialize_all(ifp);
1572
1573 #ifdef notyet
1574         if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1575                 uint16_t pmstat;
1576
1577                 pmstat = pci_read_config(sc->jme_dev,
1578                     pmc + PCIR_POWER_STATUS, 2);
1579                 /* Disable PME clear PME status. */
1580                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1581                 pci_write_config(sc->jme_dev,
1582                     pmc + PCIR_POWER_STATUS, pmstat, 2);
1583         }
1584 #endif
1585
1586         if (ifp->if_flags & IFF_UP)
1587                 jme_init(sc);
1588
1589         ifnet_deserialize_all(ifp);
1590
1591         return (0);
1592 }
1593
1594 static __inline int
1595 jme_tso_pullup(struct mbuf **mp)
1596 {
1597         int hoff, iphlen, thoff;
1598         struct mbuf *m;
1599
1600         m = *mp;
1601         KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
1602
1603         iphlen = m->m_pkthdr.csum_iphlen;
1604         thoff = m->m_pkthdr.csum_thlen;
1605         hoff = m->m_pkthdr.csum_lhlen;
1606
1607         KASSERT(iphlen > 0, ("invalid ip hlen"));
1608         KASSERT(thoff > 0, ("invalid tcp hlen"));
1609         KASSERT(hoff > 0, ("invalid ether hlen"));
1610
1611         if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
1612                 m = m_pullup(m, hoff + iphlen + thoff);
1613                 if (m == NULL) {
1614                         *mp = NULL;
1615                         return ENOBUFS;
1616                 }
1617                 *mp = m;
1618         }
1619         return 0;
1620 }
1621
/*
 * Load an mbuf chain into the TX descriptor ring.
 *
 * On success the producer index is advanced and *m_head may point to
 * a defragmented replacement chain; on failure the mbuf is freed and
 * *m_head is set to NULL.  The chip is NOT kicked here -- the caller
 * (jme_start()) writes TXCSR once per burst.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/*
	 * When the DMA address space is wider than 32 bits the 64bit
	 * descriptor chain format is used, which consumes one extra
	 * "symbol" descriptor carrying no payload (see below).
	 */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	/* Free descriptors, minus reserved ones and a possible symbol desc. */
	maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
		("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		/*
		 * OWN is set on the segment descriptors right away;
		 * the chip will not look at them until the first
		 * descriptor's OWN bit is set below.
		 */
		desc->flags = htole32(JME_TD_OWN | flag64);

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
1757
1758 static void
1759 jme_start(struct ifnet *ifp)
1760 {
1761         struct jme_softc *sc = ifp->if_softc;
1762         struct mbuf *m_head;
1763         int enq = 0;
1764
1765         ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
1766
1767         if (!sc->jme_has_link) {
1768                 ifq_purge(&ifp->if_snd);
1769                 return;
1770         }
1771
1772         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1773                 return;
1774
1775         if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1776                 jme_txeof(sc);
1777
1778         while (!ifq_is_empty(&ifp->if_snd)) {
1779                 /*
1780                  * Check number of available TX descs, always
1781                  * leave JME_TXD_RSVD free TX descs.
1782                  */
1783                 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE >
1784                     sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
1785                         ifp->if_flags |= IFF_OACTIVE;
1786                         break;
1787                 }
1788
1789                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1790                 if (m_head == NULL)
1791                         break;
1792
1793                 /*
1794                  * Pack the data into the transmit ring. If we
1795                  * don't have room, set the OACTIVE flag and wait
1796                  * for the NIC to drain the ring.
1797                  */
1798                 if (jme_encap(sc, &m_head)) {
1799                         KKASSERT(m_head == NULL);
1800                         ifp->if_oerrors++;
1801                         ifp->if_flags |= IFF_OACTIVE;
1802                         break;
1803                 }
1804                 enq++;
1805
1806                 /*
1807                  * If there's a BPF listener, bounce a copy of this frame
1808                  * to him.
1809                  */
1810                 ETHER_BPF_MTAP(ifp, m_head);
1811         }
1812
1813         if (enq > 0) {
1814                 /*
1815                  * Reading TXCSR takes very long time under heavy load
1816                  * so cache TXCSR value and writes the ORed value with
1817                  * the kick command to the TXCSR. This saves one register
1818                  * access cycle.
1819                  */
1820                 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1821                     TXCSR_TXQ_N_START(TXCSR_TXQ0));
1822                 /* Set a timeout in case the chip goes out to lunch. */
1823                 ifp->if_timer = JME_TX_TIMEOUT;
1824         }
1825 }
1826
/*
 * TX watchdog, fired when if_timer expires before all pending TX
 * descriptors were reclaimed.  Tries cheap recoveries (restart
 * transmission) before fully reinitializing the chip.
 */
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (!sc->jme_has_link) {
		/* Carrier was lost; reinitialize to resync with the link. */
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		/*
		 * All descriptors were actually done -- we only missed
		 * the TX completion interrupt.  Just restart transmission.
		 */
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	/* Genuine TX wedge: reset and reinitialize the hardware. */
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1856
/*
 * Interface ioctl handler.  Called with all per-interface
 * serializers held.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTUs are only allowed on chips with JME_CAP_JUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Only reprogram the RX filter when the
				 * PROMISC/ALLMULTI bits actually changed.
				 */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* TX csum offload is usable only while MTU < TX FIFO size. */
		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		/* The same TX FIFO limitation applies to TSO. */
		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1966
/*
 * Reprogram the MAC (and PHY workarounds) with the speed/duplex
 * resolved by the MII layer.  Called on link state changes.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	/* Reset the global HW control register before reprogramming. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		/* Half duplex: enable collision detection and backoff. */
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only chips cannot do 1000baseT. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	/* Apply the workarounds only on chips that need them. */
	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
2061
/*
 * Interrupt handler.  Runs with sc->jme_serialize held: masks the
 * chip's interrupts, acks the pending sources, dispatches RX/TX
 * processing and finally unmasks again.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	/* 0xFFFFFFFF usually means the hardware has gone away. */
	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	/* Fold per-ring RX coalesce bits into the ack mask. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			/* TX completion runs under its own serializer. */
			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
2126
/*
 * Reclaim TX descriptors whose frames the chip has completed:
 * update statistics, unload DMA maps, free mbufs, and clear
 * IFF_OACTIVE once enough ring space has been recovered.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int cons;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * This chip will always update the TX descriptor's
		 * buflen field and this updating always happens
		 * after clearing the OWN bit, so even if the OWN
		 * bit is cleared by the chip, we still can't be sure
		 * whether the buflen field has been updated
		 * by the chip or not.  To avoid this race, we wait
		 * for the next TX descriptor's OWN bit to be cleared
		 * by the chip before reusing this TX descriptor.
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc,
		    sc->jme_cdata.jme_tx_desc_cnt);
		next_txd = &sc->jme_cdata.jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				/* buflen carries the collision count here. */
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* 1 for symbol TX descriptor */
	if (sc->jme_cdata.jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE <=
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
2217
2218 static __inline void
2219 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2220 {
2221         int i;
2222
2223         for (i = 0; i < count; ++i) {
2224                 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2225                 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2226         }
2227 }
2228
2229 static __inline struct pktinfo *
2230 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2231 {
2232         if (flags & JME_RD_IPV4)
2233                 pi->pi_netisr = NETISR_IP;
2234         else if (flags & JME_RD_IPV6)
2235                 pi->pi_netisr = NETISR_IPV6;
2236         else
2237                 return NULL;
2238
2239         pi->pi_flags = 0;
2240         pi->pi_l3proto = IPPROTO_UNKNOWN;
2241
2242         if (flags & JME_RD_MORE_FRAG)
2243                 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2244         else if (flags & JME_RD_TCP)
2245                 pi->pi_l3proto = IPPROTO_TCP;
2246         else if (flags & JME_RD_UDP)
2247                 pi->pi_l3proto = IPPROTO_UDP;
2248         else
2249                 pi = NULL;
2250         return pi;
2251 }
2252
2253 /* Receive a frame. */
2254 static void
2255 jme_rxpkt(struct jme_rxdata *rdata)
2256 {
2257         struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2258         struct jme_desc *desc;
2259         struct jme_rxdesc *rxd;
2260         struct mbuf *mp, *m;
2261         uint32_t flags, status, hash, hashinfo;
2262         int cons, count, nsegs;
2263
2264         cons = rdata->jme_rx_cons;
2265         desc = &rdata->jme_rx_ring[cons];
2266
2267         flags = le32toh(desc->flags);
2268         status = le32toh(desc->buflen);
2269         hash = le32toh(desc->addr_hi);
2270         hashinfo = le32toh(desc->addr_lo);
2271         nsegs = JME_RX_NSEGS(status);
2272
2273         if (nsegs > 1) {
2274                 /* Skip the first descriptor. */
2275                 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2276
2277                 /*
2278                  * Clear the OWN bit of the following RX descriptors;
2279                  * hardware will not clear the OWN bit except the first
2280                  * RX descriptor.
2281                  *
2282                  * Since the first RX descriptor is setup, i.e. OWN bit
		 * on, before its following RX descriptors, leaving the
2284                  * OWN bit on the following RX descriptors will trick
2285                  * the hardware into thinking that the following RX
2286                  * descriptors are ready to be used too.
2287                  */
2288                 for (count = 1; count < nsegs; count++,
2289                      JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2290                         rdata->jme_rx_ring[cons].flags = 0;
2291
2292                 cons = rdata->jme_rx_cons;
2293         }
2294
2295         JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2296                         "hash 0x%08x, hash info 0x%08x\n",
2297                         rdata->jme_rx_idx, flags, hash, hashinfo);
2298
2299         if (status & JME_RX_ERR_STAT) {
2300                 ifp->if_ierrors++;
2301                 jme_discard_rxbufs(rdata, cons, nsegs);
2302 #ifdef JME_SHOW_ERRORS
2303                 if_printf(ifp, "%s : receive error = 0x%b\n",
2304                     __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2305 #endif
2306                 rdata->jme_rx_cons += nsegs;
2307                 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2308                 return;
2309         }
2310
2311         rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2312         for (count = 0; count < nsegs; count++,
2313              JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2314                 rxd = &rdata->jme_rxdesc[cons];
2315                 mp = rxd->rx_m;
2316
2317                 /* Add a new receive buffer to the ring. */
2318                 if (jme_newbuf(rdata, rxd, 0) != 0) {
2319                         ifp->if_iqdrops++;
2320                         /* Reuse buffer. */
2321                         jme_discard_rxbufs(rdata, cons, nsegs - count);
2322                         if (rdata->jme_rxhead != NULL) {
2323                                 m_freem(rdata->jme_rxhead);
2324                                 JME_RXCHAIN_RESET(rdata);
2325                         }
2326                         break;
2327                 }
2328
2329                 /*
2330                  * Assume we've received a full sized frame.
2331                  * Actual size is fixed when we encounter the end of
2332                  * multi-segmented frame.
2333                  */
2334                 mp->m_len = MCLBYTES;
2335
2336                 /* Chain received mbufs. */
2337                 if (rdata->jme_rxhead == NULL) {
2338                         rdata->jme_rxhead = mp;
2339                         rdata->jme_rxtail = mp;
2340                 } else {
2341                         /*
2342                          * Receive processor can receive a maximum frame
2343                          * size of 65535 bytes.
2344                          */
2345                         rdata->jme_rxtail->m_next = mp;
2346                         rdata->jme_rxtail = mp;
2347                 }
2348
2349                 if (count == nsegs - 1) {
2350                         struct pktinfo pi0, *pi;
2351
2352                         /* Last desc. for this frame. */
2353                         m = rdata->jme_rxhead;
2354                         m->m_pkthdr.len = rdata->jme_rxlen;
2355                         if (nsegs > 1) {
2356                                 /* Set first mbuf size. */
2357                                 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2358                                 /* Set last mbuf size. */
2359                                 mp->m_len = rdata->jme_rxlen -
2360                                     ((MCLBYTES - JME_RX_PAD_BYTES) +
2361                                     (MCLBYTES * (nsegs - 2)));
2362                         } else {
2363                                 m->m_len = rdata->jme_rxlen;
2364                         }
2365                         m->m_pkthdr.rcvif = ifp;
2366
2367                         /*
2368                          * Account for 10bytes auto padding which is used
2369                          * to align IP header on 32bit boundary. Also note,
2370                          * CRC bytes is automatically removed by the
2371                          * hardware.
2372                          */
2373                         m->m_data += JME_RX_PAD_BYTES;
2374
2375                         /* Set checksum information. */
2376                         if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2377                             (flags & JME_RD_IPV4)) {
2378                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2379                                 if (flags & JME_RD_IPCSUM)
2380                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2381                                 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2382                                     ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2383                                      (JME_RD_TCP | JME_RD_TCPCSUM) ||
2384                                      (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2385                                      (JME_RD_UDP | JME_RD_UDPCSUM))) {
2386                                         m->m_pkthdr.csum_flags |=
2387                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2388                                         m->m_pkthdr.csum_data = 0xffff;
2389                                 }
2390                         }
2391
2392                         /* Check for VLAN tagged packets. */
2393                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2394                             (flags & JME_RD_VLAN_TAG)) {
2395                                 m->m_pkthdr.ether_vlantag =
2396                                     flags & JME_RD_VLAN_MASK;
2397                                 m->m_flags |= M_VLANTAG;
2398                         }
2399
2400                         ifp->if_ipackets++;
2401
2402                         if (ifp->if_capenable & IFCAP_RSS)
2403                                 pi = jme_pktinfo(&pi0, flags);
2404                         else
2405                                 pi = NULL;
2406
2407                         if (pi != NULL &&
2408                             (hashinfo & JME_RD_HASH_FN_MASK) ==
2409                             JME_RD_HASH_FN_TOEPLITZ) {
2410                                 m->m_flags |= (M_HASH | M_CKHASH);
2411                                 m->m_pkthdr.hash = toeplitz_hash(hash);
2412                         }
2413
2414 #ifdef JME_RSS_DEBUG
2415                         if (pi != NULL) {
2416                                 JME_RSS_DPRINTF(rdata->jme_sc, 10,
2417                                     "isr %d flags %08x, l3 %d %s\n",
2418                                     pi->pi_netisr, pi->pi_flags,
2419                                     pi->pi_l3proto,
2420                                     (m->m_flags & M_HASH) ? "hash" : "");
2421                         }
2422 #endif
2423
2424                         /* Pass it on. */
2425                         ether_input_pkt(ifp, m, pi);
2426
2427                         /* Reset mbuf chains. */
2428                         JME_RXCHAIN_RESET(rdata);
2429 #ifdef JME_RSS_DEBUG
2430                         rdata->jme_rx_pkt++;
2431 #endif
2432                 }
2433         }
2434
2435         rdata->jme_rx_cons += nsegs;
2436         rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2437 }
2438
2439 static void
2440 jme_rxeof(struct jme_rxdata *rdata, int count)
2441 {
2442         struct jme_desc *desc;
2443         int nsegs, pktlen;
2444
2445         for (;;) {
2446 #ifdef IFPOLL_ENABLE
2447                 if (count >= 0 && count-- == 0)
2448                         break;
2449 #endif
2450                 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2451                 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2452                         break;
2453                 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2454                         break;
2455
2456                 /*
2457                  * Check number of segments against received bytes.
2458                  * Non-matching value would indicate that hardware
2459                  * is still trying to update Rx descriptors. I'm not
2460                  * sure whether this check is needed.
2461                  */
2462                 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2463                 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2464                 if (nsegs != howmany(pktlen, MCLBYTES)) {
2465                         if_printf(&rdata->jme_sc->arpcom.ac_if,
2466                             "RX fragment count(%d) and "
2467                             "packet size(%d) mismach\n", nsegs, pktlen);
2468                         break;
2469                 }
2470
2471                 /*
2472                  * NOTE:
2473                  * RSS hash and hash information may _not_ be set by the
2474                  * hardware even if the OWN bit is cleared and VALID bit
2475                  * is set.
2476                  *
2477                  * If the RSS information is not delivered by the hardware
2478                  * yet, we MUST NOT accept this packet, let alone reusing
2479                  * its RX descriptor.  If this packet was accepted and its
2480                  * RX descriptor was reused before hardware delivering the
2481                  * RSS information, the RX buffer's address would be trashed
2482                  * by the RSS information delivered by the hardware.
2483                  */
2484                 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2485                         struct jme_rxdesc *rxd;
2486                         uint32_t hashinfo;
2487
2488                         hashinfo = le32toh(desc->addr_lo);
2489                         rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2490
2491                         /*
2492                          * This test should be enough to detect the pending
2493                          * RSS information delivery, given:
2494                          * - If RSS hash is not calculated, the hashinfo
2495                          *   will be 0.  Howvever, the lower 32bits of RX
2496                          *   buffers' physical address will never be 0.
2497                          *   (see jme_rxbuf_dma_filter)
2498                          * - If RSS hash is calculated, the lowest 4 bits
2499                          *   of hashinfo will be set, while the RX buffers
2500                          *   are at least 2K aligned.
2501                          */
2502                         if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2503 #ifdef JME_SHOW_RSSWB
2504                                 if_printf(&rdata->jme_sc->arpcom.ac_if,
2505                                     "RSS is not written back yet\n");
2506 #endif
2507                                 break;
2508                         }
2509                 }
2510
2511                 /* Received a frame. */
2512                 jme_rxpkt(rdata);
2513         }
2514 }
2515
/*
 * Once-a-second callout: drive the MII state machine and rearm
 * ourselves.  Runs under sc->jme_serialize.
 */
static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	lwkt_serialize_enter(&sc->jme_serialize);

	/*
	 * Flag that mii_tick() is being called from the tick callout,
	 * so code reached through it can tell this context apart.
	 */
	sc->jme_in_tick = TRUE;
	mii_tick(mii);
	sc->jme_in_tick = FALSE;

	/* Rearm for the next second. */
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	lwkt_serialize_exit(&sc->jme_serialize);
}
2532
/*
 * Reset the chip.
 *
 * The sequence of register writes and read-back flushes below follows
 * the hardware's required reset choreography: stop both DMA engines,
 * assert reset while gating the MAC clock sources, deassert reset,
 * then cycle the clock sources and TX/RX enables before leaving the
 * chip stopped again.  The ordering is deliberate; do not reorder.
 */
static void
jme_reset(struct jme_softc *sc)
{
	uint32_t val;

	/* Make sure that TX and RX are stopped */
	jme_stop_tx(sc);
	jme_stop_rx(sc);

	/* Start reset */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(20);

	/*
	 * Hold reset bit before stop reset
	 */

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop reset */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/*
	 * Clear reset bit after stop reset
	 */

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Enable TX and RX */
	val = CSR_READ_4(sc, JME_TXCSR);
	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
	val = CSR_READ_4(sc, JME_RXCSR);
	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
	/* Flush */
	CSR_READ_4(sc, JME_TXCSR);
	CSR_READ_4(sc, JME_RXCSR);

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop TX and RX */
	jme_stop_tx(sc);
	jme_stop_rx(sc);
}
2604
2605 static void
2606 jme_init(void *xsc)
2607 {
2608         struct jme_softc *sc = xsc;
2609         struct ifnet *ifp = &sc->arpcom.ac_if;
2610         struct mii_data *mii;
2611         uint8_t eaddr[ETHER_ADDR_LEN];
2612         bus_addr_t paddr;
2613         uint32_t reg;
2614         int error, r;
2615
2616         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2617
2618         /*
2619          * Cancel any pending I/O.
2620          */
2621         jme_stop(sc);
2622
2623         /*
2624          * Reset the chip to a known state.
2625          */
2626         jme_reset(sc);
2627
2628         /*
2629          * Setup MSI/MSI-X vectors to interrupts mapping
2630          */
2631         jme_set_msinum(sc);
2632
2633         if (JME_ENABLE_HWRSS(sc))
2634                 jme_enable_rss(sc);
2635         else
2636                 jme_disable_rss(sc);
2637
2638         /* Init RX descriptors */
2639         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2640                 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2641                 if (error) {
2642                         if_printf(ifp, "initialization failed: "
2643                                   "no memory for %dth RX ring.\n", r);
2644                         jme_stop(sc);
2645                         return;
2646                 }
2647         }
2648
2649         /* Init TX descriptors */
2650         jme_init_tx_ring(sc);
2651
2652         /* Initialize shadow status block. */
2653         jme_init_ssb(sc);
2654
2655         /* Reprogram the station address. */
2656         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2657         CSR_WRITE_4(sc, JME_PAR0,
2658             eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2659         CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2660
2661         /*
2662          * Configure Tx queue.
2663          *  Tx priority queue weight value : 0
2664          *  Tx FIFO threshold for processing next packet : 16QW
2665          *  Maximum Tx DMA length : 512
2666          *  Allow Tx DMA burst.
2667          */
2668         sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2669         sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2670         sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2671         sc->jme_txcsr |= sc->jme_tx_dma_size;
2672         sc->jme_txcsr |= TXCSR_DMA_BURST;
2673         CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2674
2675         /* Set Tx descriptor counter. */
2676         CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
2677
2678         /* Set Tx ring address to the hardware. */
2679         paddr = sc->jme_cdata.jme_tx_ring_paddr;
2680         CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2681         CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2682
2683         /* Configure TxMAC parameters. */
2684         reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2685         reg |= TXMAC_THRESH_1_PKT;
2686         reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2687         CSR_WRITE_4(sc, JME_TXMAC, reg);
2688
2689         /*
2690          * Configure Rx queue.
2691          *  FIFO full threshold for transmitting Tx pause packet : 128T
2692          *  FIFO threshold for processing next packet : 128QW
2693          *  Rx queue 0 select
2694          *  Max Rx DMA length : 128
2695          *  Rx descriptor retry : 32
2696          *  Rx descriptor retry time gap : 256ns
2697          *  Don't receive runt/bad frame.
2698          */
2699         sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2700 #if 0
2701         /*
2702          * Since Rx FIFO size is 4K bytes, receiving frames larger
2703          * than 4K bytes will suffer from Rx FIFO overruns. So
2704          * decrease FIFO threshold to reduce the FIFO overruns for
2705          * frames larger than 4000 bytes.
2706          * For best performance of standard MTU sized frames use
2707          * maximum allowable FIFO threshold, 128QW.
2708          */
2709         if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2710             JME_RX_FIFO_SIZE)
2711                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2712         else
2713                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2714 #else
2715         /* Improve PCI Express compatibility */
2716         sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2717 #endif
2718         sc->jme_rxcsr |= sc->jme_rx_dma_size;
2719         sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2720         sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2721         /* XXX TODO DROP_BAD */
2722
2723         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2724                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2725
2726                 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2727
2728                 /* Set Rx descriptor counter. */
2729                 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2730
2731                 /* Set Rx ring address to the hardware. */
2732                 paddr = rdata->jme_rx_ring_paddr;
2733                 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2734                 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2735         }
2736
2737         /* Clear receive filter. */
2738         CSR_WRITE_4(sc, JME_RXMAC, 0);
2739
2740         /* Set up the receive filter. */
2741         jme_set_filter(sc);
2742         jme_set_vlan(sc);
2743
2744         /*
2745          * Disable all WOL bits as WOL can interfere normal Rx
2746          * operation. Also clear WOL detection status bits.
2747          */
2748         reg = CSR_READ_4(sc, JME_PMCS);
2749         reg &= ~PMCS_WOL_ENB_MASK;
2750         CSR_WRITE_4(sc, JME_PMCS, reg);
2751
2752         /*
2753          * Pad 10bytes right before received frame. This will greatly
2754          * help Rx performance on strict-alignment architectures as
2755          * it does not need to copy the frame to align the payload.
2756          */
2757         reg = CSR_READ_4(sc, JME_RXMAC);
2758         reg |= RXMAC_PAD_10BYTES;
2759
2760         if (ifp->if_capenable & IFCAP_RXCSUM)
2761                 reg |= RXMAC_CSUM_ENB;
2762         CSR_WRITE_4(sc, JME_RXMAC, reg);
2763
2764         /* Configure general purpose reg0 */
2765         reg = CSR_READ_4(sc, JME_GPREG0);
2766         reg &= ~GPREG0_PCC_UNIT_MASK;
2767         /* Set PCC timer resolution to micro-seconds unit. */
2768         reg |= GPREG0_PCC_UNIT_US;
2769         /*
2770          * Disable all shadow register posting as we have to read
2771          * JME_INTR_STATUS register in jme_intr. Also it seems
2772          * that it's hard to synchronize interrupt status between
2773          * hardware and software with shadow posting due to
2774          * requirements of bus_dmamap_sync(9).
2775          */
2776         reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2777             GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2778             GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2779             GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2780         /* Disable posting of DW0. */
2781         reg &= ~GPREG0_POST_DW0_ENB;
2782         /* Clear PME message. */
2783         reg &= ~GPREG0_PME_ENB;
2784         /* Set PHY address. */
2785         reg &= ~GPREG0_PHY_ADDR_MASK;
2786         reg |= sc->jme_phyaddr;
2787         CSR_WRITE_4(sc, JME_GPREG0, reg);
2788
2789         /* Configure Tx queue 0 packet completion coalescing. */
2790         jme_set_tx_coal(sc);
2791
2792         /* Configure Rx queues packet completion coalescing. */
2793         jme_set_rx_coal(sc);
2794
2795         /* Configure shadow status block but don't enable posting. */
2796         paddr = sc->jme_cdata.jme_ssb_block_paddr;
2797         CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2798         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2799
2800         /* Disable Timer 1 and Timer 2. */
2801         CSR_WRITE_4(sc, JME_TIMER1, 0);
2802         CSR_WRITE_4(sc, JME_TIMER2, 0);
2803
2804         /* Configure retry transmit period, retry limit value. */
2805         CSR_WRITE_4(sc, JME_TXTRHD,
2806             ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2807             TXTRHD_RT_PERIOD_MASK) |
2808             ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2809             TXTRHD_RT_LIMIT_SHIFT));
2810
2811 #ifdef IFPOLL_ENABLE
2812         if (!(ifp->if_flags & IFF_NPOLLING))
2813 #endif
2814         /* Initialize the interrupt mask. */
2815         jme_enable_intr(sc);
2816         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2817
2818         /*
2819          * Enabling Tx/Rx DMA engines and Rx queue processing is
2820          * done after detection of valid link in jme_miibus_statchg.
2821          */
2822         sc->jme_has_link = FALSE;
2823
2824         /* Set the current media. */
2825         mii = device_get_softc(sc->jme_miibus);
2826         mii_mediachg(mii);
2827
2828         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2829
2830         ifp->if_flags |= IFF_RUNNING;
2831         ifp->if_flags &= ~IFF_OACTIVE;
2832 }
2833
/*
 * Take the interface down: mark it non-running, disable interrupts,
 * stop both DMA engines and release every mbuf still held by the
 * RX/TX rings.  The teardown order (intr off -> DMA off -> free)
 * matters; do not reorder.  Called with the ifnet fully serialized.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_has_link = FALSE;

	/*
	 * Disable interrupts.
	 */
	jme_disable_intr(sc);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partial finished RX segments
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		/* m_freem() releases the whole chain headed by jme_rxhead. */
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				/* Unload DMA map before freeing the mbuf. */
				bus_dmamap_unload(rdata->jme_rx_tag,
						  rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}
2904
2905 static void
2906 jme_stop_tx(struct jme_softc *sc)
2907 {
2908         uint32_t reg;
2909         int i;
2910
2911         reg = CSR_READ_4(sc, JME_TXCSR);
2912         if ((reg & TXCSR_TX_ENB) == 0)
2913                 return;
2914         reg &= ~TXCSR_TX_ENB;
2915         CSR_WRITE_4(sc, JME_TXCSR, reg);
2916         for (i = JME_TIMEOUT; i > 0; i--) {
2917                 DELAY(1);
2918                 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2919                         break;
2920         }
2921         if (i == 0)
2922                 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2923 }
2924
2925 static void
2926 jme_stop_rx(struct jme_softc *sc)
2927 {
2928         uint32_t reg;
2929         int i;
2930
2931         reg = CSR_READ_4(sc, JME_RXCSR);
2932         if ((reg & RXCSR_RX_ENB) == 0)
2933                 return;
2934         reg &= ~RXCSR_RX_ENB;
2935         CSR_WRITE_4(sc, JME_RXCSR, reg);
2936         for (i = JME_TIMEOUT; i > 0; i--) {
2937                 DELAY(1);
2938                 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2939                         break;
2940         }
2941         if (i == 0)
2942                 device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2943 }
2944
2945 static void
2946 jme_init_tx_ring(struct jme_softc *sc)
2947 {
2948         struct jme_chain_data *cd;
2949         struct jme_txdesc *txd;
2950         int i;
2951
2952         sc->jme_cdata.jme_tx_prod = 0;
2953         sc->jme_cdata.jme_tx_cons = 0;
2954         sc->jme_cdata.jme_tx_cnt = 0;
2955
2956         cd = &sc->jme_cdata;
2957         bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2958         for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2959                 txd = &sc->jme_cdata.jme_txdesc[i];
2960                 txd->tx_m = NULL;
2961                 txd->tx_desc = &cd->jme_tx_ring[i];
2962                 txd->tx_ndesc = 0;
2963         }
2964 }
2965
2966 static void
2967 jme_init_ssb(struct jme_softc *sc)
2968 {
2969         struct jme_chain_data *cd;
2970
2971         cd = &sc->jme_cdata;
2972         bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2973 }
2974
2975 static int
2976 jme_init_rx_ring(struct jme_rxdata *rdata)
2977 {
2978         struct jme_rxdesc *rxd;
2979         int i;
2980
2981         KKASSERT(rdata->jme_rxhead == NULL &&
2982                  rdata->jme_rxtail == NULL &&
2983                  rdata->jme_rxlen == 0);
2984         rdata->jme_rx_cons = 0;
2985
2986         bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2987         for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2988                 int error;
2989
2990                 rxd = &rdata->jme_rxdesc[i];
2991                 rxd->rx_m = NULL;
2992                 rxd->rx_desc = &rdata->jme_rx_ring[i];
2993                 error = jme_newbuf(rdata, rxd, 1);
2994                 if (error)
2995                         return error;
2996         }
2997         return 0;
2998 }
2999
/*
 * Attach a fresh mbuf cluster to RX descriptor @rxd.
 *
 * The new mbuf is DMA-loaded into the ring's spare map first; only on
 * success are the descriptor's old map/mbuf released and the spare map
 * swapped in.  This ordering guarantees the descriptor keeps a valid
 * buffer if allocation or DMA load fails.
 *
 * @init: non-zero during ring initialization -- wait for memory and
 *        log load failures instead of failing silently.
 * Returns 0 on success, ENOBUFS or a bus_dma error otherwise.
 */
static int
jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map; the old mapping stays intact on error. */
	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
			BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "can't load RX mbuf\n");
		}
		return error;
	}

	/* Release the previous buffer's DMA mapping, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the loaded spare map into the descriptor. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;
	rxd->rx_paddr = segs.ds_addr;

	jme_setup_rxdesc(rxd);
	return 0;
}
3045
3046 static void
3047 jme_set_vlan(struct jme_softc *sc)
3048 {
3049         struct ifnet *ifp = &sc->arpcom.ac_if;
3050         uint32_t reg;
3051
3052         ASSERT_IFNET_SERIALIZED_ALL(ifp);
3053
3054         reg = CSR_READ_4(sc, JME_RXMAC);
3055         reg &= ~RXMAC_VLAN_ENB;
3056         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3057                 reg |= RXMAC_VLAN_ENB;
3058         CSR_WRITE_4(sc, JME_RXMAC, reg);
3059 }
3060
3061 static void
3062 jme_set_filter(struct jme_softc *sc)
3063 {
3064         struct ifnet *ifp = &sc->arpcom.ac_if;
3065         struct ifmultiaddr *ifma;
3066         uint32_t crc;
3067         uint32_t mchash[2];
3068         uint32_t rxcfg;
3069
3070         ASSERT_IFNET_SERIALIZED_ALL(ifp);
3071
3072         rxcfg = CSR_READ_4(sc, JME_RXMAC);
3073         rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3074             RXMAC_ALLMULTI);
3075
3076         /*
3077          * Always accept frames destined to our station address.
3078          * Always accept broadcast frames.
3079          */
3080         rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3081
3082         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3083                 if (ifp->if_flags & IFF_PROMISC)
3084                         rxcfg |= RXMAC_PROMISC;
3085                 if (ifp->if_flags & IFF_ALLMULTI)
3086                         rxcfg |= RXMAC_ALLMULTI;
3087                 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3088                 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3089                 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3090                 return;
3091         }
3092
3093         /*
3094          * Set up the multicast address filter by passing all multicast
3095          * addresses through a CRC generator, and then using the low-order
3096          * 6 bits as an index into the 64 bit multicast hash table.  The
3097          * high order bits select the register, while the rest of the bits
3098          * select the bit within the register.
3099          */
3100         rxcfg |= RXMAC_MULTICAST;
3101         bzero(mchash, sizeof(mchash));
3102
3103         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3104                 if (ifma->ifma_addr->sa_family != AF_LINK)
3105                         continue;
3106                 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3107                     ifma->ifma_addr), ETHER_ADDR_LEN);
3108
3109                 /* Just want the 6 least significant bits. */
3110                 crc &= 0x3f;
3111
3112                 /* Set the corresponding bit in the hash table. */
3113                 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3114         }
3115
3116         CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3117         CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3118         CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3119 }
3120
3121 static int
3122 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3123 {
3124         struct jme_softc *sc = arg1;
3125         struct ifnet *ifp = &sc->arpcom.ac_if;
3126         int error, v;
3127
3128         ifnet_serialize_all(ifp);
3129
3130         v = sc->jme_tx_coal_to;
3131         error = sysctl_handle_int(oidp, &v, 0, req);
3132         if (error || req->newptr == NULL)
3133                 goto back;
3134
3135         if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3136                 error = EINVAL;
3137                 goto back;
3138         }
3139
3140         if (v != sc->jme_tx_coal_to) {
3141                 sc->jme_tx_coal_to = v;
3142                 if (ifp->if_flags & IFF_RUNNING)
3143                         jme_set_tx_coal(sc);
3144         }
3145 back:
3146         ifnet_deserialize_all(ifp);
3147         return error;
3148 }
3149
3150 static int
3151 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3152 {
3153         struct jme_softc *sc = arg1;
3154         struct ifnet *ifp = &sc->arpcom.ac_if;
3155         int error, v;
3156
3157         ifnet_serialize_all(ifp);
3158
3159         v = sc->jme_tx_coal_pkt;
3160         error = sysctl_handle_int(oidp, &v, 0, req);
3161         if (error || req->newptr == NULL)
3162                 goto back;
3163
3164         if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3165                 error = EINVAL;
3166                 goto back;
3167         }
3168
3169         if (v != sc->jme_tx_coal_pkt) {
3170                 sc->jme_tx_coal_pkt = v;
3171                 if (ifp->if_flags & IFF_RUNNING)
3172                         jme_set_tx_coal(sc);
3173         }
3174 back:
3175         ifnet_deserialize_all(ifp);
3176         return error;
3177 }
3178
3179 static int
3180 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3181 {
3182         struct jme_softc *sc = arg1;
3183         struct ifnet *ifp = &sc->arpcom.ac_if;
3184         int error, v;
3185
3186         ifnet_serialize_all(ifp);
3187
3188         v = sc->jme_rx_coal_to;
3189         error = sysctl_handle_int(oidp, &v, 0, req);
3190         if (error || req->newptr == NULL)
3191                 goto back;
3192
3193         if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3194                 error = EINVAL;
3195                 goto back;
3196         }
3197
3198         if (v != sc->jme_rx_coal_to) {
3199                 sc->jme_rx_coal_to = v;
3200                 if (ifp->if_flags & IFF_RUNNING)
3201                         jme_set_rx_coal(sc);
3202         }
3203 back:
3204         ifnet_deserialize_all(ifp);
3205         return error;
3206 }
3207
3208 static int
3209 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3210 {
3211         struct jme_softc *sc = arg1;
3212         struct ifnet *ifp = &sc->arpcom.ac_if;
3213         int error, v;
3214
3215         ifnet_serialize_all(ifp);
3216
3217         v = sc->jme_rx_coal_pkt;
3218         error = sysctl_handle_int(oidp, &v, 0, req);
3219         if (error || req->newptr == NULL)
3220                 goto back;
3221
3222         if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3223                 error = EINVAL;
3224                 goto back;
3225         }
3226
3227         if (v != sc->jme_rx_coal_pkt) {
3228                 sc->jme_rx_coal_pkt = v;
3229                 if (ifp->if_flags & IFF_RUNNING)
3230                         jme_set_rx_coal(sc);
3231         }
3232 back:
3233         ifnet_deserialize_all(ifp);
3234         return error;
3235 }
3236
/*
 * Program the TX packet-completion coalescing register (JME_PCCTX)
 * from the softc's sysctl-tunable timeout and packet-count values.
 */
static void
jme_set_tx_coal(struct jme_softc *sc)
{
	uint32_t reg;

	/* Shift the tunables into their register fields and mask them. */
	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
	    PCCTX_COAL_TO_MASK;
	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
	    PCCTX_COAL_PKT_MASK;
	/* Enable coalescing for TX queue 0. */
	reg |= PCCTX_COAL_TXQ0;
	CSR_WRITE_4(sc, JME_PCCTX, reg);
}
3249
/*
 * Program the RX packet-completion coalescing registers (JME_PCCRX(r))
 * from the softc's sysctl-tunable values.  The same timeout/packet
 * thresholds are applied to every active RX ring.
 */
static void
jme_set_rx_coal(struct jme_softc *sc)
{
	uint32_t reg;
	int r;

	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
	    PCCRX_COAL_TO_MASK;
	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
	    PCCRX_COAL_PKT_MASK;
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
		CSR_WRITE_4(sc, JME_PCCRX(r), reg);
}
3263
3264 #ifdef IFPOLL_ENABLE
3265
/*
 * Polling-mode status handler.  Checks for RX-queue-empty conditions
 * (the hardware ran out of RX descriptors), drains the affected rings,
 * then acknowledges the status bits and restarts the RX engine.
 * Caller must hold sc->jme_serialize.
 */
static void
jme_npoll_status(struct ifnet *ifp, int pollhz __unused)
{
	struct jme_softc *sc = ifp->if_softc;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if (status & INTR_RXQ_DESC_EMPTY) {
		int i;

		/* Drain every ring whose "descriptor empty" bit is set. */
		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[i];

			if (status & rdata->jme_rx_empty) {
				lwkt_serialize_enter(&rdata->jme_rx_serialize);
				jme_rxeof(rdata, -1);
#ifdef JME_RSS_DEBUG
				rdata->jme_rx_emp++;
#endif
				lwkt_serialize_exit(&rdata->jme_rx_serialize);
			}
		}
		/*
		 * Ack only the empty bits, then kick the RX engine so it
		 * resumes fetching the descriptors we just replenished.
		 */
		CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
		    RXCSR_RX_ENB | RXCSR_RXQ_START);
	}
}
3296
3297 static void
3298 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3299 {
3300         struct jme_rxdata *rdata = arg;
3301
3302         ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3303
3304         jme_rxeof(rdata, cycle);
3305 }
3306
/*
 * Polling-mode TX handler: reclaim completed TX descriptors and restart
 * transmission if packets are queued.  Caller must hold the TX
 * serializer.
 */
static void
jme_npoll_tx(struct ifnet *ifp, void *arg __unused, int cycle __unused)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	jme_txeof(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
3318
/*
 * ifpoll registration hook.  With a non-NULL 'info', register the
 * status/TX/RX polling handlers (honouring the configured TX/RX CPU
 * offsets) and disable hardware interrupts; with NULL, deregister and
 * re-enable interrupts.  Caller holds all ifnet serializers.
 */
static void
jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info) {
		int i, off;

		info->ifpi_status.status_func = jme_npoll_status;
		info->ifpi_status.serializer = &sc->jme_serialize;

		/* TX polling runs on the configured TX CPU offset. */
		off = sc->jme_npoll_txoff;
		KKASSERT(off <= ncpus2);
		info->ifpi_tx[off].poll_func = jme_npoll_tx;
		info->ifpi_tx[off].arg = NULL;
		info->ifpi_tx[off].serializer = &sc->jme_cdata.jme_tx_serialize;

		/* RX rings are spread over consecutive CPUs from rxoff. */
		off = sc->jme_npoll_rxoff;
		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[i];
			int idx = i + off;

			info->ifpi_rx[idx].poll_func = jme_npoll_rx;
			info->ifpi_rx[idx].arg = rdata;
			info->ifpi_rx[idx].serializer =
			    &rdata->jme_rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING)
			jme_disable_intr(sc);
		ifp->if_npoll_cpuid = sc->jme_npoll_txoff;
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			jme_enable_intr(sc);
		ifp->if_npoll_cpuid = -1;
	}
}
3359
3360 static int
3361 jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3362 {
3363         struct jme_softc *sc = (void *)arg1;
3364         struct ifnet *ifp = &sc->arpcom.ac_if;
3365         int error, off;
3366
3367         off = sc->jme_npoll_rxoff;
3368         error = sysctl_handle_int(oidp, &off, 0, req);
3369         if (error || req->newptr == NULL)
3370                 return error;
3371         if (off < 0)
3372                 return EINVAL;
3373
3374         ifnet_serialize_all(ifp);
3375         if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3376                 error = EINVAL;
3377         } else {
3378                 error = 0;
3379                 sc->jme_npoll_rxoff = off;
3380         }
3381         ifnet_deserialize_all(ifp);
3382
3383         return error;
3384 }
3385
3386 static int
3387 jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3388 {
3389         struct jme_softc *sc = (void *)arg1;
3390         struct ifnet *ifp = &sc->arpcom.ac_if;
3391         int error, off;
3392
3393         off = sc->jme_npoll_txoff;
3394         error = sysctl_handle_int(oidp, &off, 0, req);
3395         if (error || req->newptr == NULL)
3396                 return error;
3397         if (off < 0)
3398                 return EINVAL;
3399
3400         ifnet_serialize_all(ifp);
3401         if (off >= ncpus2) {
3402                 error = EINVAL;
3403         } else {
3404                 error = 0;
3405                 sc->jme_npoll_txoff = off;
3406         }
3407         ifnet_deserialize_all(ifp);
3408
3409         return error;
3410 }
3411
3412 #endif  /* IFPOLL_ENABLE */
3413
/*
 * Allocate the coherent DMA memory backing one RX descriptor ring and
 * record the resulting tag, map, KVA and bus address in the ring's
 * rxdata.  Returns 0 on success or an errno from bus_dmamem_coherent().
 */
static int
jme_rxring_dma_alloc(struct jme_rxdata *rdata)
{
	bus_dmamem_t dmem;
	int error, asize;

	/* Pad the ring size up to the required hardware alignment. */
	asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
	error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
			JME_RX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
		return error;
	}
	rdata->jme_rx_ring_tag = dmem.dmem_tag;
	rdata->jme_rx_ring_map = dmem.dmem_map;
	rdata->jme_rx_ring = dmem.dmem_addr;
	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;

	return 0;
}
3437
3438 static int
3439 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3440 {
3441         if ((paddr & 0xffffffff) == 0) {
3442                 /*
3443                  * Don't allow lower 32bits of the RX buffer's
3444                  * physical address to be 0, else it will break
3445                  * hardware pending RSS information delivery
3446                  * detection on RX path.
3447                  */
3448                 return 1;
3449         }
3450         return 0;
3451 }
3452
/*
 * Create the DMA tag and per-descriptor DMA maps (plus one spare map)
 * for one RX ring's mbuf buffers.  On any failure, every map created so
 * far is destroyed and the tag is torn down before returning the errno,
 * leaving rdata->jme_rx_tag NULL.
 */
static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
	bus_addr_t lowaddr;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
		/*
		 * Restrict buffers below 4G so jme_rxbuf_dma_filter is
		 * consulted; it rejects addresses whose low 32 bits are 0.
		 */
		/* jme_rxbuf_dma_filter will be called */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,        /* algnmnt, boundary */
	    lowaddr,                    /* lowaddr */
	    BUS_SPACE_MAXADDR,          /* highaddr */
	    jme_rxbuf_dma_filter, NULL, /* filter, filterarg */
	    MCLBYTES,                   /* maxsize */
	    1,                          /* nsegments */
	    MCLBYTES,                   /* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
				  &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n",
		    rdata->jme_rx_idx);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(rdata->jme_sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);

			/* Unwind: destroy the maps created so far. */
			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}
3520
3521 static void
3522 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3523 {
3524         int r;
3525
3526         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3527                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3528
3529                 if (status & rdata->jme_rx_coal) {
3530                         lwkt_serialize_enter(&rdata->jme_rx_serialize);
3531                         jme_rxeof(rdata, -1);
3532                         lwkt_serialize_exit(&rdata->jme_rx_serialize);
3533                 }
3534         }
3535 }
3536
/*
 * Enable hardware RSS: program the RSS control register, load the
 * Toeplitz hash key, and build the indirection table that spreads
 * hash values round-robin over the active RX rings.  Only 2- or
 * 4-ring configurations are supported.
 */
static void
jme_enable_rss(struct jme_softc *sc)
{
	uint32_t rssc, ind;
	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
	int i;

	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
		sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
		("%s: invalid # of RX rings (%d)",
		 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));

	rssc = RSSC_HASH_64_ENTRY;
	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
	/* Encode the ring count into RSSC (2 rings -> 1, 4 rings -> 2). */
	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
	CSR_WRITE_4(sc, JME_RSSC, rssc);

	/* Load the system-wide Toeplitz key into the key registers. */
	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < RSSKEY_NREGS; ++i) {
		uint32_t keyreg;

		keyreg = RSSKEY_REGVAL(key, i);
		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);

		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
	}

	/*
	 * Create redirect table in following fashion:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	ind = 0;
	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
		int q;

		q = i % sc->jme_cdata.jme_rx_ring_cnt;
		ind |= q << (i * 8);
	}
	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);

	/* The same byte pattern repeats across all table registers. */
	for (i = 0; i < RSSTBL_NREGS; ++i)
		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
}
3581
/* Disable hardware RSS; all RX traffic goes to the default ring. */
static void
jme_disable_rss(struct jme_softc *sc)
{
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}
3587
3588 static void
3589 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3590 {
3591         struct jme_softc *sc = ifp->if_softc;
3592
3593         ifnet_serialize_array_enter(sc->jme_serialize_arr,
3594             sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3595 }
3596
3597 static void
3598 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3599 {
3600         struct jme_softc *sc = ifp->if_softc;
3601
3602         ifnet_serialize_array_exit(sc->jme_serialize_arr,
3603             sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3604 }
3605
3606 static int
3607 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3608 {
3609         struct jme_softc *sc = ifp->if_softc;
3610
3611         return ifnet_serialize_array_try(sc->jme_serialize_arr,
3612             sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3613 }
3614
3615 #ifdef INVARIANTS
3616
/*
 * INVARIANTS-only ifnet hook: assert that the requested subset of the
 * serializer array is (or is not) held by the current thread.
 */
static void
jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
	    slz, serialized);
}
3627
3628 #endif  /* INVARIANTS */
3629
/*
 * Try to switch the device to MSI-X: build the per-vector descriptions
 * (one status vector, one TX vector, one per RX ring), allocate and
 * activate the vectors, and compute the MSINUM interrupt-source ->
 * vector mapping.  On any failure everything is rolled back via
 * jme_msix_free() and the device stays on its previous interrupt type;
 * sc->jme_irq_type is only set to PCI_INTR_TYPE_MSIX on full success.
 */
static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;
	int offset, offset_def;

	msix_count = JME_MSIXCNT(sc->jme_cdata.jme_rx_ring_cnt);
	KKASSERT(msix_count <= JME_NMSIX);

	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	/* -1 marks "no rid allocated yet" for the cleanup path. */
	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/*
	 * Setup status MSI-X
	 */

	/* Status vector handles the RX-queue-empty bits of every ring. */
	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = 0;
	msix->jme_msix_arg = sc;
	msix->jme_msix_func = jme_msix_status;
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		msix->jme_msix_intrs |=
		    sc->jme_cdata.jme_rx_data[r].jme_rx_empty;
	}
	msix->jme_msix_serialize = &sc->jme_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s sts",
	    device_get_nameunit(dev));

	/*
	 * Setup TX MSI-X
	 */

	/* TX CPU defaults to unit % ncpus2; tunable via msix.txoff. */
	offset_def = device_get_unit(dev) % ncpus2;
	offset = device_getenv_int(dev, "msix.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid msix.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}

	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = offset;
	sc->jme_tx_cpuid = msix->jme_msix_cpuid;
	msix->jme_msix_arg = &sc->jme_cdata;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/*
	 * Setup RX MSI-X
	 */

	/*
	 * RX rings are placed on consecutive CPUs starting at 'offset';
	 * the offset must be a multiple of the ring count (msix.rxoff).
	 */
	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
		    device_get_unit(dev)) % ncpus2;

		offset = device_getenv_int(dev, "msix.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
			device_printf(dev, "invalid msix.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = r + offset;
		KKASSERT(msix->jme_msix_cpuid < ncpus2);
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d", device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Setup jme_msix_cnt early, so we could cleanup */
	sc->jme_msix_cnt = msix_count;

	/* Allocate and activate each vector (vector 0 stays unused). */
	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		msix->jme_msix_vector = i + 1;
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	/*
	 * Build the MSINUM register shadow: for each interrupt source bit,
	 * store the owning vector number in that source's 4-bit nibble.
	 */
	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;

back:
	if (error)
		jme_msix_free(dev);
}
3791
3792 static int
3793 jme_intr_alloc(device_t dev)
3794 {
3795         struct jme_softc *sc = device_get_softc(dev);
3796         u_int irq_flags;
3797
3798         jme_msix_try_alloc(dev);
3799
3800         if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3801                 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3802                     &sc->jme_irq_rid, &irq_flags);
3803
3804                 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3805                     &sc->jme_irq_rid, irq_flags);
3806                 if (sc->jme_irq_res == NULL) {
3807                         device_printf(dev, "can't allocate irq\n");
3808                         return ENXIO;
3809                 }
3810         }
3811         return 0;
3812 }
3813
/*
 * Release all MSI-X resources: IRQ resources, vectors, then the MSI-X
 * configuration itself.  Safe to call on a partially-allocated setup
 * (jme_msix_rid == -1 marks vectors never allocated).
 */
static void
jme_msix_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* MSI-X setup always has at least status + TX vectors. */
	KKASSERT(sc->jme_msix_cnt > 1);

	for (i = 0; i < sc->jme_msix_cnt; ++i) {
		struct jme_msix_data *msix = &sc->jme_msix[i];

		if (msix->jme_msix_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    msix->jme_msix_rid, msix->jme_msix_res);
			msix->jme_msix_res = NULL;
		}
		if (msix->jme_msix_rid >= 0) {
			pci_release_msix_vector(dev, msix->jme_msix_rid);
			msix->jme_msix_rid = -1;
		}
	}
	pci_teardown_msix(dev);
}
3837
3838 static void
3839 jme_intr_free(device_t dev)
3840 {
3841         struct jme_softc *sc = device_get_softc(dev);
3842
3843         if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3844                 if (sc->jme_irq_res != NULL) {
3845                         bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3846                                              sc->jme_irq_res);
3847                 }
3848                 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3849                         pci_release_msi(dev);
3850         } else {
3851                 jme_msix_free(dev);
3852         }
3853 }
3854
/*
 * TX MSI-X handler.  Masks the TX coalescing interrupts, acks the
 * pending TX status bits, reclaims completed descriptors and restarts
 * transmission if needed, then unmasks.  Runs under the TX serializer.
 */
static void
jme_msix_tx(void *xcd)
{
	struct jme_chain_data *cd = xcd;
	struct jme_softc *sc = cd->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&cd->jme_tx_serialize);

	/* Mask TX interrupts while we service them. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);

	if (ifp->if_flags & IFF_RUNNING) {
		jme_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	/* Re-enable TX interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
}
3877
/*
 * Per-ring RX MSI-X handler.  Masks this ring's coalescing interrupt,
 * acks its status bits, drains the ring, then unmasks.  Runs under the
 * ring's serializer.
 */
static void
jme_msix_rx(void *xrdata)
{
	struct jme_rxdata *rdata = xrdata;
	struct jme_softc *sc = rdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	/* Mask this ring's RX interrupt while we service it. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, rdata->jme_rx_coal);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    rdata->jme_rx_coal | rdata->jme_rx_comp);

	if (ifp->if_flags & IFF_RUNNING)
		jme_rxeof(rdata, -1);

	/* Re-enable this ring's RX interrupt. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, rdata->jme_rx_coal);
}
3897
/*
 * Status MSI-X handler: services RX-queue-empty conditions (hardware ran
 * out of RX descriptors).  Masks the empty interrupt, acks it, drains
 * and replenishes the affected rings, restarts the RX engine, then
 * unmasks.  Runs under the main sc->jme_serialize.
 */
static void
jme_msix_status(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_RXQ_DESC_EMPTY);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	status &= INTR_RXQ_DESC_EMPTY;

	/* Ack only the empty bits we are about to handle. */
	if (status)
		CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if ((ifp->if_flags & IFF_RUNNING) && status) {
		int i;

		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[i];

			if (status & rdata->jme_rx_empty) {
				lwkt_serialize_enter(&rdata->jme_rx_serialize);
				jme_rxeof(rdata, -1);
#ifdef JME_RSS_DEBUG
				rdata->jme_rx_emp++;
#endif
				lwkt_serialize_exit(&rdata->jme_rx_serialize);
			}
		}
		/* Kick the RX engine now that descriptors are available. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
		    RXCSR_RX_ENB | RXCSR_RXQ_START);
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_RXQ_DESC_EMPTY);
}
3937
3938 static void
3939 jme_set_msinum(struct jme_softc *sc)
3940 {
3941         int i;
3942
3943         for (i = 0; i < JME_MSINUM_CNT; ++i)
3944                 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3945 }
3946
/*
 * Install the interrupt handler(s): delegates to jme_msix_setup() for
 * MSI-X, otherwise hooks the single shared jme_intr handler and records
 * the interrupt's CPU for the ifnet.  Returns 0 or an errno.
 */
static int
jme_intr_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error;

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
		return jme_msix_setup(dev);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		return error;
	}

	/* Bind if_start processing to the interrupt's CPU. */
	ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
}
3968
3969 static void
3970 jme_intr_teardown(device_t dev)
3971 {
3972         struct jme_softc *sc = device_get_softc(dev);
3973
3974         if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3975                 jme_msix_teardown(dev, sc->jme_msix_cnt);
3976         else
3977                 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3978 }
3979
/*
 * Install one interrupt handler per allocated MSI-X vector.  On failure
 * the handlers already installed are removed via jme_msix_teardown(x)
 * and the errno is returned.  On success the ifnet is bound to the TX
 * vector's CPU.
 */
static int
jme_msix_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int x;

	for (x = 0; x < sc->jme_msix_cnt; ++x) {
		struct jme_msix_data *msix = &sc->jme_msix[x];
		int error;

		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
		    &msix->jme_msix_handle, msix->jme_msix_serialize,
		    msix->jme_msix_desc);
		if (error) {
			device_printf(dev, "could not set up %s "
			    "interrupt handler.\n", msix->jme_msix_desc);
			/* Tear down only the x handlers set up so far. */
			jme_msix_teardown(dev, x);
			return error;
		}
	}
	/* if_start runs on the TX vector's CPU. */
	ifp->if_cpuid = sc->jme_tx_cpuid;
	return 0;
}
4005