d360ccfce882a7a3e0f83199af6fcf77bf38ea5b
[dragonfly.git] / sys / dev / netif / jme / if_jme.c
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29
30 #include "opt_ifpoll.h"
31 #include "opt_jme.h"
32
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_poll.h>
54 #include <net/ifq_var.h>
55 #include <net/toeplitz.h>
56 #include <net/toeplitz2.h>
57 #include <net/vlan/if_vlan_var.h>
58 #include <net/vlan/if_vlan_ether.h>
59
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62
63 #include <dev/netif/mii_layer/miivar.h>
64 #include <dev/netif/mii_layer/jmphyreg.h>
65
66 #include <bus/pci/pcireg.h>
67 #include <bus/pci/pcivar.h>
68 #include <bus/pci/pcidevs.h>
69
70 #include <dev/netif/jme/if_jmereg.h>
71 #include <dev/netif/jme/if_jmevar.h>
72
73 #include "miibus_if.h"
74
75 #define JME_TX_SERIALIZE        1
76 #define JME_RX_SERIALIZE        2
77
78 #define JME_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
79
80 #ifdef JME_RSS_DEBUG
81 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
82 do { \
83         if ((sc)->jme_rss_debug >= (lvl)) \
84                 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
85 } while (0)
86 #else   /* !JME_RSS_DEBUG */
87 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)      ((void)0)
88 #endif  /* JME_RSS_DEBUG */
89
90 static int      jme_probe(device_t);
91 static int      jme_attach(device_t);
92 static int      jme_detach(device_t);
93 static int      jme_shutdown(device_t);
94 static int      jme_suspend(device_t);
95 static int      jme_resume(device_t);
96
97 static int      jme_miibus_readreg(device_t, int, int);
98 static int      jme_miibus_writereg(device_t, int, int, int);
99 static void     jme_miibus_statchg(device_t);
100
101 static void     jme_init(void *);
102 static int      jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
103 static void     jme_start(struct ifnet *);
104 static void     jme_watchdog(struct ifnet *);
105 static void     jme_mediastatus(struct ifnet *, struct ifmediareq *);
106 static int      jme_mediachange(struct ifnet *);
107 #ifdef IFPOLL_ENABLE
108 static void     jme_npoll(struct ifnet *, struct ifpoll_info *);
109 #endif
110 static void     jme_serialize(struct ifnet *, enum ifnet_serialize);
111 static void     jme_deserialize(struct ifnet *, enum ifnet_serialize);
112 static int      jme_tryserialize(struct ifnet *, enum ifnet_serialize);
113 #ifdef INVARIANTS
114 static void     jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
115                     boolean_t);
116 #endif
117
118 static void     jme_intr(void *);
119 static void     jme_msix_tx(void *);
120 static void     jme_msix_rx(void *);
121 static void     jme_txeof(struct jme_softc *);
122 static void     jme_rxeof(struct jme_rxdata *, int);
123 static void     jme_rx_intr(struct jme_softc *, uint32_t);
124 static void     jme_enable_intr(struct jme_softc *);
125 static void     jme_disable_intr(struct jme_softc *);
126
127 static int      jme_msix_setup(device_t);
128 static void     jme_msix_teardown(device_t, int);
129 static int      jme_intr_setup(device_t);
130 static void     jme_intr_teardown(device_t);
131 static void     jme_msix_try_alloc(device_t);
132 static void     jme_msix_free(device_t);
133 static int      jme_intr_alloc(device_t);
134 static void     jme_intr_free(device_t);
135 static int      jme_dma_alloc(struct jme_softc *);
136 static void     jme_dma_free(struct jme_softc *);
137 static int      jme_init_rx_ring(struct jme_rxdata *);
138 static void     jme_init_tx_ring(struct jme_softc *);
139 static void     jme_init_ssb(struct jme_softc *);
140 static int      jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
141 static int      jme_encap(struct jme_softc *, struct mbuf **);
142 static void     jme_rxpkt(struct jme_rxdata *);
143 static int      jme_rxring_dma_alloc(struct jme_rxdata *);
144 static int      jme_rxbuf_dma_alloc(struct jme_rxdata *);
145 static int      jme_rxbuf_dma_filter(void *, bus_addr_t);
146
147 static void     jme_tick(void *);
148 static void     jme_stop(struct jme_softc *);
149 static void     jme_reset(struct jme_softc *);
150 static void     jme_set_msinum(struct jme_softc *);
151 static void     jme_set_vlan(struct jme_softc *);
152 static void     jme_set_filter(struct jme_softc *);
153 static void     jme_stop_tx(struct jme_softc *);
154 static void     jme_stop_rx(struct jme_softc *);
155 static void     jme_mac_config(struct jme_softc *);
156 static void     jme_reg_macaddr(struct jme_softc *, uint8_t[]);
157 static int      jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
158 static int      jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
159 #ifdef notyet
160 static void     jme_setwol(struct jme_softc *);
161 static void     jme_setlinkspeed(struct jme_softc *);
162 #endif
163 static void     jme_set_tx_coal(struct jme_softc *);
164 static void     jme_set_rx_coal(struct jme_softc *);
165 static void     jme_enable_rss(struct jme_softc *);
166 static void     jme_disable_rss(struct jme_softc *);
167 static void     jme_serialize_skipmain(struct jme_softc *);
168 static void     jme_deserialize_skipmain(struct jme_softc *);
169
170 static void     jme_sysctl_node(struct jme_softc *);
171 static int      jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
172 static int      jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
173 static int      jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
174 static int      jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
175 #ifdef IFPOLL_ENABLE
176 static int      jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
177 static int      jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
178 #endif
179
/*
 * Devices supported by this driver.
 *
 * Each entry pairs a PCI vendor/device id with the capability flags
 * copied into sc->jme_caps at probe time and the description string
 * passed to device_set_desc().  The list is terminated by an all-zero
 * entry (jme_name == NULL).
 */
static const struct jme_dev {
        uint16_t        jme_vendorid;   /* PCI vendor id */
        uint16_t        jme_deviceid;   /* PCI device id */
        uint32_t        jme_caps;       /* initial JME_CAP_* flags */
        const char      *jme_name;      /* probe description */
} jme_devs[] = {
        { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
            JME_CAP_JUMBO,
            "JMicron Inc, JMC250 Gigabit Ethernet" },
        { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
            JME_CAP_FASTETH,
            "JMicron Inc, JMC260 Fast Ethernet" },
        { 0, 0, 0, NULL }       /* terminator */
};
197
/*
 * Newbus method table: device lifecycle hooks plus the MII accessors
 * used by the miibus child to talk to the PHY through jme registers.
 */
static device_method_t jme_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe,         jme_probe),
        DEVMETHOD(device_attach,        jme_attach),
        DEVMETHOD(device_detach,        jme_detach),
        DEVMETHOD(device_shutdown,      jme_shutdown),
        DEVMETHOD(device_suspend,       jme_suspend),
        DEVMETHOD(device_resume,        jme_resume),

        /* Bus interface. */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface. */
        DEVMETHOD(miibus_readreg,       jme_miibus_readreg),
        DEVMETHOD(miibus_writereg,      jme_miibus_writereg),
        DEVMETHOD(miibus_statchg,       jme_miibus_statchg),

        { NULL, NULL }  /* terminator */
};
218
/* Driver declaration: name, methods and softc size for newbus. */
static driver_t jme_driver = {
        "jme",
        jme_methods,
        sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

/* Register the module, attach to pci, and hang miibus below jme. */
DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
231
/*
 * Per-RX-ring interrupt status bits, indexed by ring number.  Copied
 * into each jme_rxdata at attach time so the interrupt paths can test
 * coalescing-done, completion and descriptor-empty conditions without
 * switching on the ring index.
 */
static const struct {
        uint32_t        jme_coal;       /* coalescing (pkt cnt/timeout) bits */
        uint32_t        jme_comp;       /* completion bit */
        uint32_t        jme_empty;      /* descriptor-empty bit */
} jme_rx_status[JME_NRXRING_MAX] = {
        { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
          INTR_RXQ0_DESC_EMPTY },
        { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
          INTR_RXQ1_DESC_EMPTY },
        { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
          INTR_RXQ2_DESC_EMPTY },
        { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
          INTR_RXQ3_DESC_EMPTY }
};
246
/*
 * Loader tunables (hw.jme.*).  Values are defaults only; attach
 * re-reads them per-device and clamps/aligns as needed.
 * rx_ring_count == 0 means "auto-size from the number of CPUs".
 */
static int      jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int      jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int      jme_rx_ring_count = 0;
static int      jme_msi_enable = 1;
static int      jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
258
259 static __inline void
260 jme_setup_rxdesc(struct jme_rxdesc *rxd)
261 {
262         struct jme_desc *desc;
263
264         desc = rxd->rx_desc;
265         desc->buflen = htole32(MCLBYTES);
266         desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
267         desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
268         desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
269 }
270
271 /*
272  *      Read a PHY register on the MII of the JMC250.
273  */
274 static int
275 jme_miibus_readreg(device_t dev, int phy, int reg)
276 {
277         struct jme_softc *sc = device_get_softc(dev);
278         uint32_t val;
279         int i;
280
281         /* For FPGA version, PHY address 0 should be ignored. */
282         if (sc->jme_caps & JME_CAP_FPGA) {
283                 if (phy == 0)
284                         return (0);
285         } else {
286                 if (sc->jme_phyaddr != phy)
287                         return (0);
288         }
289
290         CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
291             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
292
293         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
294                 DELAY(1);
295                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
296                         break;
297         }
298         if (i == 0) {
299                 device_printf(sc->jme_dev, "phy read timeout: "
300                               "phy %d, reg %d\n", phy, reg);
301                 return (0);
302         }
303
304         return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
305 }
306
307 /*
308  *      Write a PHY register on the MII of the JMC250.
309  */
310 static int
311 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
312 {
313         struct jme_softc *sc = device_get_softc(dev);
314         int i;
315
316         /* For FPGA version, PHY address 0 should be ignored. */
317         if (sc->jme_caps & JME_CAP_FPGA) {
318                 if (phy == 0)
319                         return (0);
320         } else {
321                 if (sc->jme_phyaddr != phy)
322                         return (0);
323         }
324
325         CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
326             ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
327             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
328
329         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
330                 DELAY(1);
331                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
332                         break;
333         }
334         if (i == 0) {
335                 device_printf(sc->jme_dev, "phy write timeout: "
336                               "phy %d, reg %d\n", phy, reg);
337         }
338
339         return (0);
340 }
341
/*
 *      Callback from MII layer when media changes.
 *
 * Re-synchronizes the MAC with the newly resolved link state: stops
 * the driver and MAC, drains both Tx and Rx paths, reprograms the
 * ring base addresses, and (if the link is up) restarts the MAC with
 * the resolved speed/duplex/flow-control settings.
 *
 * Must be called with all ifnet serializers held; when invoked from
 * jme_tick() the main serializer is already owned, so only the
 * remaining ones are acquired (jme_serialize_skipmain).
 */
static void
jme_miibus_statchg(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii;
        struct jme_txdesc *txd;
        bus_addr_t paddr;
        int i, r;

        if (sc->jme_in_tick)
                jme_serialize_skipmain(sc);
        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        /* Nothing to do while the interface is down. */
        if ((ifp->if_flags & IFF_RUNNING) == 0)
                goto done;

        mii = device_get_softc(sc->jme_miibus);

        /* Derive link state from the resolved media; JMC260 (FASTETH)
         * cannot do 1000baseT, so ignore it there. */
        sc->jme_has_link = FALSE;
        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->jme_has_link = TRUE;
                        break;
                case IFM_1000_T:
                        if (sc->jme_caps & JME_CAP_FASTETH)
                                break;
                        sc->jme_has_link = TRUE;
                        break;
                default:
                        break;
                }
        }

        /*
         * Disabling Rx/Tx MACs have a side-effect of resetting
         * JME_TXNDA/JME_RXNDA register to the first address of
         * Tx/Rx descriptor address. So driver should reset its
         * internal producer/consumer pointer and reclaim any
         * allocated resources.  Note, just saving the value of
         * JME_TXNDA and JME_RXNDA registers before stopping MAC
         * and restoring JME_TXNDA/JME_RXNDA register is not
         * sufficient to make sure correct MAC state because
         * stopping MAC operation can take a while and hardware
         * might have updated JME_TXNDA/JME_RXNDA registers
         * during the stop operation.
         */

        /* Disable interrupts */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

        /* Stop driver */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;
        callout_stop(&sc->jme_tick_ch);

        /* Stop receiver/transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);

        /* Drain every Rx ring and drop any partially assembled frame. */
        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

                jme_rxeof(rdata, -1);
                if (rdata->jme_rxhead != NULL)
                        m_freem(rdata->jme_rxhead);
                JME_RXCHAIN_RESET(rdata);

                /*
                 * Reuse configured Rx descriptors and reset
                 * producer/consumer index.
                 */
                rdata->jme_rx_cons = 0;
        }
        if (JME_ENABLE_HWRSS(sc))
                jme_enable_rss(sc);
        else
                jme_disable_rss(sc);

        /* Reclaim completed Tx descriptors, then drop what's left. */
        jme_txeof(sc);
        if (sc->jme_cdata.jme_tx_cnt != 0) {
                /* Remove queued packets for transmit. */
                for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
                        txd = &sc->jme_cdata.jme_txdesc[i];
                        if (txd->tx_m != NULL) {
                                bus_dmamap_unload(
                                    sc->jme_cdata.jme_tx_tag,
                                    txd->tx_dmamap);
                                m_freem(txd->tx_m);
                                txd->tx_m = NULL;
                                txd->tx_ndesc = 0;
                                ifp->if_oerrors++;
                        }
                }
        }
        jme_init_tx_ring(sc);

        /* Initialize shadow status block. */
        jme_init_ssb(sc);

        /* Program MAC with resolved speed/duplex/flow-control. */
        if (sc->jme_has_link) {
                jme_mac_config(sc);

                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

                /* Set Tx ring address to the hardware. */
                paddr = sc->jme_cdata.jme_tx_ring_paddr;
                CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
                CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

                for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                        /* Select ring r before writing its base address. */
                        CSR_WRITE_4(sc, JME_RXCSR,
                            sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

                        /* Set Rx ring address to the hardware. */
                        paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
                        CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
                        CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
                }

                /* Restart receiver/transmitter. */
                CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
                    RXCSR_RXQ_START);
                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
        }

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef IFPOLL_ENABLE
        if (!(ifp->if_flags & IFF_NPOLLING))
#endif
        /* Reenable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

done:
        if (sc->jme_in_tick)
                jme_deserialize_skipmain(sc);
}
488
489 /*
490  *      Get the current interface media status.
491  */
492 static void
493 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
494 {
495         struct jme_softc *sc = ifp->if_softc;
496         struct mii_data *mii = device_get_softc(sc->jme_miibus);
497
498         ASSERT_IFNET_SERIALIZED_ALL(ifp);
499
500         mii_pollstat(mii);
501         ifmr->ifm_status = mii->mii_media_status;
502         ifmr->ifm_active = mii->mii_media_active;
503 }
504
505 /*
506  *      Set hardware to newly-selected media.
507  */
508 static int
509 jme_mediachange(struct ifnet *ifp)
510 {
511         struct jme_softc *sc = ifp->if_softc;
512         struct mii_data *mii = device_get_softc(sc->jme_miibus);
513         int error;
514
515         ASSERT_IFNET_SERIALIZED_ALL(ifp);
516
517         if (mii->mii_instance != 0) {
518                 struct mii_softc *miisc;
519
520                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
521                         mii_phy_reset(miisc);
522         }
523         error = mii_mediachg(mii);
524
525         return (error);
526 }
527
528 static int
529 jme_probe(device_t dev)
530 {
531         const struct jme_dev *sp;
532         uint16_t vid, did;
533
534         vid = pci_get_vendor(dev);
535         did = pci_get_device(dev);
536         for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
537                 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
538                         struct jme_softc *sc = device_get_softc(dev);
539
540                         sc->jme_caps = sp->jme_caps;
541                         device_set_desc(dev, sp->jme_name);
542                         return (0);
543                 }
544         }
545         return (ENXIO);
546 }
547
548 static int
549 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
550 {
551         uint32_t reg;
552         int i;
553
554         *val = 0;
555         for (i = JME_TIMEOUT; i > 0; i--) {
556                 reg = CSR_READ_4(sc, JME_SMBCSR);
557                 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
558                         break;
559                 DELAY(1);
560         }
561
562         if (i == 0) {
563                 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
564                 return (ETIMEDOUT);
565         }
566
567         reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
568         CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
569         for (i = JME_TIMEOUT; i > 0; i--) {
570                 DELAY(1);
571                 reg = CSR_READ_4(sc, JME_SMBINTF);
572                 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
573                         break;
574         }
575
576         if (i == 0) {
577                 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
578                 return (ETIMEDOUT);
579         }
580
581         reg = CSR_READ_4(sc, JME_SMBINTF);
582         *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
583
584         return (0);
585 }
586
/*
 * Try to recover the station address from EEPROM descriptors.
 *
 * The EEPROM starts with a two-byte signature (SIG0/SIG1) followed by
 * fixed-size descriptors of the form {fup, reg, val}: when 'fup'
 * matches function 0 / page BAR1 and 'reg' falls inside the PAR0
 * station-address window, 'val' is one byte of the MAC address.
 * Scans until all ETHER_ADDR_LEN bytes were found, the DESC_END flag
 * is seen, or the EEPROM end is reached.
 *
 * Returns 0 with eaddr[] filled on success, ENOENT if the signature
 * is absent/unreadable or the address is incomplete.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
        uint8_t fup, reg, val;
        uint32_t offset;
        int match;

        /* Verify the two-byte EEPROM signature. */
        offset = 0;
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG0)
                return (ENOENT);
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG1)
                return (ENOENT);
        match = 0;
        do {
                if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
                        break;
                /* Descriptor for func 0, page BAR1? */
                if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
                    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
                        if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
                                break;
                        /* Register inside the station-address window? */
                        if (reg >= JME_PAR0 &&
                            reg < JME_PAR0 + ETHER_ADDR_LEN) {
                                if (jme_eeprom_read_byte(sc, offset + 2,
                                    &val) != 0)
                                        break;
                                eaddr[reg - JME_PAR0] = val;
                                match++;
                        }
                }
                /* Check for the end of EEPROM descriptor. */
                if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
                        break;
                /* Try next eeprom descriptor. */
                offset += JME_EEPROM_DESC_BYTES;
        } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

        if (match == ETHER_ADDR_LEN)
                return (0);

        return (ENOENT);
}
630
631 static void
632 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
633 {
634         uint32_t par0, par1;
635
636         /* Read station address. */
637         par0 = CSR_READ_4(sc, JME_PAR0);
638         par1 = CSR_READ_4(sc, JME_PAR1);
639         par1 &= 0xFFFF;
640         if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
641                 device_printf(sc->jme_dev,
642                     "generating fake ethernet address.\n");
643                 par0 = karc4random();
644                 /* Set OUI to JMicron. */
645                 eaddr[0] = 0x00;
646                 eaddr[1] = 0x1B;
647                 eaddr[2] = 0x8C;
648                 eaddr[3] = (par0 >> 16) & 0xff;
649                 eaddr[4] = (par0 >> 8) & 0xff;
650                 eaddr[5] = par0 & 0xff;
651         } else {
652                 eaddr[0] = (par0 >> 0) & 0xFF;
653                 eaddr[1] = (par0 >> 8) & 0xFF;
654                 eaddr[2] = (par0 >> 16) & 0xFF;
655                 eaddr[3] = (par0 >> 24) & 0xFF;
656                 eaddr[4] = (par1 >> 0) & 0xFF;
657                 eaddr[5] = (par1 >> 8) & 0xFF;
658         }
659 }
660
661 static int
662 jme_attach(device_t dev)
663 {
664         struct jme_softc *sc = device_get_softc(dev);
665         struct ifnet *ifp = &sc->arpcom.ac_if;
666         uint32_t reg;
667         uint16_t did;
668         uint8_t pcie_ptr, rev;
669         int error = 0, i, j, rx_desc_cnt;
670         uint8_t eaddr[ETHER_ADDR_LEN];
671 #ifdef IFPOLL_ENABLE
672         int offset, offset_def;
673 #endif
674
675         lwkt_serialize_init(&sc->jme_serialize);
676         lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
677         for (i = 0; i < JME_NRXRING_MAX; ++i) {
678                 lwkt_serialize_init(
679                     &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
680         }
681
682         rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
683             jme_rx_desc_count);
684         rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
685         if (rx_desc_cnt > JME_NDESC_MAX)
686                 rx_desc_cnt = JME_NDESC_MAX;
687
688         sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
689             jme_tx_desc_count);
690         sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
691             JME_NDESC_ALIGN);
692         if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
693                 sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
694
695         /*
696          * Calculate rx rings
697          */
698         sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
699             jme_rx_ring_count);
700         sc->jme_cdata.jme_rx_ring_cnt =
701             if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
702
703         i = 0;
704         sc->jme_serialize_arr[i++] = &sc->jme_serialize;
705
706         KKASSERT(i == JME_TX_SERIALIZE);
707         sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
708
709         KKASSERT(i == JME_RX_SERIALIZE);
710         for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
711                 sc->jme_serialize_arr[i++] =
712                     &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
713         }
714         KKASSERT(i <= JME_NSERIALIZE);
715         sc->jme_serialize_cnt = i;
716
717         sc->jme_cdata.jme_sc = sc;
718         for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
719                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
720
721                 rdata->jme_sc = sc;
722                 rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
723                 rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
724                 rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
725                 rdata->jme_rx_idx = i;
726                 rdata->jme_rx_desc_cnt = rx_desc_cnt;
727         }
728
729         sc->jme_dev = dev;
730         sc->jme_lowaddr = BUS_SPACE_MAXADDR;
731
732         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
733
734         callout_init(&sc->jme_tick_ch);
735
736 #ifndef BURN_BRIDGES
737         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
738                 uint32_t irq, mem;
739
740                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
741                 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
742
743                 device_printf(dev, "chip is in D%d power mode "
744                     "-- setting to D0\n", pci_get_powerstate(dev));
745
746                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
747
748                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
749                 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
750         }
751 #endif  /* !BURN_BRIDGE */
752
753         /* Enable bus mastering */
754         pci_enable_busmaster(dev);
755
756         /*
757          * Allocate IO memory
758          *
759          * JMC250 supports both memory mapped and I/O register space
760          * access.  Because I/O register access should use different
761          * BARs to access registers it's waste of time to use I/O
762          * register spce access.  JMC250 uses 16K to map entire memory
763          * space.
764          */
765         sc->jme_mem_rid = JME_PCIR_BAR;
766         sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
767                                                  &sc->jme_mem_rid, RF_ACTIVE);
768         if (sc->jme_mem_res == NULL) {
769                 device_printf(dev, "can't allocate IO memory\n");
770                 return ENXIO;
771         }
772         sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
773         sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
774
775         /*
776          * Allocate IRQ
777          */
778         error = jme_intr_alloc(dev);
779         if (error)
780                 goto fail;
781
782         /*
783          * Extract revisions
784          */
785         reg = CSR_READ_4(sc, JME_CHIPMODE);
786         if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
787             CHIPMODE_NOT_FPGA) {
788                 sc->jme_caps |= JME_CAP_FPGA;
789                 if (bootverbose) {
790                         device_printf(dev, "FPGA revision: 0x%04x\n",
791                                       (reg & CHIPMODE_FPGA_REV_MASK) >>
792                                       CHIPMODE_FPGA_REV_SHIFT);
793                 }
794         }
795
796         /* NOTE: FM revision is put in the upper 4 bits */
797         rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
798         rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
799         if (bootverbose)
800                 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
801
802         did = pci_get_device(dev);
803         switch (did) {
804         case PCI_PRODUCT_JMICRON_JMC250:
805                 if (rev == JME_REV1_A2)
806                         sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
807                 break;
808
809         case PCI_PRODUCT_JMICRON_JMC260:
810                 if (rev == JME_REV2)
811                         sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
812                 break;
813
814         default:
815                 panic("unknown device id 0x%04x", did);
816         }
817         if (rev >= JME_REV2) {
818                 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
819                 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
820                                       GHC_TXMAC_CLKSRC_1000;
821         }
822
823         /* Reset the ethernet controller. */
824         jme_reset(sc);
825
826         /* Map MSI/MSI-X vectors */
827         jme_set_msinum(sc);
828
829         /* Get station address. */
830         reg = CSR_READ_4(sc, JME_SMBCSR);
831         if (reg & SMBCSR_EEPROM_PRESENT)
832                 error = jme_eeprom_macaddr(sc, eaddr);
833         if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
834                 if (error != 0 && (bootverbose)) {
835                         device_printf(dev, "ethernet hardware address "
836                                       "not found in EEPROM.\n");
837                 }
838                 jme_reg_macaddr(sc, eaddr);
839         }
840
841         /*
842          * Save PHY address.
843          * Integrated JR0211 has fixed PHY address whereas FPGA version
844          * requires PHY probing to get correct PHY address.
845          */
846         if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
847                 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
848                     GPREG0_PHY_ADDR_MASK;
849                 if (bootverbose) {
850                         device_printf(dev, "PHY is at address %d.\n",
851                             sc->jme_phyaddr);
852                 }
853         } else {
854                 sc->jme_phyaddr = 0;
855         }
856
857         /* Set max allowable DMA size. */
858         pcie_ptr = pci_get_pciecap_ptr(dev);
859         if (pcie_ptr != 0) {
860                 uint16_t ctrl;
861
862                 sc->jme_caps |= JME_CAP_PCIE;
863                 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
864                 if (bootverbose) {
865                         device_printf(dev, "Read request size : %d bytes.\n",
866                             128 << ((ctrl >> 12) & 0x07));
867                         device_printf(dev, "TLP payload size : %d bytes.\n",
868                             128 << ((ctrl >> 5) & 0x07));
869                 }
870                 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
871                 case PCIEM_DEVCTL_MAX_READRQ_128:
872                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
873                         break;
874                 case PCIEM_DEVCTL_MAX_READRQ_256:
875                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
876                         break;
877                 default:
878                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
879                         break;
880                 }
881                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
882         } else {
883                 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
884                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
885         }
886
887 #ifdef notyet
888         if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
889                 sc->jme_caps |= JME_CAP_PMCAP;
890 #endif
891
892 #ifdef IFPOLL_ENABLE
893         /*
894          * NPOLLING RX CPU offset
895          */
896         if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
897                 offset = 0;
898         } else {
899                 offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
900                     device_get_unit(dev)) % ncpus2;
901                 offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
902                 if (offset >= ncpus2 ||
903                     offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
904                         device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
905                             offset, offset_def);
906                         offset = offset_def;
907                 }
908         }
909         sc->jme_npoll_rxoff = offset;
910
911         /*
912          * NPOLLING TX CPU offset
913          */
914         offset_def = sc->jme_npoll_rxoff;
915         offset = device_getenv_int(dev, "npoll.txoff", offset_def);
916         if (offset >= ncpus2) {
917                 device_printf(dev, "invalid npoll.txoff %d, use %d\n",
918                     offset, offset_def);
919                 offset = offset_def;
920         }
921         sc->jme_npoll_txoff = offset;
922 #endif
923
924         /*
925          * Create sysctl tree
926          */
927         jme_sysctl_node(sc);
928
929         /* Allocate DMA stuffs */
930         error = jme_dma_alloc(sc);
931         if (error)
932                 goto fail;
933
934         ifp->if_softc = sc;
935         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
936         ifp->if_init = jme_init;
937         ifp->if_ioctl = jme_ioctl;
938         ifp->if_start = jme_start;
939 #ifdef IFPOLL_ENABLE
940         ifp->if_npoll = jme_npoll;
941 #endif
942         ifp->if_watchdog = jme_watchdog;
943         ifp->if_serialize = jme_serialize;
944         ifp->if_deserialize = jme_deserialize;
945         ifp->if_tryserialize = jme_tryserialize;
946 #ifdef INVARIANTS
947         ifp->if_serialize_assert = jme_serialize_assert;
948 #endif
949         ifq_set_maxlen(&ifp->if_snd,
950             sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
951         ifq_set_ready(&ifp->if_snd);
952
953         /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
954         ifp->if_capabilities = IFCAP_HWCSUM |
955                                IFCAP_TSO |
956                                IFCAP_VLAN_MTU |
957                                IFCAP_VLAN_HWTAGGING;
958         if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
959                 ifp->if_capabilities |= IFCAP_RSS;
960         ifp->if_capenable = ifp->if_capabilities;
961
962         /*
963          * Disable TXCSUM by default to improve bulk data
964          * transmit performance (+20Mbps improvement).
965          */
966         ifp->if_capenable &= ~IFCAP_TXCSUM;
967
968         if (ifp->if_capenable & IFCAP_TXCSUM)
969                 ifp->if_hwassist |= JME_CSUM_FEATURES;
970         ifp->if_hwassist |= CSUM_TSO;
971
972         /* Set up MII bus. */
973         error = mii_phy_probe(dev, &sc->jme_miibus,
974                               jme_mediachange, jme_mediastatus);
975         if (error) {
976                 device_printf(dev, "no PHY found!\n");
977                 goto fail;
978         }
979
980         /*
981          * Save PHYADDR for FPGA mode PHY.
982          */
983         if (sc->jme_caps & JME_CAP_FPGA) {
984                 struct mii_data *mii = device_get_softc(sc->jme_miibus);
985
986                 if (mii->mii_instance != 0) {
987                         struct mii_softc *miisc;
988
989                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
990                                 if (miisc->mii_phy != 0) {
991                                         sc->jme_phyaddr = miisc->mii_phy;
992                                         break;
993                                 }
994                         }
995                         if (sc->jme_phyaddr != 0) {
996                                 device_printf(sc->jme_dev,
997                                     "FPGA PHY is at %d\n", sc->jme_phyaddr);
998                                 /* vendor magic. */
999                                 jme_miibus_writereg(dev, sc->jme_phyaddr,
1000                                     JMPHY_CONF, JMPHY_CONF_DEFFIFO);
1001
1002                                 /* XXX should we clear JME_WA_EXTFIFO */
1003                         }
1004                 }
1005         }
1006
1007         ether_ifattach(ifp, eaddr, NULL);
1008
1009         /* Tell the upper layer(s) we support long frames. */
1010         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1011
1012         error = jme_intr_setup(dev);
1013         if (error) {
1014                 ether_ifdetach(ifp);
1015                 goto fail;
1016         }
1017
1018         return 0;
1019 fail:
1020         jme_detach(dev);
1021         return (error);
1022 }
1023
/*
 * Device detach: stop the hardware and release every resource that
 * jme_attach() acquired.  This is also the error-unwind path of
 * jme_attach() (its "fail:" label), so every teardown step below must
 * tolerate partially initialized state — hence the NULL checks.
 */
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/*
		 * Quiesce the chip and remove the interrupt handlers
		 * under full serialization, then detach from the
		 * network stack.
		 */
		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	/* Delete the miibus child (if probed) before generic detach. */
	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	/* Release interrupt resources. */
	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	/* Free rings, buffers, tags and software descriptor arrays. */
	jme_dma_free(sc);

	return (0);
}
1058
1059 static void
1060 jme_sysctl_node(struct jme_softc *sc)
1061 {
1062         int coal_max;
1063 #ifdef JME_RSS_DEBUG
1064         int r;
1065 #endif
1066
1067         sysctl_ctx_init(&sc->jme_sysctl_ctx);
1068         sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
1069                                 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
1070                                 device_get_nameunit(sc->jme_dev),
1071                                 CTLFLAG_RD, 0, "");
1072         if (sc->jme_sysctl_tree == NULL) {
1073                 device_printf(sc->jme_dev, "can't add sysctl node\n");
1074                 return;
1075         }
1076
1077         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1078             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1079             "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1080             sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
1081
1082         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1083             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1084             "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1085             sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
1086
1087         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1088             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1089             "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1090             sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
1091
1092         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1093             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1094             "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1095             sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
1096
1097         SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1098                        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1099                        "rx_desc_count", CTLFLAG_RD,
1100                        &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
1101                        0, "RX desc count");
1102         SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1103                        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1104                        "tx_desc_count", CTLFLAG_RD,
1105                        &sc->jme_cdata.jme_tx_desc_cnt,
1106                        0, "TX desc count");
1107         SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1108                        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1109                        "rx_ring_count", CTLFLAG_RD,
1110                        &sc->jme_cdata.jme_rx_ring_cnt,
1111                        0, "RX ring count");
1112
1113 #ifdef JME_RSS_DEBUG
1114         SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1115                        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1116                        "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
1117                        0, "RSS debug level");
1118         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1119                 char rx_ring_desc[32];
1120
1121                 ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
1122                     "rx_ring%d_pkt", r);
1123                 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
1124                     SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1125                     rx_ring_desc, CTLFLAG_RW,
1126                     &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
1127
1128                 ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
1129                     "rx_ring%d_emp", r);
1130                 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
1131                     SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1132                     rx_ring_desc, CTLFLAG_RW,
1133                     &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
1134                     "# of time RX ring empty");
1135         }
1136 #endif
1137
1138 #ifdef IFPOLL_ENABLE
1139         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1140             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1141             "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
1142             jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
1143         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1144             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1145             "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
1146             jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
1147 #endif
1148
1149         /*
1150          * Set default coalesce valves
1151          */
1152         sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1153         sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1154         sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1155         sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1156
1157         /*
1158          * Adjust coalesce valves, in case that the number of TX/RX
1159          * descs are set to small values by users.
1160          *
1161          * NOTE: coal_max will not be zero, since number of descs
1162          * must aligned by JME_NDESC_ALIGN (16 currently)
1163          */
1164         coal_max = sc->jme_cdata.jme_tx_desc_cnt / 2;
1165         if (coal_max < sc->jme_tx_coal_pkt)
1166                 sc->jme_tx_coal_pkt = coal_max;
1167
1168         coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
1169         if (coal_max < sc->jme_rx_coal_pkt)
1170                 sc->jme_rx_coal_pkt = coal_max;
1171 }
1172
/*
 * Allocate all DMA resources used by the driver: the software TX/RX
 * descriptor arrays, the TX descriptor ring, the RX descriptor rings,
 * the shadow status block and the DMA tags/maps for TX/RX buffers.
 *
 * Returns 0 on success or a bus_dma error code.  On failure the caller
 * (jme_attach()) unwinds via jme_detach() -> jme_dma_free(), which
 * tolerates the partially allocated state left behind here.
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	/* Software state for each TX descriptor, and per-ring RX state. */
	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rxdesc =
		kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			/*
			 * Destroy the maps created so far and clear the
			 * tag pointer, so jme_dma_free() does not try to
			 * destroy them again.
			 */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
1321
/*
 * Release every DMA resource created by jme_dma_alloc().  Each
 * resource is checked for NULL/validity first, so this is safe to call
 * on a partially initialized softc (e.g. from the jme_attach() failure
 * path via jme_detach()).
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers (per-descriptor maps plus the spare map). */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags last, after all children are destroyed. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	/* Software descriptor arrays. */
	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
1414
1415 /*
1416  *      Make sure the interface is stopped at reboot time.
1417  */
1418 static int
1419 jme_shutdown(device_t dev)
1420 {
1421         return jme_suspend(dev);
1422 }
1423
1424 #ifdef notyet
1425 /*
1426  * Unlike other ethernet controllers, JMC250 requires
1427  * explicit resetting link speed to 10/100Mbps as gigabit
1428  * link will cunsume more power than 375mA.
1429  * Note, we reset the link speed to 10/100Mbps with
1430  * auto-negotiation but we don't know whether that operation
1431  * would succeed or not as we have no control after powering
1432  * off. If the renegotiation fail WOL may not work. Running
1433  * at 1Gbps draws more power than 375mA at 3.3V which is
1434  * specified in PCI specification and that would result in
1435  * complete shutdowning power to ethernet controller.
1436  *
1437  * TODO
1438  *  Save current negotiated media speed/duplex/flow-control
1439  *  to softc and restore the same link again after resuming.
1440  *  PHY handling such as power down/resetting to 100Mbps
1441  *  may be better handled in suspend method in phy driver.
1442  */
1443 static void
1444 jme_setlinkspeed(struct jme_softc *sc)
1445 {
1446         struct mii_data *mii;
1447         int aneg, i;
1448
1449         JME_LOCK_ASSERT(sc);
1450
1451         mii = device_get_softc(sc->jme_miibus);
1452         mii_pollstat(mii);
1453         aneg = 0;
1454         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1455                 switch IFM_SUBTYPE(mii->mii_media_active) {
1456                 case IFM_10_T:
1457                 case IFM_100_TX:
1458                         return;
1459                 case IFM_1000_T:
1460                         aneg++;
1461                 default:
1462                         break;
1463                 }
1464         }
1465         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1466         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1467             ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1468         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1469             BMCR_AUTOEN | BMCR_STARTNEG);
1470         DELAY(1000);
1471         if (aneg != 0) {
1472                 /* Poll link state until jme(4) get a 10/100 link. */
1473                 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1474                         mii_pollstat(mii);
1475                         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1476                                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1477                                 case IFM_10_T:
1478                                 case IFM_100_TX:
1479                                         jme_mac_config(sc);
1480                                         return;
1481                                 default:
1482                                         break;
1483                                 }
1484                         }
1485                         JME_UNLOCK(sc);
1486                         pause("jmelnk", hz);
1487                         JME_LOCK(sc);
1488                 }
1489                 if (i == MII_ANEGTICKS_GIGE)
1490                         device_printf(sc->jme_dev, "establishing link failed, "
1491                             "WOL may not work!");
1492         }
1493         /*
1494          * No link, force MAC to have 100Mbps, full-duplex link.
1495          * This is the last resort and may/may not work.
1496          */
1497         mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1498         mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1499         jme_mac_config(sc);
1500 }
1501
1502 static void
1503 jme_setwol(struct jme_softc *sc)
1504 {
1505         struct ifnet *ifp = &sc->arpcom.ac_if;
1506         uint32_t gpr, pmcs;
1507         uint16_t pmstat;
1508         int pmc;
1509
1510         if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1511                 /* No PME capability, PHY power down. */
1512                 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1513                     MII_BMCR, BMCR_PDOWN);
1514                 return;
1515         }
1516
1517         gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1518         pmcs = CSR_READ_4(sc, JME_PMCS);
1519         pmcs &= ~PMCS_WOL_ENB_MASK;
1520         if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1521                 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1522                 /* Enable PME message. */
1523                 gpr |= GPREG0_PME_ENB;
1524                 /* For gigabit controllers, reset link speed to 10/100. */
1525                 if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
1526                         jme_setlinkspeed(sc);
1527         }
1528
1529         CSR_WRITE_4(sc, JME_PMCS, pmcs);
1530         CSR_WRITE_4(sc, JME_GPREG0, gpr);
1531
1532         /* Request PME. */
1533         pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1534         pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1535         if ((ifp->if_capenable & IFCAP_WOL) != 0)
1536                 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1537         pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1538         if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1539                 /* No WOL, PHY power down. */
1540                 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1541                     MII_BMCR, BMCR_PDOWN);
1542         }
1543 }
1544 #endif
1545
/*
 * Power management suspend: stop the chip under full serialization.
 * WOL programming (jme_setwol()) is compiled out for now.  Also used
 * by jme_shutdown() at reboot time.
 */
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}
1561
1562 static int
1563 jme_resume(device_t dev)
1564 {
1565         struct jme_softc *sc = device_get_softc(dev);
1566         struct ifnet *ifp = &sc->arpcom.ac_if;
1567 #ifdef notyet
1568         int pmc;
1569 #endif
1570
1571         ifnet_serialize_all(ifp);
1572
1573 #ifdef notyet
1574         if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1575                 uint16_t pmstat;
1576
1577                 pmstat = pci_read_config(sc->jme_dev,
1578                     pmc + PCIR_POWER_STATUS, 2);
1579                 /* Disable PME clear PME status. */
1580                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1581                 pci_write_config(sc->jme_dev,
1582                     pmc + PCIR_POWER_STATUS, pmstat, 2);
1583         }
1584 #endif
1585
1586         if (ifp->if_flags & IFF_UP)
1587                 jme_init(sc);
1588
1589         ifnet_deserialize_all(ifp);
1590
1591         return (0);
1592 }
1593
1594 static __inline int
1595 jme_tso_pullup(struct mbuf **mp)
1596 {
1597         int hoff, iphlen, thoff;
1598         struct mbuf *m;
1599
1600         m = *mp;
1601         KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
1602
1603         iphlen = m->m_pkthdr.csum_iphlen;
1604         thoff = m->m_pkthdr.csum_thlen;
1605         hoff = m->m_pkthdr.csum_lhlen;
1606
1607         KASSERT(iphlen > 0, ("invalid ip hlen"));
1608         KASSERT(thoff > 0, ("invalid tcp hlen"));
1609         KASSERT(hoff > 0, ("invalid ether hlen"));
1610
1611         if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
1612                 m = m_pullup(m, hoff + iphlen + thoff);
1613                 if (m == NULL) {
1614                         *mp = NULL;
1615                         return ENOBUFS;
1616                 }
1617                 *mp = m;
1618         }
1619         return 0;
1620 }
1621
/*
 * Load an mbuf chain into the TX ring at the current producer index.
 *
 * Sets up checksum/TSO/VLAN flags in the first descriptor, then fills
 * the remaining segment descriptors, and only at the very end hands
 * the first descriptor to the hardware (OWN bit) so the chip never
 * sees a half-built chain.  With 64bit DMA addressing the first
 * descriptor is a payload-less "symbol" descriptor.
 *
 * On success the (possibly defragmented) mbuf in *m_head is owned by
 * the TX ring; on failure it is freed and *m_head is set to NULL.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/* 64bit DMA consumes one extra (symbol) descriptor per frame. */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	/*
	 * Cap the DMA segment count by the free descriptors, always
	 * keeping JME_TXD_RSVD (plus the symbol descriptor) in reserve.
	 */
	maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
		("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);

	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
1757
/*
 * if_start handler: drain the interface send queue into the TX ring
 * and kick the transmitter once per batch.
 *
 * Called with the TX serializer held (asserted below).
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	/* Without a link, transmitting is pointless; drop everything. */
	if (!sc->jme_has_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Opportunistically reclaim descriptors when the ring is nearly full. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE >
		    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
1826
/*
 * TX watchdog timeout handler.
 *
 * Recovery is graded: reinitialize on a lost link, just reclaim
 * descriptors and restart transmission if TX interrupts appear to
 * have been missed, otherwise fully reinitialize the chip.
 */
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (!sc->jme_has_link) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		/* All descriptors were reclaimed; only the IRQ was lost. */
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1856
/*
 * if_ioctl handler: MTU changes, interface flags, multicast filter,
 * media selection and capability toggles.
 *
 * Called with all interface serializers held (asserted below).
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU is only valid on chips with JME_CAP_JUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only reprogram the RX filter if the
				 * relevant flags actually changed. */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* TX csum offload only fits frames smaller than the TX FIFO. */
		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1966
/*
 * Reprogram the MAC (GHC, RX/TX MAC, pause) and apply PHY FIFO /
 * half-duplex workarounds to match the speed/duplex resolved by the
 * PHY.  The MAC is reset first, then rebuilt from scratch.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		/* Half duplex: enable collision detection and backoff. */
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only chips cannot do 1000baseT. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	/* Apply the workaround registers only on chips that need them. */
	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
2061
/*
 * Legacy/MSI interrupt handler.
 *
 * Masks all chip interrupts, acknowledges the pending status (also
 * resetting the PCC coalescing counters), dispatches RX and TX
 * processing, then re-enables interrupts.  Runs with sc->jme_serialize
 * held (asserted below).
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	/* 0xFFFFFFFF means the device is gone (e.g. detached/unpowered). */
	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
2126
/*
 * Reclaim TX descriptors whose frames the hardware has finished
 * transmitting: free the mbufs, clear the descriptors and update the
 * consumer index and active-descriptor count.  Callers hold the TX
 * serializer (see jme_intr/jme_start).
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int cons;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * This chip will always update the TX descriptor's
		 * buflen field and this updating always happens
		 * after clearing the OWN bit, so even if the OWN
		 * bit is cleared by the chip, we still can't be sure
		 * whether the buflen field has been updated
		 * by the chip or not.  To avoid this race, we wait
		 * for the next TX descriptor's OWN bit to be cleared
		 * by the chip before reusing this TX descriptor.
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc,
		    sc->jme_cdata.jme_tx_desc_cnt);
		next_txd = &sc->jme_cdata.jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				/* buflen holds the collision count here. */
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so the driver has to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* 1 for symbol TX descriptor */
	if (sc->jme_cdata.jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	/* Clear OACTIVE once enough descriptors are free again. */
	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE <=
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
2217
2218 static __inline void
2219 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2220 {
2221         int i;
2222
2223         for (i = 0; i < count; ++i) {
2224                 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2225                 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2226         }
2227 }
2228
2229 static __inline struct pktinfo *
2230 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2231 {
2232         if (flags & JME_RD_IPV4)
2233                 pi->pi_netisr = NETISR_IP;
2234         else if (flags & JME_RD_IPV6)
2235                 pi->pi_netisr = NETISR_IPV6;
2236         else
2237                 return NULL;
2238
2239         pi->pi_flags = 0;
2240         pi->pi_l3proto = IPPROTO_UNKNOWN;
2241
2242         if (flags & JME_RD_MORE_FRAG)
2243                 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2244         else if (flags & JME_RD_TCP)
2245                 pi->pi_l3proto = IPPROTO_TCP;
2246         else if (flags & JME_RD_UDP)
2247                 pi->pi_l3proto = IPPROTO_UDP;
2248         else
2249                 pi = NULL;
2250         return pi;
2251 }
2252
2253 /* Receive a frame. */
2254 static void
2255 jme_rxpkt(struct jme_rxdata *rdata)
2256 {
2257         struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
2258         struct jme_desc *desc;
2259         struct jme_rxdesc *rxd;
2260         struct mbuf *mp, *m;
2261         uint32_t flags, status, hash, hashinfo;
2262         int cons, count, nsegs;
2263
2264         cons = rdata->jme_rx_cons;
2265         desc = &rdata->jme_rx_ring[cons];
2266
2267         flags = le32toh(desc->flags);
2268         status = le32toh(desc->buflen);
2269         hash = le32toh(desc->addr_hi);
2270         hashinfo = le32toh(desc->addr_lo);
2271         nsegs = JME_RX_NSEGS(status);
2272
2273         if (nsegs > 1) {
2274                 /* Skip the first descriptor. */
2275                 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2276
2277                 /*
2278                  * Clear the OWN bit of the following RX descriptors;
2279                  * hardware will not clear the OWN bit except the first
2280                  * RX descriptor.
2281                  *
2282                  * Since the first RX descriptor is setup, i.e. OWN bit
2283                  * on, before its followins RX descriptors, leaving the
2284                  * OWN bit on the following RX descriptors will trick
2285                  * the hardware into thinking that the following RX
2286                  * descriptors are ready to be used too.
2287                  */
2288                 for (count = 1; count < nsegs; count++,
2289                      JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2290                         rdata->jme_rx_ring[cons].flags = 0;
2291
2292                 cons = rdata->jme_rx_cons;
2293         }
2294
2295         JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
2296                         "hash 0x%08x, hash info 0x%08x\n",
2297                         rdata->jme_rx_idx, flags, hash, hashinfo);
2298
2299         if (status & JME_RX_ERR_STAT) {
2300                 ifp->if_ierrors++;
2301                 jme_discard_rxbufs(rdata, cons, nsegs);
2302 #ifdef JME_SHOW_ERRORS
2303                 if_printf(ifp, "%s : receive error = 0x%b\n",
2304                     __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2305 #endif
2306                 rdata->jme_rx_cons += nsegs;
2307                 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2308                 return;
2309         }
2310
2311         rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2312         for (count = 0; count < nsegs; count++,
2313              JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
2314                 rxd = &rdata->jme_rxdesc[cons];
2315                 mp = rxd->rx_m;
2316
2317                 /* Add a new receive buffer to the ring. */
2318                 if (jme_newbuf(rdata, rxd, 0) != 0) {
2319                         ifp->if_iqdrops++;
2320                         /* Reuse buffer. */
2321                         jme_discard_rxbufs(rdata, cons, nsegs - count);
2322                         if (rdata->jme_rxhead != NULL) {
2323                                 m_freem(rdata->jme_rxhead);
2324                                 JME_RXCHAIN_RESET(rdata);
2325                         }
2326                         break;
2327                 }
2328
2329                 /*
2330                  * Assume we've received a full sized frame.
2331                  * Actual size is fixed when we encounter the end of
2332                  * multi-segmented frame.
2333                  */
2334                 mp->m_len = MCLBYTES;
2335
2336                 /* Chain received mbufs. */
2337                 if (rdata->jme_rxhead == NULL) {
2338                         rdata->jme_rxhead = mp;
2339                         rdata->jme_rxtail = mp;
2340                 } else {
2341                         /*
2342                          * Receive processor can receive a maximum frame
2343                          * size of 65535 bytes.
2344                          */
2345                         rdata->jme_rxtail->m_next = mp;
2346                         rdata->jme_rxtail = mp;
2347                 }
2348
2349                 if (count == nsegs - 1) {
2350                         struct pktinfo pi0, *pi;
2351
2352                         /* Last desc. for this frame. */
2353                         m = rdata->jme_rxhead;
2354                         m->m_pkthdr.len = rdata->jme_rxlen;
2355                         if (nsegs > 1) {
2356                                 /* Set first mbuf size. */
2357                                 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2358                                 /* Set last mbuf size. */
2359                                 mp->m_len = rdata->jme_rxlen -
2360                                     ((MCLBYTES - JME_RX_PAD_BYTES) +
2361                                     (MCLBYTES * (nsegs - 2)));
2362                         } else {
2363                                 m->m_len = rdata->jme_rxlen;
2364                         }
2365                         m->m_pkthdr.rcvif = ifp;
2366
2367                         /*
2368                          * Account for 10bytes auto padding which is used
2369                          * to align IP header on 32bit boundary. Also note,
2370                          * CRC bytes is automatically removed by the
2371                          * hardware.
2372                          */
2373                         m->m_data += JME_RX_PAD_BYTES;
2374
2375                         /* Set checksum information. */
2376                         if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2377                             (flags & JME_RD_IPV4)) {
2378                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2379                                 if (flags & JME_RD_IPCSUM)
2380                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2381                                 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2382                                     ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2383                                      (JME_RD_TCP | JME_RD_TCPCSUM) ||
2384                                      (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2385                                      (JME_RD_UDP | JME_RD_UDPCSUM))) {
2386                                         m->m_pkthdr.csum_flags |=
2387                                             CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2388                                         m->m_pkthdr.csum_data = 0xffff;
2389                                 }
2390                         }
2391
2392                         /* Check for VLAN tagged packets. */
2393                         if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2394                             (flags & JME_RD_VLAN_TAG)) {
2395                                 m->m_pkthdr.ether_vlantag =
2396                                     flags & JME_RD_VLAN_MASK;
2397                                 m->m_flags |= M_VLANTAG;
2398                         }
2399
2400                         ifp->if_ipackets++;
2401
2402                         if (ifp->if_capenable & IFCAP_RSS)
2403                                 pi = jme_pktinfo(&pi0, flags);
2404                         else
2405                                 pi = NULL;
2406
2407                         if (pi != NULL &&
2408                             (hashinfo & JME_RD_HASH_FN_MASK) ==
2409                             JME_RD_HASH_FN_TOEPLITZ) {
2410                                 m->m_flags |= (M_HASH | M_CKHASH);
2411                                 m->m_pkthdr.hash = toeplitz_hash(hash);
2412                         }
2413
2414 #ifdef JME_RSS_DEBUG
2415                         if (pi != NULL) {
2416                                 JME_RSS_DPRINTF(rdata->jme_sc, 10,
2417                                     "isr %d flags %08x, l3 %d %s\n",
2418                                     pi->pi_netisr, pi->pi_flags,
2419                                     pi->pi_l3proto,
2420                                     (m->m_flags & M_HASH) ? "hash" : "");
2421                         }
2422 #endif
2423
2424                         /* Pass it on. */
2425                         ether_input_pkt(ifp, m, pi);
2426
2427                         /* Reset mbuf chains. */
2428                         JME_RXCHAIN_RESET(rdata);
2429 #ifdef JME_RSS_DEBUG
2430                         rdata->jme_rx_pkt++;
2431 #endif
2432                 }
2433         }
2434
2435         rdata->jme_rx_cons += nsegs;
2436         rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
2437 }
2438
2439 static void
2440 jme_rxeof(struct jme_rxdata *rdata, int count)
2441 {
2442         struct jme_desc *desc;
2443         int nsegs, pktlen;
2444
2445         for (;;) {
2446 #ifdef IFPOLL_ENABLE
2447                 if (count >= 0 && count-- == 0)
2448                         break;
2449 #endif
2450                 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2451                 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2452                         break;
2453                 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2454                         break;
2455
2456                 /*
2457                  * Check number of segments against received bytes.
2458                  * Non-matching value would indicate that hardware
2459                  * is still trying to update Rx descriptors. I'm not
2460                  * sure whether this check is needed.
2461                  */
2462                 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2463                 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2464                 if (nsegs != howmany(pktlen, MCLBYTES)) {
2465                         if_printf(&rdata->jme_sc->arpcom.ac_if,
2466                             "RX fragment count(%d) and "
2467                             "packet size(%d) mismach\n", nsegs, pktlen);
2468                         break;
2469                 }
2470
2471                 /*
2472                  * NOTE:
2473                  * RSS hash and hash information may _not_ be set by the
2474                  * hardware even if the OWN bit is cleared and VALID bit
2475                  * is set.
2476                  *
2477                  * If the RSS information is not delivered by the hardware
2478                  * yet, we MUST NOT accept this packet, let alone reusing
2479                  * its RX descriptor.  If this packet was accepted and its
2480                  * RX descriptor was reused before hardware delivering the
2481                  * RSS information, the RX buffer's address would be trashed
2482                  * by the RSS information delivered by the hardware.
2483                  */
2484                 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2485                         struct jme_rxdesc *rxd;
2486                         uint32_t hashinfo;
2487
2488                         hashinfo = le32toh(desc->addr_lo);
2489                         rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2490
2491                         /*
2492                          * This test should be enough to detect the pending
2493                          * RSS information delivery, given:
2494                          * - If RSS hash is not calculated, the hashinfo
2495                          *   will be 0.  Howvever, the lower 32bits of RX
2496                          *   buffers' physical address will never be 0.
2497                          *   (see jme_rxbuf_dma_filter)
2498                          * - If RSS hash is calculated, the lowest 4 bits
2499                          *   of hashinfo will be set, while the RX buffers
2500                          *   are at least 2K aligned.
2501                          */
2502                         if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2503 #ifdef JME_SHOW_RSSWB
2504                                 if_printf(&rdata->jme_sc->arpcom.ac_if,
2505                                     "RSS is not written back yet\n");
2506 #endif
2507                                 break;
2508                         }
2509                 }
2510
2511                 /* Received a frame. */
2512                 jme_rxpkt(rdata);
2513         }
2514 }
2515
2516 static void
2517 jme_tick(void *xsc)
2518 {
2519         struct jme_softc *sc = xsc;
2520         struct mii_data *mii = device_get_softc(sc->jme_miibus);
2521
2522         lwkt_serialize_enter(&sc->jme_serialize);
2523
2524         sc->jme_in_tick = TRUE;
2525         mii_tick(mii);
2526         sc->jme_in_tick = FALSE;
2527
2528         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2529
2530         lwkt_serialize_exit(&sc->jme_serialize);
2531 }
2532
/*
 * jme_reset: Perform a full MAC reset.
 *
 * The chip requires a specific sequence of GHC/GPREG1 writes around the
 * reset bit (clock sources toggled while the reset bit is held, then
 * again after it is released); the order of the register accesses below
 * is significant — do not reorder.  Each CSR_READ_4(sc, JME_GHC) acts
 * as a posted-write flush.  On return both the transmitter and the
 * receiver are left stopped; jme_init() restarts them.
 */
static void
jme_reset(struct jme_softc *sc)
{
        uint32_t val;

        /* Make sure that TX and RX are stopped */
        jme_stop_tx(sc);
        jme_stop_rx(sc);

        /* Start reset */
        CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
        DELAY(20);

        /*
         * Hold reset bit before stop reset
         */

        /* Disable TXMAC and TXOFL clock sources */
        CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
        /* Disable RXMAC clock source */
        val = CSR_READ_4(sc, JME_GPREG1);
        CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /* Stop reset */
        CSR_WRITE_4(sc, JME_GHC, 0);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /*
         * Clear reset bit after stop reset
         */

        /* Enable TXMAC and TXOFL clock sources */
        CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
        /* Enable RXMAC clock source */
        val = CSR_READ_4(sc, JME_GPREG1);
        CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /* Disable TXMAC and TXOFL clock sources */
        CSR_WRITE_4(sc, JME_GHC, 0);
        /* Disable RXMAC clock source */
        val = CSR_READ_4(sc, JME_GPREG1);
        CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /* Enable TX and RX */
        val = CSR_READ_4(sc, JME_TXCSR);
        CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
        val = CSR_READ_4(sc, JME_RXCSR);
        CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
        /* Flush */
        CSR_READ_4(sc, JME_TXCSR);
        CSR_READ_4(sc, JME_RXCSR);

        /* Enable TXMAC and TXOFL clock sources */
        CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
        /* Enable RXMAC clock source */
        val = CSR_READ_4(sc, JME_GPREG1);
        CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /* Stop TX and RX */
        jme_stop_tx(sc);
        jme_stop_rx(sc);
}
2604
2605 static void
2606 jme_init(void *xsc)
2607 {
2608         struct jme_softc *sc = xsc;
2609         struct ifnet *ifp = &sc->arpcom.ac_if;
2610         struct mii_data *mii;
2611         uint8_t eaddr[ETHER_ADDR_LEN];
2612         bus_addr_t paddr;
2613         uint32_t reg;
2614         int error, r;
2615
2616         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2617
2618         /*
2619          * Cancel any pending I/O.
2620          */
2621         jme_stop(sc);
2622
2623         /*
2624          * Reset the chip to a known state.
2625          */
2626         jme_reset(sc);
2627
2628         /*
2629          * Setup MSI/MSI-X vectors to interrupts mapping
2630          */
2631         jme_set_msinum(sc);
2632
2633         if (JME_ENABLE_HWRSS(sc))
2634                 jme_enable_rss(sc);
2635         else
2636                 jme_disable_rss(sc);
2637
2638         /* Init RX descriptors */
2639         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2640                 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2641                 if (error) {
2642                         if_printf(ifp, "initialization failed: "
2643                                   "no memory for %dth RX ring.\n", r);
2644                         jme_stop(sc);
2645                         return;
2646                 }
2647         }
2648
2649         /* Init TX descriptors */
2650         jme_init_tx_ring(sc);
2651
2652         /* Initialize shadow status block. */
2653         jme_init_ssb(sc);
2654
2655         /* Reprogram the station address. */
2656         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2657         CSR_WRITE_4(sc, JME_PAR0,
2658             eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2659         CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2660
2661         /*
2662          * Configure Tx queue.
2663          *  Tx priority queue weight value : 0
2664          *  Tx FIFO threshold for processing next packet : 16QW
2665          *  Maximum Tx DMA length : 512
2666          *  Allow Tx DMA burst.
2667          */
2668         sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2669         sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2670         sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2671         sc->jme_txcsr |= sc->jme_tx_dma_size;
2672         sc->jme_txcsr |= TXCSR_DMA_BURST;
2673         CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2674
2675         /* Set Tx descriptor counter. */
2676         CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
2677
2678         /* Set Tx ring address to the hardware. */
2679         paddr = sc->jme_cdata.jme_tx_ring_paddr;
2680         CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2681         CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2682
2683         /* Configure TxMAC parameters. */
2684         reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2685         reg |= TXMAC_THRESH_1_PKT;
2686         reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2687         CSR_WRITE_4(sc, JME_TXMAC, reg);
2688
2689         /*
2690          * Configure Rx queue.
2691          *  FIFO full threshold for transmitting Tx pause packet : 128T
2692          *  FIFO threshold for processing next packet : 128QW
2693          *  Rx queue 0 select
2694          *  Max Rx DMA length : 128
2695          *  Rx descriptor retry : 32
2696          *  Rx descriptor retry time gap : 256ns
2697          *  Don't receive runt/bad frame.
2698          */
2699         sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2700 #if 0
2701         /*
2702          * Since Rx FIFO size is 4K bytes, receiving frames larger
2703          * than 4K bytes will suffer from Rx FIFO overruns. So
2704          * decrease FIFO threshold to reduce the FIFO overruns for
2705          * frames larger than 4000 bytes.
2706          * For best performance of standard MTU sized frames use
2707          * maximum allowable FIFO threshold, 128QW.
2708          */
2709         if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2710             JME_RX_FIFO_SIZE)
2711                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2712         else
2713                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2714 #else
2715         /* Improve PCI Express compatibility */
2716         sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2717 #endif
2718         sc->jme_rxcsr |= sc->jme_rx_dma_size;
2719         sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2720         sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2721         /* XXX TODO DROP_BAD */
2722
2723         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2724                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2725
2726                 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2727
2728                 /* Set Rx descriptor counter. */
2729                 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2730
2731                 /* Set Rx ring address to the hardware. */
2732                 paddr = rdata->jme_rx_ring_paddr;
2733                 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2734                 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2735         }
2736
2737         /* Clear receive filter. */
2738         CSR_WRITE_4(sc, JME_RXMAC, 0);
2739
2740         /* Set up the receive filter. */
2741         jme_set_filter(sc);
2742         jme_set_vlan(sc);
2743
2744         /*
2745          * Disable all WOL bits as WOL can interfere normal Rx
2746          * operation. Also clear WOL detection status bits.
2747          */
2748         reg = CSR_READ_4(sc, JME_PMCS);
2749         reg &= ~PMCS_WOL_ENB_MASK;
2750         CSR_WRITE_4(sc, JME_PMCS, reg);
2751
2752         /*
2753          * Pad 10bytes right before received frame. This will greatly
2754          * help Rx performance on strict-alignment architectures as
2755          * it does not need to copy the frame to align the payload.
2756          */
2757         reg = CSR_READ_4(sc, JME_RXMAC);
2758         reg |= RXMAC_PAD_10BYTES;
2759
2760         if (ifp->if_capenable & IFCAP_RXCSUM)
2761                 reg |= RXMAC_CSUM_ENB;
2762         CSR_WRITE_4(sc, JME_RXMAC, reg);
2763
2764         /* Configure general purpose reg0 */
2765         reg = CSR_READ_4(sc, JME_GPREG0);
2766         reg &= ~GPREG0_PCC_UNIT_MASK;
2767         /* Set PCC timer resolution to micro-seconds unit. */
2768         reg |= GPREG0_PCC_UNIT_US;
2769         /*
2770          * Disable all shadow register posting as we have to read
2771          * JME_INTR_STATUS register in jme_intr. Also it seems
2772          * that it's hard to synchronize interrupt status between
2773          * hardware and software with shadow posting due to
2774          * requirements of bus_dmamap_sync(9).
2775          */
2776         reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2777             GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2778             GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2779             GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2780         /* Disable posting of DW0. */
2781         reg &= ~GPREG0_POST_DW0_ENB;
2782         /* Clear PME message. */
2783         reg &= ~GPREG0_PME_ENB;
2784         /* Set PHY address. */
2785         reg &= ~GPREG0_PHY_ADDR_MASK;
2786         reg |= sc->jme_phyaddr;
2787         CSR_WRITE_4(sc, JME_GPREG0, reg);
2788
2789         /* Configure Tx queue 0 packet completion coalescing. */
2790         jme_set_tx_coal(sc);
2791
2792         /* Configure Rx queues packet completion coalescing. */
2793         jme_set_rx_coal(sc);
2794
2795         /* Configure shadow status block but don't enable posting. */
2796         paddr = sc->jme_cdata.jme_ssb_block_paddr;
2797         CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2798         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2799
2800         /* Disable Timer 1 and Timer 2. */
2801         CSR_WRITE_4(sc, JME_TIMER1, 0);
2802         CSR_WRITE_4(sc, JME_TIMER2, 0);
2803
2804         /* Configure retry transmit period, retry limit value. */
2805         CSR_WRITE_4(sc, JME_TXTRHD,
2806             ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2807             TXTRHD_RT_PERIOD_MASK) |
2808             ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2809             TXTRHD_RT_LIMIT_SHIFT));
2810
2811 #ifdef IFPOLL_ENABLE
2812         if (!(ifp->if_flags & IFF_NPOLLING))
2813 #endif
2814         /* Initialize the interrupt mask. */
2815         jme_enable_intr(sc);
2816         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2817
2818         /*
2819          * Enabling Tx/Rx DMA engines and Rx queue processing is
2820          * done after detection of valid link in jme_miibus_statchg.
2821          */
2822         sc->jme_has_link = FALSE;
2823
2824         /* Set the current media. */
2825         mii = device_get_softc(sc->jme_miibus);
2826         mii_mediachg(mii);
2827
2828         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2829
2830         ifp->if_flags |= IFF_RUNNING;
2831         ifp->if_flags &= ~IFF_OACTIVE;
2832 }
2833
2834 static void
2835 jme_stop(struct jme_softc *sc)
2836 {
2837         struct ifnet *ifp = &sc->arpcom.ac_if;
2838         struct jme_txdesc *txd;
2839         struct jme_rxdesc *rxd;
2840         struct jme_rxdata *rdata;
2841         int i, r;
2842
2843         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2844
2845         /*
2846          * Mark the interface down and cancel the watchdog timer.
2847          */
2848         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2849         ifp->if_timer = 0;
2850
2851         callout_stop(&sc->jme_tick_ch);
2852         sc->jme_has_link = FALSE;
2853
2854         /*
2855          * Disable interrupts.
2856          */
2857         jme_disable_intr(sc);
2858         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2859
2860         /* Disable updating shadow status block. */
2861         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2862             CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2863
2864         /* Stop receiver, transmitter. */
2865         jme_stop_rx(sc);
2866         jme_stop_tx(sc);
2867
2868         /*
2869          * Free partial finished RX segments
2870          */
2871         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2872                 rdata = &sc->jme_cdata.jme_rx_data[r];
2873                 if (rdata->jme_rxhead != NULL)
2874                         m_freem(rdata->jme_rxhead);
2875                 JME_RXCHAIN_RESET(rdata);
2876         }
2877
2878         /*
2879          * Free RX and TX mbufs still in the queues.
2880          */
2881         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2882                 rdata = &sc->jme_cdata.jme_rx_data[r];
2883                 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2884                         rxd = &rdata->jme_rxdesc[i];
2885                         if (rxd->rx_m != NULL) {
2886                                 bus_dmamap_unload(rdata->jme_rx_tag,
2887                                                   rxd->rx_dmamap);
2888                                 m_freem(rxd->rx_m);
2889                                 rxd->rx_m = NULL;
2890                         }
2891                 }
2892         }
2893         for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2894                 txd = &sc->jme_cdata.jme_txdesc[i];
2895                 if (txd->tx_m != NULL) {
2896                         bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2897                             txd->tx_dmamap);
2898                         m_freem(txd->tx_m);
2899                         txd->tx_m = NULL;
2900                         txd->tx_ndesc = 0;
2901                 }
2902         }
2903 }
2904
2905 static void
2906 jme_stop_tx(struct jme_softc *sc)
2907 {
2908         uint32_t reg;
2909         int i;
2910
2911         reg = CSR_READ_4(sc, JME_TXCSR);
2912         if ((reg & TXCSR_TX_ENB) == 0)
2913                 return;
2914         reg &= ~TXCSR_TX_ENB;
2915         CSR_WRITE_4(sc, JME_TXCSR, reg);
2916         for (i = JME_TIMEOUT; i > 0; i--) {
2917                 DELAY(1);
2918                 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2919                         break;
2920         }
2921         if (i == 0)
2922                 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2923 }
2924
2925 static void
2926 jme_stop_rx(struct jme_softc *sc)
2927 {
2928         uint32_t reg;
2929         int i;
2930
2931         reg = CSR_READ_4(sc, JME_RXCSR);
2932         if ((reg & RXCSR_RX_ENB) == 0)
2933                 return;
2934         reg &= ~RXCSR_RX_ENB;
2935         CSR_WRITE_4(sc, JME_RXCSR, reg);
2936         for (i = JME_TIMEOUT; i > 0; i--) {
2937                 DELAY(1);
2938                 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2939                         break;
2940         }
2941         if (i == 0)
2942                 device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2943 }
2944
2945 static void
2946 jme_init_tx_ring(struct jme_softc *sc)
2947 {
2948         struct jme_chain_data *cd;
2949         struct jme_txdesc *txd;
2950         int i;
2951
2952         sc->jme_cdata.jme_tx_prod = 0;
2953         sc->jme_cdata.jme_tx_cons = 0;
2954         sc->jme_cdata.jme_tx_cnt = 0;
2955
2956         cd = &sc->jme_cdata;
2957         bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2958         for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2959                 txd = &sc->jme_cdata.jme_txdesc[i];
2960                 txd->tx_m = NULL;
2961                 txd->tx_desc = &cd->jme_tx_ring[i];
2962                 txd->tx_ndesc = 0;
2963         }
2964 }
2965
2966 static void
2967 jme_init_ssb(struct jme_softc *sc)
2968 {
2969         struct jme_chain_data *cd;
2970
2971         cd = &sc->jme_cdata;
2972         bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2973 }
2974
2975 static int
2976 jme_init_rx_ring(struct jme_rxdata *rdata)
2977 {
2978         struct jme_rxdesc *rxd;
2979         int i;
2980
2981         KKASSERT(rdata->jme_rxhead == NULL &&
2982                  rdata->jme_rxtail == NULL &&
2983                  rdata->jme_rxlen == 0);
2984         rdata->jme_rx_cons = 0;
2985
2986         bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2987         for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2988                 int error;
2989
2990                 rxd = &rdata->jme_rxdesc[i];
2991                 rxd->rx_m = NULL;
2992                 rxd->rx_desc = &rdata->jme_rx_ring[i];
2993                 error = jme_newbuf(rdata, rxd, 1);
2994                 if (error)
2995                         return error;
2996         }
2997         return 0;
2998 }
2999
3000 static int
3001 jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
3002 {
3003         struct mbuf *m;
3004         bus_dma_segment_t segs;
3005         bus_dmamap_t map;
3006         int error, nsegs;
3007
3008         m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3009         if (m == NULL)
3010                 return ENOBUFS;
3011         /*
3012          * JMC250 has 64bit boundary alignment limitation so jme(4)
3013          * takes advantage of 10 bytes padding feature of hardware
3014          * in order not to copy entire frame to align IP header on
3015          * 32bit boundary.
3016          */
3017         m->m_len = m->m_pkthdr.len = MCLBYTES;
3018
3019         error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
3020                         rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
3021                         BUS_DMA_NOWAIT);
3022         if (error) {
3023                 m_freem(m);
3024                 if (init) {
3025                         if_printf(&rdata->jme_sc->arpcom.ac_if,
3026                             "can't load RX mbuf\n");
3027                 }
3028                 return error;
3029         }
3030
3031         if (rxd->rx_m != NULL) {
3032                 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
3033                                 BUS_DMASYNC_POSTREAD);
3034                 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
3035         }
3036         map = rxd->rx_dmamap;
3037         rxd->rx_dmamap = rdata->jme_rx_sparemap;
3038         rdata->jme_rx_sparemap = map;
3039         rxd->rx_m = m;
3040         rxd->rx_paddr = segs.ds_addr;
3041
3042         jme_setup_rxdesc(rxd);
3043         return 0;
3044 }
3045
3046 static void
3047 jme_set_vlan(struct jme_softc *sc)
3048 {
3049         struct ifnet *ifp = &sc->arpcom.ac_if;
3050         uint32_t reg;
3051
3052         ASSERT_IFNET_SERIALIZED_ALL(ifp);
3053
3054         reg = CSR_READ_4(sc, JME_RXMAC);
3055         reg &= ~RXMAC_VLAN_ENB;
3056         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3057                 reg |= RXMAC_VLAN_ENB;
3058         CSR_WRITE_4(sc, JME_RXMAC, reg);
3059 }
3060
3061 static void
3062 jme_set_filter(struct jme_softc *sc)
3063 {
3064         struct ifnet *ifp = &sc->arpcom.ac_if;
3065         struct ifmultiaddr *ifma;
3066         uint32_t crc;
3067         uint32_t mchash[2];
3068         uint32_t rxcfg;
3069
3070         ASSERT_IFNET_SERIALIZED_ALL(ifp);
3071
3072         rxcfg = CSR_READ_4(sc, JME_RXMAC);
3073         rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3074             RXMAC_ALLMULTI);
3075
3076         /*
3077          * Always accept frames destined to our station address.
3078          * Always accept broadcast frames.
3079          */
3080         rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3081
3082         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3083                 if (ifp->if_flags & IFF_PROMISC)
3084                         rxcfg |= RXMAC_PROMISC;
3085                 if (ifp->if_flags & IFF_ALLMULTI)
3086                         rxcfg |= RXMAC_ALLMULTI;
3087                 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3088                 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3089                 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3090                 return;
3091         }
3092
3093         /*
3094          * Set up the multicast address filter by passing all multicast
3095          * addresses through a CRC generator, and then using the low-order
3096          * 6 bits as an index into the 64 bit multicast hash table.  The
3097          * high order bits select the register, while the rest of the bits
3098          * select the bit within the register.
3099          */
3100         rxcfg |= RXMAC_MULTICAST;
3101         bzero(mchash, sizeof(mchash));
3102
3103         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3104                 if (ifma->ifma_addr->sa_family != AF_LINK)
3105                         continue;
3106                 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3107                     ifma->ifma_addr), ETHER_ADDR_LEN);
3108
3109                 /* Just want the 6 least significant bits. */
3110                 crc &= 0x3f;
3111
3112                 /* Set the corresponding bit in the hash table. */
3113                 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3114         }
3115
3116         CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3117         CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3118         CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3119 }
3120
3121 static int
3122 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
3123 {
3124         struct jme_softc *sc = arg1;
3125         struct ifnet *ifp = &sc->arpcom.ac_if;
3126         int error, v;
3127
3128         ifnet_serialize_all(ifp);
3129
3130         v = sc->jme_tx_coal_to;
3131         error = sysctl_handle_int(oidp, &v, 0, req);
3132         if (error || req->newptr == NULL)
3133                 goto back;
3134
3135         if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3136                 error = EINVAL;
3137                 goto back;
3138         }
3139
3140         if (v != sc->jme_tx_coal_to) {
3141                 sc->jme_tx_coal_to = v;
3142                 if (ifp->if_flags & IFF_RUNNING)
3143                         jme_set_tx_coal(sc);
3144         }
3145 back:
3146         ifnet_deserialize_all(ifp);
3147         return error;
3148 }
3149
3150 static int
3151 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3152 {
3153         struct jme_softc *sc = arg1;
3154         struct ifnet *ifp = &sc->arpcom.ac_if;
3155         int error, v;
3156
3157         ifnet_serialize_all(ifp);
3158
3159         v = sc->jme_tx_coal_pkt;
3160         error = sysctl_handle_int(oidp, &v, 0, req);
3161         if (error || req->newptr == NULL)
3162                 goto back;
3163
3164         if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3165                 error = EINVAL;
3166                 goto back;
3167         }
3168
3169         if (v != sc->jme_tx_coal_pkt) {
3170                 sc->jme_tx_coal_pkt = v;
3171                 if (ifp->if_flags & IFF_RUNNING)
3172                         jme_set_tx_coal(sc);
3173         }
3174 back:
3175         ifnet_deserialize_all(ifp);
3176         return error;
3177 }
3178
3179 static int
3180 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
3181 {
3182         struct jme_softc *sc = arg1;
3183         struct ifnet *ifp = &sc->arpcom.ac_if;
3184         int error, v;
3185
3186         ifnet_serialize_all(ifp);
3187
3188         v = sc->jme_rx_coal_to;
3189         error = sysctl_handle_int(oidp, &v, 0, req);
3190         if (error || req->newptr == NULL)
3191                 goto back;
3192
3193         if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3194                 error = EINVAL;
3195                 goto back;
3196         }
3197
3198         if (v != sc->jme_rx_coal_to) {
3199                 sc->jme_rx_coal_to = v;
3200                 if (ifp->if_flags & IFF_RUNNING)
3201                         jme_set_rx_coal(sc);
3202         }
3203 back:
3204         ifnet_deserialize_all(ifp);
3205         return error;
3206 }
3207
3208 static int
3209 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3210 {
3211         struct jme_softc *sc = arg1;
3212         struct ifnet *ifp = &sc->arpcom.ac_if;
3213         int error, v;
3214
3215         ifnet_serialize_all(ifp);
3216
3217         v = sc->jme_rx_coal_pkt;
3218         error = sysctl_handle_int(oidp, &v, 0, req);
3219         if (error || req->newptr == NULL)
3220                 goto back;
3221
3222         if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3223                 error = EINVAL;
3224                 goto back;
3225         }
3226
3227         if (v != sc->jme_rx_coal_pkt) {
3228                 sc->jme_rx_coal_pkt = v;
3229                 if (ifp->if_flags & IFF_RUNNING)
3230                         jme_set_rx_coal(sc);
3231         }
3232 back:
3233         ifnet_deserialize_all(ifp);
3234         return error;
3235 }
3236
3237 static void
3238 jme_set_tx_coal(struct jme_softc *sc)
3239 {
3240         uint32_t reg;
3241
3242         reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3243             PCCTX_COAL_TO_MASK;
3244         reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3245             PCCTX_COAL_PKT_MASK;
3246         reg |= PCCTX_COAL_TXQ0;
3247         CSR_WRITE_4(sc, JME_PCCTX, reg);
3248 }
3249
3250 static void
3251 jme_set_rx_coal(struct jme_softc *sc)
3252 {
3253         uint32_t reg;
3254         int r;
3255
3256         reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3257             PCCRX_COAL_TO_MASK;
3258         reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3259             PCCRX_COAL_PKT_MASK;
3260         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3261                 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3262 }
3263
3264 #ifdef IFPOLL_ENABLE
3265
/*
 * ifpoll(4) status callback: detect "RX queue descriptor empty"
 * conditions, drain the affected rings, then acknowledge the status
 * bits and restart the RX queues.  Called with the main serializer
 * (sc->jme_serialize) held.
 */
static void
jme_npoll_status(struct ifnet *ifp, int pollhz __unused)
{
	struct jme_softc *sc = ifp->if_softc;
	uint32_t status;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if (status & INTR_RXQ_DESC_EMPTY) {
		int i;

		for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[i];

			if (status & rdata->jme_rx_empty) {
				/* Drain under the ring's own serializer. */
				lwkt_serialize_enter(&rdata->jme_rx_serialize);
				jme_rxeof(rdata, -1);
#ifdef JME_RSS_DEBUG
				rdata->jme_rx_emp++;
#endif
				lwkt_serialize_exit(&rdata->jme_rx_serialize);
			}
		}
		/* Ack the empty conditions, then restart the RX queues. */
		CSR_WRITE_4(sc, JME_INTR_STATUS, status);
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
		    RXCSR_RX_ENB | RXCSR_RXQ_START);
	}
}
3296
3297 static void
3298 jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3299 {
3300         struct jme_rxdata *rdata = arg;
3301
3302         ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3303
3304         jme_rxeof(rdata, cycle);
3305 }
3306
3307 static void
3308 jme_npoll_tx(struct ifnet *ifp, void *arg __unused, int cycle __unused)
3309 {
3310         struct jme_softc *sc = ifp->if_softc;
3311
3312         ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3313
3314         jme_txeof(sc);
3315         if (!ifq_is_empty(&ifp->if_snd))
3316                 if_devstart(ifp);
3317 }
3318
3319 static void
3320 jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3321 {
3322         struct jme_softc *sc = ifp->if_softc;
3323
3324         ASSERT_IFNET_SERIALIZED_ALL(ifp);
3325
3326         if (info) {
3327                 int i, off;
3328
3329                 info->ifpi_status.status_func = jme_npoll_status;
3330                 info->ifpi_status.serializer = &sc->jme_serialize;
3331
3332                 off = sc->jme_npoll_txoff;
3333                 KKASSERT(off <= ncpus2);
3334                 info->ifpi_tx[off].poll_func = jme_npoll_tx;
3335                 info->ifpi_tx[off].arg = NULL;
3336                 info->ifpi_tx[off].serializer = &sc->jme_cdata.jme_tx_serialize;
3337
3338                 off = sc->jme_npoll_rxoff;
3339                 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
3340                         struct jme_rxdata *rdata =
3341                             &sc->jme_cdata.jme_rx_data[i];
3342                         int idx = i + off;
3343
3344                         info->ifpi_rx[idx].poll_func = jme_npoll_rx;
3345                         info->ifpi_rx[idx].arg = rdata;
3346                         info->ifpi_rx[idx].serializer =
3347                             &rdata->jme_rx_serialize;
3348                 }
3349
3350                 if (ifp->if_flags & IFF_RUNNING)
3351                         jme_disable_intr(sc);
3352                 ifp->if_npoll_cpuid = sc->jme_npoll_txoff;
3353         } else {
3354                 if (ifp->if_flags & IFF_RUNNING)
3355                         jme_enable_intr(sc);
3356                 ifp->if_npoll_cpuid = -1;
3357         }
3358 }
3359
3360 static int
3361 jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3362 {
3363         struct jme_softc *sc = (void *)arg1;
3364         struct ifnet *ifp = &sc->arpcom.ac_if;
3365         int error, off;
3366
3367         off = sc->jme_npoll_rxoff;
3368         error = sysctl_handle_int(oidp, &off, 0, req);
3369         if (error || req->newptr == NULL)
3370                 return error;
3371         if (off < 0)
3372                 return EINVAL;
3373
3374         ifnet_serialize_all(ifp);
3375         if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3376                 error = EINVAL;
3377         } else {
3378                 error = 0;
3379                 sc->jme_npoll_rxoff = off;
3380         }
3381         ifnet_deserialize_all(ifp);
3382
3383         return error;
3384 }
3385
3386 static int
3387 jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3388 {
3389         struct jme_softc *sc = (void *)arg1;
3390         struct ifnet *ifp = &sc->arpcom.ac_if;
3391         int error, off;
3392
3393         off = sc->jme_npoll_txoff;
3394         error = sysctl_handle_int(oidp, &off, 0, req);
3395         if (error || req->newptr == NULL)
3396                 return error;
3397         if (off < 0)
3398                 return EINVAL;
3399
3400         ifnet_serialize_all(ifp);
3401         if (off >= ncpus2) {
3402                 error = EINVAL;
3403         } else {
3404                 error = 0;
3405                 sc->jme_npoll_txoff = off;
3406         }
3407         ifnet_deserialize_all(ifp);
3408
3409         return error;
3410 }
3411
3412 #endif  /* IFPOLL_ENABLE */
3413
/*
 * Allocate DMA-coherent, zeroed memory for one RX descriptor ring and
 * record the resulting tag, map, KVA and bus address in the ring data.
 * Returns 0 on success or the bus_dmamem_coherent() error code.
 */
static int
jme_rxring_dma_alloc(struct jme_rxdata *rdata)
{
	bus_dmamem_t dmem;
	int error, asize;

	/* Round the ring size up to the hardware alignment requirement. */
	asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
	error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
			JME_RX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
		return error;
	}
	rdata->jme_rx_ring_tag = dmem.dmem_tag;
	rdata->jme_rx_ring_map = dmem.dmem_map;
	rdata->jme_rx_ring = dmem.dmem_addr;
	rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;

	return 0;
}
3437
3438 static int
3439 jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3440 {
3441         if ((paddr & 0xffffffff) == 0) {
3442                 /*
3443                  * Don't allow lower 32bits of the RX buffer's
3444                  * physical address to be 0, else it will break
3445                  * hardware pending RSS information delivery
3446                  * detection on RX path.
3447                  */
3448                 return 1;
3449         }
3450         return 0;
3451 }
3452
/*
 * Create the DMA tag and per-descriptor DMA maps (plus one spare map)
 * for a single RX ring's mbuf buffers.  On any failure all maps and
 * the tag created so far are torn down before returning the error.
 */
static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
	bus_addr_t lowaddr;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
		/* jme_rxbuf_dma_filter will be called */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,        /* algnmnt, boundary */
	    lowaddr,                    /* lowaddr */
	    BUS_SPACE_MAXADDR,          /* highaddr */
	    jme_rxbuf_dma_filter, NULL, /* filter, filterarg */
	    MCLBYTES,                   /* maxsize */
	    1,                          /* nsegments */
	    MCLBYTES,                   /* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
				  &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n",
		    rdata->jme_rx_idx);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(rdata->jme_sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);

			/* Unwind: destroy the maps created so far. */
			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}
3520
3521 static void
3522 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3523 {
3524         int r;
3525
3526         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3527                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3528
3529                 if (status & rdata->jme_rx_coal) {
3530                         lwkt_serialize_enter(&rdata->jme_rx_serialize);
3531                         jme_rxeof(rdata, -1);
3532                         lwkt_serialize_exit(&rdata->jme_rx_serialize);
3533                 }
3534         }
3535 }
3536
/*
 * Enable and configure receive side scaling: program the RSS control
 * register, load the Toeplitz hash key and build the indirection table
 * that spreads hash buckets round-robin across the RX rings.
 * Only 2 or 4 RX rings are supported by the hardware encoding used here.
 */
static void
jme_enable_rss(struct jme_softc *sc)
{
	uint32_t rssc, ind;
	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
	int i;

	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
		sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
		("%s: invalid # of RX rings (%d)",
		 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));

	/* ring_cnt >> 1 encodes 2 rings as 1, 4 rings as 2. */
	rssc = RSSC_HASH_64_ENTRY;
	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
	CSR_WRITE_4(sc, JME_RSSC, rssc);

	/* Load the system-wide Toeplitz key into the key registers. */
	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < RSSKEY_NREGS; ++i) {
		uint32_t keyreg;

		keyreg = RSSKEY_REGVAL(key, i);
		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);

		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
	}

	/*
	 * Create redirect table in following fashion:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	ind = 0;
	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
		int q;

		q = i % sc->jme_cdata.jme_rx_ring_cnt;
		ind |= q << (i * 8);
	}
	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);

	/* The same byte pattern repeats across all table registers. */
	for (i = 0; i < RSSTBL_NREGS; ++i)
		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
}
3581
/*
 * Disable receive side scaling; all packets are delivered to RX ring 0.
 */
static void
jme_disable_rss(struct jme_softc *sc)
{
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}
3587
/*
 * ifnet serialize method: enter the requested subset (ALL/MAIN/TX/RX)
 * of this device's serializer array.
 */
static void
jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}
3596
/*
 * ifnet deserialize method: leave the requested subset of this
 * device's serializer array (counterpart of jme_serialize()).
 */
static void
jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}
3605
/*
 * ifnet tryserialize method: non-blocking attempt to enter the
 * requested subset of the serializer array; returns non-zero on success.
 */
static int
jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
}
3614
3615 #ifdef INVARIANTS
3616
/*
 * INVARIANTS-only ifnet method: assert that the requested subset of the
 * serializer array is (or is not) held by the current thread.
 */
static void
jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
	    slz, serialized);
}
3627
3628 #endif  /* INVARIANTS */
3629
/*
 * Attempt to allocate and configure MSI-X interrupts: one TX vector plus
 * one vector per RX ring, each bound to a CPU chosen from the
 * "msix.txoff"/"msix.rxoff" tunables.  On success sc->jme_irq_type is
 * set to PCI_INTR_TYPE_MSIX; on any failure after pci_setup_msix() the
 * partially allocated vectors are released via jme_msix_free() and the
 * caller falls back to legacy/MSI interrupt allocation.
 */
static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;
	int offset, offset_def;

	msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt;
	KKASSERT(msix_count <= JME_NMSIX);

	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	/* Mark all rids invalid so cleanup can tell what was allocated. */
	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/*
	 * Setup TX MSI-X
	 */

	offset_def = device_get_unit(dev) % ncpus2;
	offset = device_getenv_int(dev, "msix.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid msix.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}

	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = offset;
	sc->jme_tx_cpuid = msix->jme_msix_cpuid;
	msix->jme_msix_arg = &sc->jme_cdata;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/*
	 * Setup RX MSI-X
	 */

	if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		/* rxoff must be a multiple of the RX ring count. */
		offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
		    device_get_unit(dev)) % ncpus2;

		offset = device_getenv_int(dev, "msix.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
			device_printf(dev, "invalid msix.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = r + offset;
		KKASSERT(msix->jme_msix_cpuid < ncpus2);
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d", device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Setup jme_msix_cnt early, so we could cleanup */
	sc->jme_msix_cnt = msix_count;

	/* Allocate each vector (vector 0 stays unused) and its IRQ resource. */
	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		msix->jme_msix_vector = i + 1;
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	/*
	 * Build the MSINUM routing registers: for each interrupt source
	 * bit, find the vector that services it and record the vector
	 * number in that source's 4-bit MSINUM field.
	 */
	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;

back:
	if (error)
		jme_msix_free(dev);
}
3775
/*
 * Allocate interrupt resources: try MSI-X first; if that fails, fall
 * back to a single MSI or legacy interrupt.  Returns 0 on success or
 * ENXIO if no interrupt resource could be allocated.
 */
static int
jme_intr_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	u_int irq_flags;

	jme_msix_try_alloc(dev);

	/* MSI-X failed or disabled: fall back to one MSI/legacy IRQ. */
	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
		sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
		    &sc->jme_irq_rid, &irq_flags);

		sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->jme_irq_rid, irq_flags);
		if (sc->jme_irq_res == NULL) {
			device_printf(dev, "can't allocate irq\n");
			return ENXIO;
		}
	}
	return 0;
}
3797
/*
 * Release all MSI-X resources: free each vector's IRQ resource and
 * vector allocation (skipping entries never allocated, rid == -1),
 * then tear down the MSI-X setup itself.  Safe to call on a partially
 * completed jme_msix_try_alloc().
 */
static void
jme_msix_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* msix_count is always 1 (TX) + at least 1 RX ring. */
	KKASSERT(sc->jme_msix_cnt > 1);

	for (i = 0; i < sc->jme_msix_cnt; ++i) {
		struct jme_msix_data *msix = &sc->jme_msix[i];

		if (msix->jme_msix_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ,
			    msix->jme_msix_rid, msix->jme_msix_res);
			msix->jme_msix_res = NULL;
		}
		if (msix->jme_msix_rid >= 0) {
			pci_release_msix_vector(dev, msix->jme_msix_rid);
			msix->jme_msix_rid = -1;
		}
	}
	pci_teardown_msix(dev);
}
3821
/*
 * Free whatever interrupt resources jme_intr_alloc() set up:
 * either the single legacy/MSI IRQ or the full MSI-X vector set.
 */
static void
jme_intr_free(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
		if (sc->jme_irq_res != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
					     sc->jme_irq_res);
		}
		if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
			pci_release_msi(dev);
	} else {
		jme_msix_free(dev);
	}
}
3838
/*
 * MSI-X TX interrupt handler.  Runs with the TX serializer held.
 * Masks the TX coalescing interrupts, acknowledges the pending TX
 * status bits, reclaims completed descriptors, restarts transmission
 * if packets are queued, then unmasks the TX interrupts.
 */
static void
jme_msix_tx(void *xcd)
{
	struct jme_chain_data *cd = xcd;
	struct jme_softc *sc = cd->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&cd->jme_tx_serialize);

	/* Mask while servicing to avoid re-entry from the same source. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);

	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);

	if (ifp->if_flags & IFF_RUNNING) {
		jme_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
}
3861
/*
 * MSI-X RX interrupt handler for one RX ring.  Runs with the ring's RX
 * serializer held.  Masks this ring's interrupts, acknowledges its
 * coalescing/empty status, drains the ring and (on "descriptor empty")
 * restarts the RX queues, then unmasks the ring's interrupts.
 */
static void
jme_msix_rx(void *xrdata)
{
	struct jme_rxdata *rdata = xrdata;
	struct jme_softc *sc = rdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	/* Mask while servicing to avoid re-entry from the same source. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR,
	    (rdata->jme_rx_coal | rdata->jme_rx_empty));

	/* Only look at this ring's status bits. */
	status = CSR_READ_4(sc, JME_INTR_STATUS);
	status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);

	/* Ack the completion bit together with the coalescing bit. */
	if (status & rdata->jme_rx_coal)
		status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & rdata->jme_rx_coal)
			jme_rxeof(rdata, -1);

		if (status & rdata->jme_rx_empty) {
			/* Ring ran dry: restart the RX queues. */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_emp++;
#endif
		}
	}

	CSR_WRITE_4(sc, JME_INTR_MASK_SET,
	    (rdata->jme_rx_coal | rdata->jme_rx_empty));
}
3898
3899 static void
3900 jme_set_msinum(struct jme_softc *sc)
3901 {
3902         int i;
3903
3904         for (i = 0; i < JME_MSINUM_CNT; ++i)
3905                 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3906 }
3907
/*
 * Install the interrupt handler(s): dispatch to jme_msix_setup() for
 * MSI-X, otherwise hook the single legacy/MSI interrupt and record its
 * CPU in ifp->if_cpuid.  Returns 0 on success or a bus_setup_intr error.
 */
static int
jme_intr_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error;

	if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
		return jme_msix_setup(dev);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
	    jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		return error;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
}
3929
3930 static void
3931 jme_intr_teardown(device_t dev)
3932 {
3933         struct jme_softc *sc = device_get_softc(dev);
3934
3935         if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3936                 jme_msix_teardown(dev, sc->jme_msix_cnt);
3937         else
3938                 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3939 }
3940
/*
 * Install every MSI-X interrupt handler with its own serializer and
 * description.  On failure the handlers installed so far are removed
 * via jme_msix_teardown() and the error is returned; on success the
 * TX vector's CPU becomes ifp->if_cpuid.
 */
static int
jme_msix_setup(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int x;

	for (x = 0; x < sc->jme_msix_cnt; ++x) {
		struct jme_msix_data *msix = &sc->jme_msix[x];
		int error;

		error = bus_setup_intr_descr(dev, msix->jme_msix_res,
		    INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
		    &msix->jme_msix_handle, msix->jme_msix_serialize,
		    msix->jme_msix_desc);
		if (error) {
			device_printf(dev, "could not set up %s "
			    "interrupt handler.\n", msix->jme_msix_desc);
			/* Unwind the x handlers already installed. */
			jme_msix_teardown(dev, x);
			return error;
		}
	}
	ifp->if_cpuid = sc->jme_tx_cpuid;
	return 0;
}
3966
3967 static void
3968 jme_msix_teardown(device_t dev, int msix_count)
3969 {
3970         struct jme_softc *sc = device_get_softc(dev);
3971         int x;
3972
3973         for (x = 0; x < msix_count; ++x) {
3974                 struct jme_msix_data *msix = &sc->jme_msix[x];
3975
3976                 bus_teardown_intr(dev, msix->jme_msix_res,
3977                     msix->jme_msix_handle);
3978         }
3979 }
3980
/*
 * Enter every serializer in the array except the first (main) one,
 * which the caller is expected to hold already.
 */
static void
jme_serialize_skipmain(struct jme_softc *sc)
{
	lwkt_serialize_array_enter(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, 1);
}
3987
/*
 * Leave every serializer in the array except the first (main) one.
 * Counterpart of jme_serialize_skipmain().
 */
static void
jme_deserialize_skipmain(struct jme_softc *sc)
{
	lwkt_serialize_array_exit(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, 1);
}
3994
/*
 * Enable interrupt processing: re-enable every serialized handler
 * first, then unmask the hardware interrupt sources so no interrupt
 * arrives while its handler is still disabled.
 */
static void
jme_enable_intr(struct jme_softc *sc)
{
	int i;

	for (i = 0; i < sc->jme_serialize_cnt; ++i)
		lwkt_serialize_handler_enable(sc->jme_serialize_arr[i]);

	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
4005
/*
 * Disable interrupt processing: mask the hardware interrupt sources
 * first, then disable every serialized handler (mirror order of
 * jme_enable_intr()).
 */
static void
jme_disable_intr(struct jme_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	for (i = 0; i < sc->jme_serialize_cnt; ++i)
		lwkt_serialize_handler_disable(sc->jme_serialize_arr[i]);
}