jme: Save physical address of RX buffer, which will be used upon error
[dragonfly.git] / sys / dev / netif / jme / if_jme.c
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29
30 #include "opt_polling.h"
31 #include "opt_jme.h"
32
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/toeplitz.h>
55 #include <net/toeplitz2.h>
56 #include <net/vlan/if_vlan_var.h>
57 #include <net/vlan/if_vlan_ether.h>
58
59 #include <netinet/in.h>
60
61 #include <dev/netif/mii_layer/miivar.h>
62 #include <dev/netif/mii_layer/jmphyreg.h>
63
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66 #include <bus/pci/pcidevs.h>
67
68 #include <dev/netif/jme/if_jmereg.h>
69 #include <dev/netif/jme/if_jmevar.h>
70
71 #include "miibus_if.h"
72
73 /* Define the following to disable printing Rx errors. */
74 #undef  JME_SHOW_ERRORS
75
76 #define JME_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
77
78 #ifdef JME_RSS_DEBUG
79 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
80 do { \
81         if ((sc)->jme_rss_debug >= (lvl)) \
82                 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
83 } while (0)
84 #else   /* !JME_RSS_DEBUG */
85 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)      ((void)0)
86 #endif  /* JME_RSS_DEBUG */
87
88 static int      jme_probe(device_t);
89 static int      jme_attach(device_t);
90 static int      jme_detach(device_t);
91 static int      jme_shutdown(device_t);
92 static int      jme_suspend(device_t);
93 static int      jme_resume(device_t);
94
95 static int      jme_miibus_readreg(device_t, int, int);
96 static int      jme_miibus_writereg(device_t, int, int, int);
97 static void     jme_miibus_statchg(device_t);
98
99 static void     jme_init(void *);
100 static int      jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
101 static void     jme_start(struct ifnet *);
102 static void     jme_watchdog(struct ifnet *);
103 static void     jme_mediastatus(struct ifnet *, struct ifmediareq *);
104 static int      jme_mediachange(struct ifnet *);
105 #ifdef DEVICE_POLLING
106 static void     jme_poll(struct ifnet *, enum poll_cmd, int);
107 #endif
108 static void     jme_serialize(struct ifnet *, enum ifnet_serialize);
109 static void     jme_deserialize(struct ifnet *, enum ifnet_serialize);
110 static int      jme_tryserialize(struct ifnet *, enum ifnet_serialize);
111 #ifdef INVARIANTS
112 static void     jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
113                     boolean_t);
114 #endif
115
116 static void     jme_intr(void *);
117 static void     jme_msix_tx(void *);
118 static void     jme_msix_rx(void *);
119 static void     jme_txeof(struct jme_softc *);
120 static void     jme_rxeof(struct jme_rxdata *, int);
121 static void     jme_rx_intr(struct jme_softc *, uint32_t);
122
123 static int      jme_msix_setup(device_t);
124 static void     jme_msix_teardown(device_t, int);
125 static int      jme_intr_setup(device_t);
126 static void     jme_intr_teardown(device_t);
127 static void     jme_msix_try_alloc(device_t);
128 static void     jme_msix_free(device_t);
129 static int      jme_intr_alloc(device_t);
130 static void     jme_intr_free(device_t);
131 static int      jme_dma_alloc(struct jme_softc *);
132 static void     jme_dma_free(struct jme_softc *);
133 static int      jme_init_rx_ring(struct jme_rxdata *);
134 static void     jme_init_tx_ring(struct jme_softc *);
135 static void     jme_init_ssb(struct jme_softc *);
136 static int      jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
137 static int      jme_encap(struct jme_softc *, struct mbuf **);
138 static void     jme_rxpkt(struct jme_rxdata *);
139 static int      jme_rxring_dma_alloc(struct jme_rxdata *);
140 static int      jme_rxbuf_dma_alloc(struct jme_rxdata *);
141
142 static void     jme_tick(void *);
143 static void     jme_stop(struct jme_softc *);
144 static void     jme_reset(struct jme_softc *);
145 static void     jme_set_msinum(struct jme_softc *);
146 static void     jme_set_vlan(struct jme_softc *);
147 static void     jme_set_filter(struct jme_softc *);
148 static void     jme_stop_tx(struct jme_softc *);
149 static void     jme_stop_rx(struct jme_softc *);
150 static void     jme_mac_config(struct jme_softc *);
151 static void     jme_reg_macaddr(struct jme_softc *, uint8_t[]);
152 static int      jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
153 static int      jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
154 #ifdef notyet
155 static void     jme_setwol(struct jme_softc *);
156 static void     jme_setlinkspeed(struct jme_softc *);
157 #endif
158 static void     jme_set_tx_coal(struct jme_softc *);
159 static void     jme_set_rx_coal(struct jme_softc *);
160 static void     jme_enable_rss(struct jme_softc *);
161 static void     jme_disable_rss(struct jme_softc *);
162
163 static void     jme_sysctl_node(struct jme_softc *);
164 static int      jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
165 static int      jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
166 static int      jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
167 static int      jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
168
169 /*
170  * Devices supported by this driver.
171  */
172 static const struct jme_dev {
173         uint16_t        jme_vendorid;
174         uint16_t        jme_deviceid;
175         uint32_t        jme_caps;
176         const char      *jme_name;
177 } jme_devs[] = {
178         { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
179             JME_CAP_JUMBO,
180             "JMicron Inc, JMC250 Gigabit Ethernet" },
181         { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
182             JME_CAP_FASTETH,
183             "JMicron Inc, JMC260 Fast Ethernet" },
184         { 0, 0, 0, NULL }
185 };
186
187 static device_method_t jme_methods[] = {
188         /* Device interface. */
189         DEVMETHOD(device_probe,         jme_probe),
190         DEVMETHOD(device_attach,        jme_attach),
191         DEVMETHOD(device_detach,        jme_detach),
192         DEVMETHOD(device_shutdown,      jme_shutdown),
193         DEVMETHOD(device_suspend,       jme_suspend),
194         DEVMETHOD(device_resume,        jme_resume),
195
196         /* Bus interface. */
197         DEVMETHOD(bus_print_child,      bus_generic_print_child),
198         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
199
200         /* MII interface. */
201         DEVMETHOD(miibus_readreg,       jme_miibus_readreg),
202         DEVMETHOD(miibus_writereg,      jme_miibus_writereg),
203         DEVMETHOD(miibus_statchg,       jme_miibus_statchg),
204
205         { NULL, NULL }
206 };
207
208 static driver_t jme_driver = {
209         "jme",
210         jme_methods,
211         sizeof(struct jme_softc)
212 };
213
214 static devclass_t jme_devclass;
215
216 DECLARE_DUMMY_MODULE(if_jme);
217 MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
218 DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
219 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
220
221 static const struct {
222         uint32_t        jme_coal;
223         uint32_t        jme_comp;
224         uint32_t        jme_empty;
225 } jme_rx_status[JME_NRXRING_MAX] = {
226         { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
227           INTR_RXQ0_DESC_EMPTY },
228         { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
229           INTR_RXQ1_DESC_EMPTY },
230         { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
231           INTR_RXQ2_DESC_EMPTY },
232         { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
233           INTR_RXQ3_DESC_EMPTY }
234 };
235
236 static int      jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
237 static int      jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
238 static int      jme_rx_ring_count = 1;
239 static int      jme_msi_enable = 1;
240 static int      jme_msix_enable = 1;
241
242 TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
243 TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
244 TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
245 TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
246 TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
247
/*
 * (Re)program an Rx descriptor with the saved physical address of its
 * mbuf cluster and hand the descriptor back to the hardware.  The
 * physical address is cached in rxd->rx_paddr so the descriptor can be
 * reloaded without a fresh bus_dmamap_load (e.g. on Rx error reuse).
 */
static __inline void
jme_setup_rxdesc(struct jme_rxdesc *rxd)
{
        struct jme_desc *desc;

        desc = rxd->rx_desc;
        /* Rx buffers are always full mbuf clusters. */
        desc->buflen = htole32(MCLBYTES);
        desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
        desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
        /*
         * Flags (including JME_RD_OWN) are written after the address
         * fields; OWN marks the descriptor as usable by the hardware.
         */
        desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
}
259
260 /*
261  *      Read a PHY register on the MII of the JMC250.
262  */
263 static int
264 jme_miibus_readreg(device_t dev, int phy, int reg)
265 {
266         struct jme_softc *sc = device_get_softc(dev);
267         uint32_t val;
268         int i;
269
270         /* For FPGA version, PHY address 0 should be ignored. */
271         if (sc->jme_caps & JME_CAP_FPGA) {
272                 if (phy == 0)
273                         return (0);
274         } else {
275                 if (sc->jme_phyaddr != phy)
276                         return (0);
277         }
278
279         CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
280             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
281
282         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
283                 DELAY(1);
284                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
285                         break;
286         }
287         if (i == 0) {
288                 device_printf(sc->jme_dev, "phy read timeout: "
289                               "phy %d, reg %d\n", phy, reg);
290                 return (0);
291         }
292
293         return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
294 }
295
296 /*
297  *      Write a PHY register on the MII of the JMC250.
298  */
299 static int
300 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
301 {
302         struct jme_softc *sc = device_get_softc(dev);
303         int i;
304
305         /* For FPGA version, PHY address 0 should be ignored. */
306         if (sc->jme_caps & JME_CAP_FPGA) {
307                 if (phy == 0)
308                         return (0);
309         } else {
310                 if (sc->jme_phyaddr != phy)
311                         return (0);
312         }
313
314         CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
315             ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
316             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
317
318         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
319                 DELAY(1);
320                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
321                         break;
322         }
323         if (i == 0) {
324                 device_printf(sc->jme_dev, "phy write timeout: "
325                               "phy %d, reg %d\n", phy, reg);
326         }
327
328         return (0);
329 }
330
/*
 *	Callback from MII layer when media changes.
 *
 *	Re-resolves link state, fully stops the MAC, reclaims all Tx/Rx
 *	resources, reprograms the ring base addresses and — if link is
 *	up — restarts the receiver and transmitter.  Must be called with
 *	the interface fully serialized.
 */
static void
jme_miibus_statchg(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii;
        struct jme_txdesc *txd;
        bus_addr_t paddr;
        int i, r;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return;

        mii = device_get_softc(sc->jme_miibus);

        /* Re-derive link state from the MII status. */
        sc->jme_flags &= ~JME_FLAG_LINK;
        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->jme_flags |= JME_FLAG_LINK;
                        break;
                case IFM_1000_T:
                        /* Fast-ethernet-only parts cannot do 1000baseT. */
                        if (sc->jme_caps & JME_CAP_FASTETH)
                                break;
                        sc->jme_flags |= JME_FLAG_LINK;
                        break;
                default:
                        break;
                }
        }

        /*
         * Disabling Rx/Tx MACs have a side-effect of resetting
         * JME_TXNDA/JME_RXNDA register to the first address of
         * Tx/Rx descriptor address. So driver should reset its
         * internal producer/consumer pointer and reclaim any
         * allocated resources.  Note, just saving the value of
         * JME_TXNDA and JME_RXNDA registers before stopping MAC
         * and restoring JME_TXNDA/JME_RXNDA register is not
         * sufficient to make sure correct MAC state because
         * stopping MAC operation can take a while and hardware
         * might have updated JME_TXNDA/JME_RXNDA registers
         * during the stop operation.
         */

        /* Disable interrupts */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

        /* Stop driver */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;
        callout_stop(&sc->jme_tick_ch);

        /* Stop receiver/transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);

        /* Drain every Rx ring and drop any partially assembled frame. */
        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

                jme_rxeof(rdata, -1);
                if (rdata->jme_rxhead != NULL)
                        m_freem(rdata->jme_rxhead);
                JME_RXCHAIN_RESET(rdata);

                /*
                 * Reuse configured Rx descriptors and reset
                 * producer/consumer index.
                 */
                rdata->jme_rx_cons = 0;
        }

        /* Reclaim completed Tx descriptors, then free whatever remains. */
        jme_txeof(sc);
        if (sc->jme_cdata.jme_tx_cnt != 0) {
                /* Remove queued packets for transmit. */
                for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
                        txd = &sc->jme_cdata.jme_txdesc[i];
                        if (txd->tx_m != NULL) {
                                bus_dmamap_unload(
                                    sc->jme_cdata.jme_tx_tag,
                                    txd->tx_dmamap);
                                m_freem(txd->tx_m);
                                txd->tx_m = NULL;
                                txd->tx_ndesc = 0;
                                ifp->if_oerrors++;
                        }
                }
        }
        jme_init_tx_ring(sc);

        /* Initialize shadow status block. */
        jme_init_ssb(sc);

        /* Program MAC with resolved speed/duplex/flow-control. */
        if (sc->jme_flags & JME_FLAG_LINK) {
                jme_mac_config(sc);

                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

                /* Set Tx ring address to the hardware. */
                paddr = sc->jme_cdata.jme_tx_ring_paddr;
                CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
                CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

                for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                        /* Select queue r before writing its ring address. */
                        CSR_WRITE_4(sc, JME_RXCSR,
                            sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

                        /* Set Rx ring address to the hardware. */
                        paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
                        CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
                        CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
                }

                /* Restart receiver/transmitter. */
                CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
                    RXCSR_RXQ_START);
                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
        }

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
        if (!(ifp->if_flags & IFF_POLLING))
#endif
        /* Reenable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
467
468 /*
469  *      Get the current interface media status.
470  */
471 static void
472 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
473 {
474         struct jme_softc *sc = ifp->if_softc;
475         struct mii_data *mii = device_get_softc(sc->jme_miibus);
476
477         ASSERT_IFNET_SERIALIZED_ALL(ifp);
478
479         mii_pollstat(mii);
480         ifmr->ifm_status = mii->mii_media_status;
481         ifmr->ifm_active = mii->mii_media_active;
482 }
483
484 /*
485  *      Set hardware to newly-selected media.
486  */
487 static int
488 jme_mediachange(struct ifnet *ifp)
489 {
490         struct jme_softc *sc = ifp->if_softc;
491         struct mii_data *mii = device_get_softc(sc->jme_miibus);
492         int error;
493
494         ASSERT_IFNET_SERIALIZED_ALL(ifp);
495
496         if (mii->mii_instance != 0) {
497                 struct mii_softc *miisc;
498
499                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
500                         mii_phy_reset(miisc);
501         }
502         error = mii_mediachg(mii);
503
504         return (error);
505 }
506
507 static int
508 jme_probe(device_t dev)
509 {
510         const struct jme_dev *sp;
511         uint16_t vid, did;
512
513         vid = pci_get_vendor(dev);
514         did = pci_get_device(dev);
515         for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
516                 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
517                         struct jme_softc *sc = device_get_softc(dev);
518
519                         sc->jme_caps = sp->jme_caps;
520                         device_set_desc(dev, sp->jme_name);
521                         return (0);
522                 }
523         }
524         return (ENXIO);
525 }
526
527 static int
528 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
529 {
530         uint32_t reg;
531         int i;
532
533         *val = 0;
534         for (i = JME_TIMEOUT; i > 0; i--) {
535                 reg = CSR_READ_4(sc, JME_SMBCSR);
536                 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
537                         break;
538                 DELAY(1);
539         }
540
541         if (i == 0) {
542                 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
543                 return (ETIMEDOUT);
544         }
545
546         reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
547         CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
548         for (i = JME_TIMEOUT; i > 0; i--) {
549                 DELAY(1);
550                 reg = CSR_READ_4(sc, JME_SMBINTF);
551                 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
552                         break;
553         }
554
555         if (i == 0) {
556                 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
557                 return (ETIMEDOUT);
558         }
559
560         reg = CSR_READ_4(sc, JME_SMBINTF);
561         *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
562
563         return (0);
564 }
565
/*
 * Try to recover the station address from the EEPROM by walking its
 * descriptor list and collecting bytes destined for the PAR0..PAR5
 * registers.  Returns 0 once all ETHER_ADDR_LEN bytes are found,
 * ENOENT when the signature or a complete address is missing.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
        uint8_t fup, reg, val;
        uint32_t offset;
        int match;

        /* The EEPROM must start with the two signature bytes. */
        offset = 0;
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG0)
                return (ENOENT);
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG1)
                return (ENOENT);
        match = 0;
        do {
                if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
                        break;
                /* Only function-0 descriptors targeting BAR1 are relevant. */
                if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
                    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
                        if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
                                break;
                        if (reg >= JME_PAR0 &&
                            reg < JME_PAR0 + ETHER_ADDR_LEN) {
                                /* One more byte of the station address. */
                                if (jme_eeprom_read_byte(sc, offset + 2,
                                    &val) != 0)
                                        break;
                                eaddr[reg - JME_PAR0] = val;
                                match++;
                        }
                }
                /* Check for the end of EEPROM descriptor. */
                if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
                        break;
                /* Try next eeprom descriptor. */
                offset += JME_EEPROM_DESC_BYTES;
        } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

        if (match == ETHER_ADDR_LEN)
                return (0);

        return (ENOENT);
}
609
610 static void
611 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
612 {
613         uint32_t par0, par1;
614
615         /* Read station address. */
616         par0 = CSR_READ_4(sc, JME_PAR0);
617         par1 = CSR_READ_4(sc, JME_PAR1);
618         par1 &= 0xFFFF;
619         if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
620                 device_printf(sc->jme_dev,
621                     "generating fake ethernet address.\n");
622                 par0 = karc4random();
623                 /* Set OUI to JMicron. */
624                 eaddr[0] = 0x00;
625                 eaddr[1] = 0x1B;
626                 eaddr[2] = 0x8C;
627                 eaddr[3] = (par0 >> 16) & 0xff;
628                 eaddr[4] = (par0 >> 8) & 0xff;
629                 eaddr[5] = par0 & 0xff;
630         } else {
631                 eaddr[0] = (par0 >> 0) & 0xFF;
632                 eaddr[1] = (par0 >> 8) & 0xFF;
633                 eaddr[2] = (par0 >> 16) & 0xFF;
634                 eaddr[3] = (par0 >> 24) & 0xFF;
635                 eaddr[4] = (par1 >> 0) & 0xFF;
636                 eaddr[5] = (par1 >> 8) & 0xFF;
637         }
638 }
639
640 static int
641 jme_attach(device_t dev)
642 {
643         struct jme_softc *sc = device_get_softc(dev);
644         struct ifnet *ifp = &sc->arpcom.ac_if;
645         uint32_t reg;
646         uint16_t did;
647         uint8_t pcie_ptr, rev;
648         int error = 0, i, j, rx_desc_cnt;
649         uint8_t eaddr[ETHER_ADDR_LEN];
650
651         lwkt_serialize_init(&sc->jme_serialize);
652         lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
653         for (i = 0; i < JME_NRXRING_MAX; ++i) {
654                 lwkt_serialize_init(
655                     &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
656         }
657
658         rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
659             jme_rx_desc_count);
660         rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
661         if (rx_desc_cnt > JME_NDESC_MAX)
662                 rx_desc_cnt = JME_NDESC_MAX;
663
664         sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
665             jme_tx_desc_count);
666         sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
667             JME_NDESC_ALIGN);
668         if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
669                 sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
670
671         /*
672          * Calculate rx rings
673          */
674         sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
675             jme_rx_ring_count);
676         sc->jme_cdata.jme_rx_ring_cnt =
677             if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
678
679         i = 0;
680         sc->jme_serialize_arr[i++] = &sc->jme_serialize;
681         sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
682         for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
683                 sc->jme_serialize_arr[i++] =
684                     &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
685         }
686         KKASSERT(i <= JME_NSERIALIZE);
687         sc->jme_serialize_cnt = i;
688
689         sc->jme_cdata.jme_sc = sc;
690         for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
691                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
692
693                 rdata->jme_sc = sc;
694                 rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
695                 rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
696                 rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
697                 rdata->jme_rx_idx = i;
698                 rdata->jme_rx_desc_cnt = rx_desc_cnt;
699         }
700
701         sc->jme_dev = dev;
702         sc->jme_lowaddr = BUS_SPACE_MAXADDR;
703
704         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
705
706         callout_init(&sc->jme_tick_ch);
707
708 #ifndef BURN_BRIDGES
709         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
710                 uint32_t irq, mem;
711
712                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
713                 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
714
715                 device_printf(dev, "chip is in D%d power mode "
716                     "-- setting to D0\n", pci_get_powerstate(dev));
717
718                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
719
720                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
721                 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
722         }
723 #endif  /* !BURN_BRIDGES */
724
725         /* Enable bus mastering */
726         pci_enable_busmaster(dev);
727
728         /*
729          * Allocate IO memory
730          *
731          * JMC250 supports both memory mapped and I/O register space
732          * access.  Because I/O register access should use different
733  * BARs to access registers, it's a waste of time to use I/O
734  * register space access.  JMC250 uses 16K to map the entire memory
735          * space.
736          */
737         sc->jme_mem_rid = JME_PCIR_BAR;
738         sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
739                                                  &sc->jme_mem_rid, RF_ACTIVE);
740         if (sc->jme_mem_res == NULL) {
741                 device_printf(dev, "can't allocate IO memory\n");
742                 return ENXIO;
743         }
744         sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
745         sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
746
747         /*
748          * Allocate IRQ
749          */
750         error = jme_intr_alloc(dev);
751         if (error)
752                 goto fail;
753
754         /*
755          * Extract revisions
756          */
757         reg = CSR_READ_4(sc, JME_CHIPMODE);
758         if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
759             CHIPMODE_NOT_FPGA) {
760                 sc->jme_caps |= JME_CAP_FPGA;
761                 if (bootverbose) {
762                         device_printf(dev, "FPGA revision: 0x%04x\n",
763                                       (reg & CHIPMODE_FPGA_REV_MASK) >>
764                                       CHIPMODE_FPGA_REV_SHIFT);
765                 }
766         }
767
768         /* NOTE: FM revision is put in the upper 4 bits */
769         rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
770         rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
771         if (bootverbose)
772                 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
773
774         did = pci_get_device(dev);
775         switch (did) {
776         case PCI_PRODUCT_JMICRON_JMC250:
777                 if (rev == JME_REV1_A2)
778                         sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
779                 break;
780
781         case PCI_PRODUCT_JMICRON_JMC260:
782                 if (rev == JME_REV2)
783                         sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
784                 break;
785
786         default:
787                 panic("unknown device id 0x%04x", did);
788         }
789         if (rev >= JME_REV2) {
790                 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
791                 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
792                                       GHC_TXMAC_CLKSRC_1000;
793         }
794
795         /* Reset the ethernet controller. */
796         jme_reset(sc);
797
798         /* Map MSI/MSI-X vectors */
799         jme_set_msinum(sc);
800
801         /* Get station address. */
802         reg = CSR_READ_4(sc, JME_SMBCSR);
803         if (reg & SMBCSR_EEPROM_PRESENT)
804                 error = jme_eeprom_macaddr(sc, eaddr);
805         if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
806                 if (error != 0 && (bootverbose)) {
807                         device_printf(dev, "ethernet hardware address "
808                                       "not found in EEPROM.\n");
809                 }
810                 jme_reg_macaddr(sc, eaddr);
811         }
812
813         /*
814          * Save PHY address.
815          * Integrated JR0211 has fixed PHY address whereas FPGA version
816          * requires PHY probing to get correct PHY address.
817          */
818         if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
819                 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
820                     GPREG0_PHY_ADDR_MASK;
821                 if (bootverbose) {
822                         device_printf(dev, "PHY is at address %d.\n",
823                             sc->jme_phyaddr);
824                 }
825         } else {
826                 sc->jme_phyaddr = 0;
827         }
828
829         /* Set max allowable DMA size. */
830         pcie_ptr = pci_get_pciecap_ptr(dev);
831         if (pcie_ptr != 0) {
832                 uint16_t ctrl;
833
834                 sc->jme_caps |= JME_CAP_PCIE;
835                 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
836                 if (bootverbose) {
837                         device_printf(dev, "Read request size : %d bytes.\n",
838                             128 << ((ctrl >> 12) & 0x07));
839                         device_printf(dev, "TLP payload size : %d bytes.\n",
840                             128 << ((ctrl >> 5) & 0x07));
841                 }
842                 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
843                 case PCIEM_DEVCTL_MAX_READRQ_128:
844                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
845                         break;
846                 case PCIEM_DEVCTL_MAX_READRQ_256:
847                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
848                         break;
849                 default:
850                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
851                         break;
852                 }
853                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
854         } else {
855                 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
856                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
857         }
858
859 #ifdef notyet
860         if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
861                 sc->jme_caps |= JME_CAP_PMCAP;
862 #endif
863
864         /*
865          * Create sysctl tree
866          */
867         jme_sysctl_node(sc);
868
869         /* Allocate DMA stuffs */
870         error = jme_dma_alloc(sc);
871         if (error)
872                 goto fail;
873
874         ifp->if_softc = sc;
875         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
876         ifp->if_init = jme_init;
877         ifp->if_ioctl = jme_ioctl;
878         ifp->if_start = jme_start;
879 #ifdef DEVICE_POLLING
880         ifp->if_poll = jme_poll;
881 #endif
882         ifp->if_watchdog = jme_watchdog;
883         ifp->if_serialize = jme_serialize;
884         ifp->if_deserialize = jme_deserialize;
885         ifp->if_tryserialize = jme_tryserialize;
886 #ifdef INVARIANTS
887         ifp->if_serialize_assert = jme_serialize_assert;
888 #endif
889         ifq_set_maxlen(&ifp->if_snd,
890             sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
891         ifq_set_ready(&ifp->if_snd);
892
893         /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
894         ifp->if_capabilities = IFCAP_HWCSUM |
895                                IFCAP_VLAN_MTU |
896                                IFCAP_VLAN_HWTAGGING;
897         if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
898                 ifp->if_capabilities |= IFCAP_RSS;
899         ifp->if_capenable = ifp->if_capabilities;
900
901         /*
902          * Disable TXCSUM by default to improve bulk data
903          * transmit performance (+20Mbps improvement).
904          */
905         ifp->if_capenable &= ~IFCAP_TXCSUM;
906
907         if (ifp->if_capenable & IFCAP_TXCSUM)
908                 ifp->if_hwassist = JME_CSUM_FEATURES;
909
910         /* Set up MII bus. */
911         error = mii_phy_probe(dev, &sc->jme_miibus,
912                               jme_mediachange, jme_mediastatus);
913         if (error) {
914                 device_printf(dev, "no PHY found!\n");
915                 goto fail;
916         }
917
918         /*
919          * Save PHYADDR for FPGA mode PHY.
920          */
921         if (sc->jme_caps & JME_CAP_FPGA) {
922                 struct mii_data *mii = device_get_softc(sc->jme_miibus);
923
924                 if (mii->mii_instance != 0) {
925                         struct mii_softc *miisc;
926
927                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
928                                 if (miisc->mii_phy != 0) {
929                                         sc->jme_phyaddr = miisc->mii_phy;
930                                         break;
931                                 }
932                         }
933                         if (sc->jme_phyaddr != 0) {
934                                 device_printf(sc->jme_dev,
935                                     "FPGA PHY is at %d\n", sc->jme_phyaddr);
936                                 /* vendor magic. */
937                                 jme_miibus_writereg(dev, sc->jme_phyaddr,
938                                     JMPHY_CONF, JMPHY_CONF_DEFFIFO);
939
940                                 /* XXX should we clear JME_WA_EXTFIFO */
941                         }
942                 }
943         }
944
945         ether_ifattach(ifp, eaddr, NULL);
946
947         /* Tell the upper layer(s) we support long frames. */
948         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
949
950         error = jme_intr_setup(dev);
951         if (error) {
952                 ether_ifdetach(ifp);
953                 goto fail;
954         }
955
956         return 0;
957 fail:
958         jme_detach(dev);
959         return (error);
960 }
961
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	/*
	 * Device detach method.  Also invoked from the jme_attach()
	 * failure path, so every teardown step below checks whether
	 * its resource was actually set up before releasing it.
	 */
	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/*
		 * Stop the chip and tear down the interrupt handlers
		 * under all interface serializers, then detach from
		 * the network stack.
		 */
		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	/* Delete the miibus child before generic bus detach. */
	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	/* Free descriptor rings, buffers, maps and DMA tags. */
	jme_dma_free(sc);

	return (0);
}
996
/*
 * Create the per-device sysctl tree (hw.<nameunit>) and register the
 * Tx/Rx coalescing knobs, descriptor/ring count read-only nodes and,
 * when compiled with JME_RSS_DEBUG, per-ring packet counters.  Also
 * establishes the default coalescing parameters and clamps them
 * against the configured descriptor counts.
 */
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(sc->jme_dev),
				CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		/* Non-fatal: the driver works without the sysctl tree. */
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_tx_desc_cnt,
		       0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD,
		       &sc->jme_cdata.jme_rx_ring_cnt,
		       0, "RX ring count");
#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
		       0, "RSS debug level");
	/* One packet counter node per RX ring: rx_ring<N>_pkt. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_pkt[32];

		ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_pkt, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
	}
#endif

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case that the number of TX/RX
	 * descs are set to small values by users.
	 *
	 * NOTE: coal_max will not be zero, since number of descs
	 * must aligned by JME_NDESC_ALIGN (16 currently)
	 */
	coal_max = sc->jme_cdata.jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}
1089
/*
 * Allocate all DMA resources: the software Tx/Rx descriptor arrays,
 * the Tx descriptor ring, the per-ring Rx descriptor rings, the
 * shadow status block and the per-buffer DMA tags/maps.
 *
 * On failure this returns the bus_dma error without unwinding;
 * the caller (jme_attach) jumps to its fail path, which runs
 * jme_detach() -> jme_dma_free() to release whatever was created.
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	/* Software descriptor bookkeeping arrays (zeroed). */
	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rxdesc =
		kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			/*
			 * Destroy the maps created so far and NULL the
			 * tag, so jme_dma_free() skips the Tx buffers.
			 */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
1238
/*
 * Free everything allocated by jme_dma_alloc().  Tolerates a
 * partially-completed allocation: every tag is checked for NULL
 * before its resources are torn down, and each tag/pointer is
 * reset to NULL afterwards so a second call is harmless.
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers (per-descriptor maps plus the spare map) */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags are destroyed after all of their children. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	/* Software descriptor bookkeeping arrays. */
	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
1331
1332 /*
1333  *      Make sure the interface is stopped at reboot time.
1334  */
1335 static int
1336 jme_shutdown(device_t dev)
1337 {
1338         return jme_suspend(dev);
1339 }
1340
1341 #ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be explicitly reset to 10/100Mbps, because a gigabit
 * link consumes more than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation, but we don't know whether that operation
 * will succeed, as we have no control after powering
 * off.  If the renegotiation fails, WOL may not work.  Running
 * at 1Gbps draws more than the 375mA at 3.3V specified in the
 * PCI specification, which would result in power to the
 * ethernet controller being shut down completely.
 *
 * TODO
 *  Save current negotiated media speed/duplex/flow-control
 *  to softc and restore the same link again after resuming.
 *  PHY handling such as power down/resetting to 100Mbps
 *  may be better handled in suspend method in phy driver.
 */
1360 static void
1361 jme_setlinkspeed(struct jme_softc *sc)
1362 {
1363         struct mii_data *mii;
1364         int aneg, i;
1365
1366         JME_LOCK_ASSERT(sc);
1367
1368         mii = device_get_softc(sc->jme_miibus);
1369         mii_pollstat(mii);
1370         aneg = 0;
1371         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1372                 switch IFM_SUBTYPE(mii->mii_media_active) {
1373                 case IFM_10_T:
1374                 case IFM_100_TX:
1375                         return;
1376                 case IFM_1000_T:
1377                         aneg++;
1378                 default:
1379                         break;
1380                 }
1381         }
1382         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1383         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1384             ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1385         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1386             BMCR_AUTOEN | BMCR_STARTNEG);
1387         DELAY(1000);
1388         if (aneg != 0) {
1389                 /* Poll link state until jme(4) get a 10/100 link. */
1390                 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1391                         mii_pollstat(mii);
1392                         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1393                                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1394                                 case IFM_10_T:
1395                                 case IFM_100_TX:
1396                                         jme_mac_config(sc);
1397                                         return;
1398                                 default:
1399                                         break;
1400                                 }
1401                         }
1402                         JME_UNLOCK(sc);
1403                         pause("jmelnk", hz);
1404                         JME_LOCK(sc);
1405                 }
1406                 if (i == MII_ANEGTICKS_GIGE)
1407                         device_printf(sc->jme_dev, "establishing link failed, "
1408                             "WOL may not work!");
1409         }
1410         /*
1411          * No link, force MAC to have 100Mbps, full-duplex link.
1412          * This is the last resort and may/may not work.
1413          */
1414         mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1415         mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1416         jme_mac_config(sc);
1417 }
1418
/*
 * Configure Wake On LAN.  Without a PCI PME capability the best we
 * can do is power down the PHY.  Otherwise program the magic-frame
 * wakeup bits and the PME message enable, then mirror the WOL state
 * into the PCI power management status register.
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	/* Start from a clean WOL configuration. */
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME via the PCI power management registers. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1461 #endif
1462
1463 static int
1464 jme_suspend(device_t dev)
1465 {
1466         struct jme_softc *sc = device_get_softc(dev);
1467         struct ifnet *ifp = &sc->arpcom.ac_if;
1468
1469         ifnet_serialize_all(ifp);
1470         jme_stop(sc);
1471 #ifdef notyet
1472         jme_setwol(sc);
1473 #endif
1474         ifnet_deserialize_all(ifp);
1475
1476         return (0);
1477 }
1478
1479 static int
1480 jme_resume(device_t dev)
1481 {
1482         struct jme_softc *sc = device_get_softc(dev);
1483         struct ifnet *ifp = &sc->arpcom.ac_if;
1484 #ifdef notyet
1485         int pmc;
1486 #endif
1487
1488         ifnet_serialize_all(ifp);
1489
1490 #ifdef notyet
1491         if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1492                 uint16_t pmstat;
1493
1494                 pmstat = pci_read_config(sc->jme_dev,
1495                     pmc + PCIR_POWER_STATUS, 2);
1496                 /* Disable PME clear PME status. */
1497                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1498                 pci_write_config(sc->jme_dev,
1499                     pmc + PCIR_POWER_STATUS, pmstat, 2);
1500         }
1501 #endif
1502
1503         if (ifp->if_flags & IFF_UP)
1504                 jme_init(sc);
1505
1506         ifnet_deserialize_all(ifp);
1507
1508         return (0);
1509 }
1510
/*
 * Load the mbuf chain *m_head into the Tx descriptor ring.
 *
 * When the chip can address more than 32 bits (jme_lowaddr !=
 * BUS_SPACE_MAXADDR_32BIT) the 64-bit descriptor format is used and
 * the chain is prefixed with a "symbol" descriptor that carries no
 * payload; otherwise the first descriptor carries the first data
 * segment directly.
 *
 * Returns 0 on success.  On failure the mbuf is freed and *m_head
 * is set to NULL.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/* 64-bit chains spend one extra descriptor on the symbol desc. */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	/* Segments available after reserving JME_TXD_RSVD descriptors. */
	maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d", maxsegs));

	/* May defragment the mbuf chain to fit within maxsegs segments. */
	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	/*
	 * Set up the head descriptor.  addr_hi of the first descriptor
	 * holds the total packet length rather than an address.
	 */
	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);

	/*
	 * Fill the remaining segments.  Hardware ownership (JME_TD_OWN)
	 * is set on these immediately; the head descriptor is handed
	 * over last, below, so the chip never sees a partial chain.
	 */
	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
1633
1634 static void
1635 jme_start(struct ifnet *ifp)
1636 {
1637         struct jme_softc *sc = ifp->if_softc;
1638         struct mbuf *m_head;
1639         int enq = 0;
1640
1641         ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
1642
1643         if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1644                 ifq_purge(&ifp->if_snd);
1645                 return;
1646         }
1647
1648         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1649                 return;
1650
1651         if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
1652                 jme_txeof(sc);
1653
1654         while (!ifq_is_empty(&ifp->if_snd)) {
1655                 /*
1656                  * Check number of available TX descs, always
1657                  * leave JME_TXD_RSVD free TX descs.
1658                  */
1659                 if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
1660                     sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
1661                         ifp->if_flags |= IFF_OACTIVE;
1662                         break;
1663                 }
1664
1665                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1666                 if (m_head == NULL)
1667                         break;
1668
1669                 /*
1670                  * Pack the data into the transmit ring. If we
1671                  * don't have room, set the OACTIVE flag and wait
1672                  * for the NIC to drain the ring.
1673                  */
1674                 if (jme_encap(sc, &m_head)) {
1675                         KKASSERT(m_head == NULL);
1676                         ifp->if_oerrors++;
1677                         ifp->if_flags |= IFF_OACTIVE;
1678                         break;
1679                 }
1680                 enq++;
1681
1682                 /*
1683                  * If there's a BPF listener, bounce a copy of this frame
1684                  * to him.
1685                  */
1686                 ETHER_BPF_MTAP(ifp, m_head);
1687         }
1688
1689         if (enq > 0) {
1690                 /*
1691                  * Reading TXCSR takes very long time under heavy load
1692                  * so cache TXCSR value and writes the ORed value with
1693                  * the kick command to the TXCSR. This saves one register
1694                  * access cycle.
1695                  */
1696                 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1697                     TXCSR_TXQ_N_START(TXCSR_TXQ0));
1698                 /* Set a timeout in case the chip goes out to lunch. */
1699                 ifp->if_timer = JME_TX_TIMEOUT;
1700         }
1701 }
1702
1703 static void
1704 jme_watchdog(struct ifnet *ifp)
1705 {
1706         struct jme_softc *sc = ifp->if_softc;
1707
1708         ASSERT_IFNET_SERIALIZED_ALL(ifp);
1709
1710         if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1711                 if_printf(ifp, "watchdog timeout (missed link)\n");
1712                 ifp->if_oerrors++;
1713                 jme_init(sc);
1714                 return;
1715         }
1716
1717         jme_txeof(sc);
1718         if (sc->jme_cdata.jme_tx_cnt == 0) {
1719                 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1720                           "-- recovering\n");
1721                 if (!ifq_is_empty(&ifp->if_snd))
1722                         if_devstart(ifp);
1723                 return;
1724         }
1725
1726         if_printf(ifp, "watchdog timeout\n");
1727         ifp->if_oerrors++;
1728         jme_init(sc);
1729         if (!ifq_is_empty(&ifp->if_snd))
1730                 if_devstart(ifp);
1731 }
1732
/*
 * Interface ioctl handler.  Handles MTU changes (with TX checksum
 * offload availability re-checked against the 2KB TX FIFO), interface
 * flag changes, multicast filter updates, media selection and
 * capability toggling; everything else is passed to ether_ioctl().
 * Called with all interface serializers held.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTUs are only legal on chips with JME_CAP_JUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Only reprogram the RX filter when the
				 * PROMISC/ALLMULTI state actually changed.
				 */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		/* Remember the flags for the next delta check. */
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* TX csum can only be enabled while the MTU fits the FIFO. */
		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			/* Mirror the capability into the RXMAC csum bit. */
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1832
/*
 * Reprogram GHC/RXMAC/TXMAC/TXPFC to match the speed/duplex resolved
 * by the PHY, and apply chip workarounds: extended PHY FIFO depth at
 * 100Mbps (pre-JMC250B CRC errors) and the GPREG1 half-duplex
 * workaround.  The register write ordering here is deliberate.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	/* Pulse the MAC reset before reprogramming. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision detection/backoff needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		/* Half duplex: collision handling required. */
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only chips cannot do gigabit. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	/* Apply the workarounds only on chips flagged as needing them. */
	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
1927
/*
 * Legacy interrupt handler.  Masks all interrupts, acks the asserted
 * status bits (merging coalesce/timeout bits with the corresponding
 * completion bits), dispatches RX/TX processing while IFF_RUNNING,
 * and finally re-enables interrupts.  Called with jme_serialize held.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	/* 0xFFFFFFFF typically means the device is gone (e.g. detached). */
	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	/* Ack TX coalesce together with TX completion. */
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	/* Ack per-ring RX coalesce together with that ring's completion. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			/* TX path has its own serializer. */
			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
1992
/*
 * Reclaim completed TX descriptors: walk the ring from the consumer
 * index up to the producer index, free transmitted mbufs, update
 * error/collision statistics, and clear IFF_OACTIVE once enough ring
 * space is available again.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!", __func__));

		/* Hardware still owns this frame; stop reclaiming. */
		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			/* The collision count is reported in buflen. */
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of a multi-descriptor
		 * transmission is updated, so the driver has to skip the
		 * entire chained buffers for the transmitted frame. In
		 * other words, the JME_TD_OWN bit is valid only at the
		 * first descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* Nothing pending: disarm the watchdog. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	/* Re-open the send queue once a worst-case frame fits again. */
	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
2059
2060 static __inline void
2061 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2062 {
2063         int i;
2064
2065         for (i = 0; i < count; ++i) {
2066                 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2067                 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2068         }
2069 }
2070
2071 static __inline struct pktinfo *
2072 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2073 {
2074         if (flags & JME_RD_IPV4)
2075                 pi->pi_netisr = NETISR_IP;
2076         else if (flags & JME_RD_IPV6)
2077                 pi->pi_netisr = NETISR_IPV6;
2078         else
2079                 return NULL;
2080
2081         pi->pi_flags = 0;
2082         pi->pi_l3proto = IPPROTO_UNKNOWN;
2083
2084         if (flags & JME_RD_MORE_FRAG)
2085                 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2086         else if (flags & JME_RD_TCP)
2087                 pi->pi_l3proto = IPPROTO_TCP;
2088         else if (flags & JME_RD_UDP)
2089                 pi->pi_l3proto = IPPROTO_UDP;
2090         else
2091                 pi = NULL;
2092         return pi;
2093 }
2094
/*
 * Receive a frame.  Pulls one complete (possibly multi-descriptor)
 * frame off the RX ring starting at the consumer index: chains the
 * segment mbufs, fixes up segment lengths, fills in checksum/VLAN/RSS
 * metadata and passes the frame to ether_input_pkt().  On error or
 * buffer shortage the descriptors are recycled in place.  Advances
 * jme_rx_cons by the frame's segment count in all cases.
 */
static void
jme_rxpkt(struct jme_rxdata *rdata)
{
	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	/* Frame status lives in the first descriptor of the frame. */
	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
			"hash 0x%08x, hash info 0x%08x\n",
			rdata->jme_rx_idx, flags, hash, hashinfo);

	/* Errored frame: recycle all of its descriptors and bail. */
	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(rdata, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		if_printf(ifp, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(rdata, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(rdata, cons, nsegs - count);
			/* Drop whatever was chained so far. */
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(rdata);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				/* L4 csum is valid only when unfragmented. */
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;

			/* Attach RSS hash information, if enabled. */
			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(rdata->jme_sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			/* Pass it on. */
			ether_input_pkt(ifp, m, pi);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(rdata);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_pkt++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
}
2256
2257 static void
2258 jme_rxeof(struct jme_rxdata *rdata, int count)
2259 {
2260         struct jme_desc *desc;
2261         int nsegs, pktlen;
2262
2263         for (;;) {
2264 #ifdef DEVICE_POLLING
2265                 if (count >= 0 && count-- == 0)
2266                         break;
2267 #endif
2268                 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2269                 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2270                         break;
2271                 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2272                         break;
2273
2274                 /*
2275                  * Check number of segments against received bytes.
2276                  * Non-matching value would indicate that hardware
2277                  * is still trying to update Rx descriptors. I'm not
2278                  * sure whether this check is needed.
2279                  */
2280                 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2281                 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2282                 if (nsegs != howmany(pktlen, MCLBYTES)) {
2283                         if_printf(&rdata->jme_sc->arpcom.ac_if,
2284                             "RX fragment count(%d) and "
2285                             "packet size(%d) mismach\n", nsegs, pktlen);
2286                         break;
2287                 }
2288
2289                 /* Received a frame. */
2290                 jme_rxpkt(rdata);
2291         }
2292 }
2293
2294 static void
2295 jme_tick(void *xsc)
2296 {
2297         struct jme_softc *sc = xsc;
2298         struct ifnet *ifp = &sc->arpcom.ac_if;
2299         struct mii_data *mii = device_get_softc(sc->jme_miibus);
2300
2301         ifnet_serialize_all(ifp);
2302
2303         mii_tick(mii);
2304         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2305
2306         ifnet_deserialize_all(ifp);
2307 }
2308
/*
 * Full chip reset.  Stops TX/RX, asserts GHC_RESET while toggling the
 * TXMAC/TXOFL/RXMAC clock sources in the sequence the hardware
 * requires, briefly enables TX/RX, and leaves the chip stopped.
 * The CSR_READ_4() calls after writes are posted-write flushes.
 */
static void
jme_reset(struct jme_softc *sc)
{
	uint32_t val;

	/* Make sure that TX and RX are stopped */
	jme_stop_tx(sc);
	jme_stop_rx(sc);

	/* Start reset */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(20);

	/*
	 * Hold reset bit before stop reset
	 */

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop reset */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/*
	 * Clear reset bit after stop reset
	 */

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Enable TX and RX */
	val = CSR_READ_4(sc, JME_TXCSR);
	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
	val = CSR_READ_4(sc, JME_RXCSR);
	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
	/* Flush */
	CSR_READ_4(sc, JME_TXCSR);
	CSR_READ_4(sc, JME_RXCSR);

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop TX and RX */
	jme_stop_tx(sc);
	jme_stop_rx(sc);
}
2380
2381 static void
2382 jme_init(void *xsc)
2383 {
2384         struct jme_softc *sc = xsc;
2385         struct ifnet *ifp = &sc->arpcom.ac_if;
2386         struct mii_data *mii;
2387         uint8_t eaddr[ETHER_ADDR_LEN];
2388         bus_addr_t paddr;
2389         uint32_t reg;
2390         int error, r;
2391
2392         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2393
2394         /*
2395          * Cancel any pending I/O.
2396          */
2397         jme_stop(sc);
2398
2399         /*
2400          * Reset the chip to a known state.
2401          */
2402         jme_reset(sc);
2403
2404         /*
2405          * Setup MSI/MSI-X vectors to interrupts mapping
2406          */
2407         jme_set_msinum(sc);
2408
2409         sc->jme_txd_spare =
2410         howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2411         KKASSERT(sc->jme_txd_spare >= 1);
2412
2413         /*
2414          * If we use 64bit address mode for transmitting, each Tx request
2415          * needs one more symbol descriptor.
2416          */
2417         if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2418                 sc->jme_txd_spare += 1;
2419
2420         if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
2421                 jme_enable_rss(sc);
2422         else
2423                 jme_disable_rss(sc);
2424
2425         /* Init RX descriptors */
2426         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2427                 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2428                 if (error) {
2429                         if_printf(ifp, "initialization failed: "
2430                                   "no memory for %dth RX ring.\n", r);
2431                         jme_stop(sc);
2432                         return;
2433                 }
2434         }
2435
2436         /* Init TX descriptors */
2437         jme_init_tx_ring(sc);
2438
2439         /* Initialize shadow status block. */
2440         jme_init_ssb(sc);
2441
2442         /* Reprogram the station address. */
2443         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2444         CSR_WRITE_4(sc, JME_PAR0,
2445             eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2446         CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2447
2448         /*
2449          * Configure Tx queue.
2450          *  Tx priority queue weight value : 0
2451          *  Tx FIFO threshold for processing next packet : 16QW
2452          *  Maximum Tx DMA length : 512
2453          *  Allow Tx DMA burst.
2454          */
2455         sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2456         sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2457         sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2458         sc->jme_txcsr |= sc->jme_tx_dma_size;
2459         sc->jme_txcsr |= TXCSR_DMA_BURST;
2460         CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2461
2462         /* Set Tx descriptor counter. */
2463         CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
2464
2465         /* Set Tx ring address to the hardware. */
2466         paddr = sc->jme_cdata.jme_tx_ring_paddr;
2467         CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2468         CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2469
2470         /* Configure TxMAC parameters. */
2471         reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2472         reg |= TXMAC_THRESH_1_PKT;
2473         reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2474         CSR_WRITE_4(sc, JME_TXMAC, reg);
2475
2476         /*
2477          * Configure Rx queue.
2478          *  FIFO full threshold for transmitting Tx pause packet : 128T
2479          *  FIFO threshold for processing next packet : 128QW
2480          *  Rx queue 0 select
2481          *  Max Rx DMA length : 128
2482          *  Rx descriptor retry : 32
2483          *  Rx descriptor retry time gap : 256ns
2484          *  Don't receive runt/bad frame.
2485          */
2486         sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2487 #if 0
2488         /*
2489          * Since Rx FIFO size is 4K bytes, receiving frames larger
2490          * than 4K bytes will suffer from Rx FIFO overruns. So
2491          * decrease FIFO threshold to reduce the FIFO overruns for
2492          * frames larger than 4000 bytes.
2493          * For best performance of standard MTU sized frames use
2494          * maximum allowable FIFO threshold, 128QW.
2495          */
2496         if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2497             JME_RX_FIFO_SIZE)
2498                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2499         else
2500                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2501 #else
2502         /* Improve PCI Express compatibility */
2503         sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2504 #endif
2505         sc->jme_rxcsr |= sc->jme_rx_dma_size;
2506         sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2507         sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2508         /* XXX TODO DROP_BAD */
2509
2510         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2511                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2512
2513                 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2514
2515                 /* Set Rx descriptor counter. */
2516                 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2517
2518                 /* Set Rx ring address to the hardware. */
2519                 paddr = rdata->jme_rx_ring_paddr;
2520                 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2521                 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2522         }
2523
2524         /* Clear receive filter. */
2525         CSR_WRITE_4(sc, JME_RXMAC, 0);
2526
2527         /* Set up the receive filter. */
2528         jme_set_filter(sc);
2529         jme_set_vlan(sc);
2530
2531         /*
2532          * Disable all WOL bits as WOL can interfere normal Rx
2533          * operation. Also clear WOL detection status bits.
2534          */
2535         reg = CSR_READ_4(sc, JME_PMCS);
2536         reg &= ~PMCS_WOL_ENB_MASK;
2537         CSR_WRITE_4(sc, JME_PMCS, reg);
2538
2539         /*
2540          * Pad 10bytes right before received frame. This will greatly
2541          * help Rx performance on strict-alignment architectures as
2542          * it does not need to copy the frame to align the payload.
2543          */
2544         reg = CSR_READ_4(sc, JME_RXMAC);
2545         reg |= RXMAC_PAD_10BYTES;
2546
2547         if (ifp->if_capenable & IFCAP_RXCSUM)
2548                 reg |= RXMAC_CSUM_ENB;
2549         CSR_WRITE_4(sc, JME_RXMAC, reg);
2550
2551         /* Configure general purpose reg0 */
2552         reg = CSR_READ_4(sc, JME_GPREG0);
2553         reg &= ~GPREG0_PCC_UNIT_MASK;
2554         /* Set PCC timer resolution to micro-seconds unit. */
2555         reg |= GPREG0_PCC_UNIT_US;
2556         /*
2557          * Disable all shadow register posting as we have to read
2558          * JME_INTR_STATUS register in jme_intr. Also it seems
2559          * that it's hard to synchronize interrupt status between
2560          * hardware and software with shadow posting due to
2561          * requirements of bus_dmamap_sync(9).
2562          */
2563         reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2564             GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2565             GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2566             GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2567         /* Disable posting of DW0. */
2568         reg &= ~GPREG0_POST_DW0_ENB;
2569         /* Clear PME message. */
2570         reg &= ~GPREG0_PME_ENB;
2571         /* Set PHY address. */
2572         reg &= ~GPREG0_PHY_ADDR_MASK;
2573         reg |= sc->jme_phyaddr;
2574         CSR_WRITE_4(sc, JME_GPREG0, reg);
2575
2576         /* Configure Tx queue 0 packet completion coalescing. */
2577         jme_set_tx_coal(sc);
2578
2579         /* Configure Rx queues packet completion coalescing. */
2580         jme_set_rx_coal(sc);
2581
2582         /* Configure shadow status block but don't enable posting. */
2583         paddr = sc->jme_cdata.jme_ssb_block_paddr;
2584         CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2585         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2586
2587         /* Disable Timer 1 and Timer 2. */
2588         CSR_WRITE_4(sc, JME_TIMER1, 0);
2589         CSR_WRITE_4(sc, JME_TIMER2, 0);
2590
2591         /* Configure retry transmit period, retry limit value. */
2592         CSR_WRITE_4(sc, JME_TXTRHD,
2593             ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2594             TXTRHD_RT_PERIOD_MASK) |
2595             ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2596             TXTRHD_RT_LIMIT_SHIFT));
2597
2598 #ifdef DEVICE_POLLING
2599         if (!(ifp->if_flags & IFF_POLLING))
2600 #endif
2601         /* Initialize the interrupt mask. */
2602         CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2603         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2604
2605         /*
2606          * Enabling Tx/Rx DMA engines and Rx queue processing is
2607          * done after detection of valid link in jme_miibus_statchg.
2608          */
2609         sc->jme_flags &= ~JME_FLAG_LINK;
2610
2611         /* Set the current media. */
2612         mii = device_get_softc(sc->jme_miibus);
2613         mii_mediachg(mii);
2614
2615         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2616
2617         ifp->if_flags |= IFF_RUNNING;
2618         ifp->if_flags &= ~IFF_OACTIVE;
2619 }
2620
/*
 * Bring the interface down: mask interrupts, halt both DMA engines
 * and release every mbuf still owned by the RX/TX rings.  Called
 * with all ifnet serializers held.
 */
static void
jme_stop(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct jme_txdesc *txd;
        struct jme_rxdesc *rxd;
        struct jme_rxdata *rdata;
        int i, r;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;

        /* Stop the periodic tick and forget the link state. */
        callout_stop(&sc->jme_tick_ch);
        sc->jme_flags &= ~JME_FLAG_LINK;

        /*
         * Disable interrupts.
         */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
        CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

        /* Disable updating shadow status block. */
        CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
            CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

        /* Stop receiver, transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);

        /*
         * Free partial finished RX segments
         */
        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                if (rdata->jme_rxhead != NULL)
                        m_freem(rdata->jme_rxhead);
                JME_RXCHAIN_RESET(rdata);
        }

        /*
         * Free RX and TX mbufs still in the queues.
         */
        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
                        rxd = &rdata->jme_rxdesc[i];
                        if (rxd->rx_m != NULL) {
                                /* Unmap before freeing the mbuf. */
                                bus_dmamap_unload(rdata->jme_rx_tag,
                                                  rxd->rx_dmamap);
                                m_freem(rxd->rx_m);
                                rxd->rx_m = NULL;
                        }
                }
        }
        for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
                txd = &sc->jme_cdata.jme_txdesc[i];
                if (txd->tx_m != NULL) {
                        bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
                            txd->tx_dmamap);
                        m_freem(txd->tx_m);
                        txd->tx_m = NULL;
                        txd->tx_ndesc = 0;
                }
        }
}
2691
2692 static void
2693 jme_stop_tx(struct jme_softc *sc)
2694 {
2695         uint32_t reg;
2696         int i;
2697
2698         reg = CSR_READ_4(sc, JME_TXCSR);
2699         if ((reg & TXCSR_TX_ENB) == 0)
2700                 return;
2701         reg &= ~TXCSR_TX_ENB;
2702         CSR_WRITE_4(sc, JME_TXCSR, reg);
2703         for (i = JME_TIMEOUT; i > 0; i--) {
2704                 DELAY(1);
2705                 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2706                         break;
2707         }
2708         if (i == 0)
2709                 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2710 }
2711
2712 static void
2713 jme_stop_rx(struct jme_softc *sc)
2714 {
2715         uint32_t reg;
2716         int i;
2717
2718         reg = CSR_READ_4(sc, JME_RXCSR);
2719         if ((reg & RXCSR_RX_ENB) == 0)
2720                 return;
2721         reg &= ~RXCSR_RX_ENB;
2722         CSR_WRITE_4(sc, JME_RXCSR, reg);
2723         for (i = JME_TIMEOUT; i > 0; i--) {
2724                 DELAY(1);
2725                 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2726                         break;
2727         }
2728         if (i == 0)
2729                 device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2730 }
2731
2732 static void
2733 jme_init_tx_ring(struct jme_softc *sc)
2734 {
2735         struct jme_chain_data *cd;
2736         struct jme_txdesc *txd;
2737         int i;
2738
2739         sc->jme_cdata.jme_tx_prod = 0;
2740         sc->jme_cdata.jme_tx_cons = 0;
2741         sc->jme_cdata.jme_tx_cnt = 0;
2742
2743         cd = &sc->jme_cdata;
2744         bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2745         for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2746                 txd = &sc->jme_cdata.jme_txdesc[i];
2747                 txd->tx_m = NULL;
2748                 txd->tx_desc = &cd->jme_tx_ring[i];
2749                 txd->tx_ndesc = 0;
2750         }
2751 }
2752
2753 static void
2754 jme_init_ssb(struct jme_softc *sc)
2755 {
2756         struct jme_chain_data *cd;
2757
2758         cd = &sc->jme_cdata;
2759         bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2760 }
2761
2762 static int
2763 jme_init_rx_ring(struct jme_rxdata *rdata)
2764 {
2765         struct jme_rxdesc *rxd;
2766         int i;
2767
2768         KKASSERT(rdata->jme_rxhead == NULL &&
2769                  rdata->jme_rxtail == NULL &&
2770                  rdata->jme_rxlen == 0);
2771         rdata->jme_rx_cons = 0;
2772
2773         bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2774         for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2775                 int error;
2776
2777                 rxd = &rdata->jme_rxdesc[i];
2778                 rxd->rx_m = NULL;
2779                 rxd->rx_desc = &rdata->jme_rx_ring[i];
2780                 error = jme_newbuf(rdata, rxd, 1);
2781                 if (error)
2782                         return error;
2783         }
2784         return 0;
2785 }
2786
/*
 * Attach a fresh mbuf cluster to RX descriptor @rxd.
 *
 * The cluster is loaded into the ring's spare dmamap first; only
 * after a successful load are the old map/mbuf released and the
 * maps swapped, so a failed allocation leaves the descriptor's
 * previous buffer intact.
 *
 * @init: non-zero during ring initialization — the mbuf allocation
 *        may sleep and DMA load failures are logged.
 *
 * Returns 0 on success, ENOBUFS or a bus_dma error otherwise.
 */
static int
jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
{
        struct mbuf *m;
        bus_dma_segment_t segs;
        bus_dmamap_t map;
        int error, nsegs;

        m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return ENOBUFS;
        /*
         * JMC250 has 64bit boundary alignment limitation so jme(4)
         * takes advantage of 10 bytes padding feature of hardware
         * in order not to copy entire frame to align IP header on
         * 32bit boundary.
         */
        m->m_len = m->m_pkthdr.len = MCLBYTES;

        /* Load into the spare map; a single contiguous segment. */
        error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
                        rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
                        BUS_DMA_NOWAIT);
        if (error) {
                m_freem(m);
                if (init) {
                        if_printf(&rdata->jme_sc->arpcom.ac_if,
                            "can't load RX mbuf\n");
                }
                return error;
        }

        /* Release the previous buffer's mapping, if any. */
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
                                BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
        }
        /* Swap the loaded map in; the old map becomes the new spare. */
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = rdata->jme_rx_sparemap;
        rdata->jme_rx_sparemap = map;
        rxd->rx_m = m;
        /* Save the bus address so the descriptor can be rewritten later. */
        rxd->rx_paddr = segs.ds_addr;

        jme_setup_rxdesc(rxd);
        return 0;
}
2832
2833 static void
2834 jme_set_vlan(struct jme_softc *sc)
2835 {
2836         struct ifnet *ifp = &sc->arpcom.ac_if;
2837         uint32_t reg;
2838
2839         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2840
2841         reg = CSR_READ_4(sc, JME_RXMAC);
2842         reg &= ~RXMAC_VLAN_ENB;
2843         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2844                 reg |= RXMAC_VLAN_ENB;
2845         CSR_WRITE_4(sc, JME_RXMAC, reg);
2846 }
2847
2848 static void
2849 jme_set_filter(struct jme_softc *sc)
2850 {
2851         struct ifnet *ifp = &sc->arpcom.ac_if;
2852         struct ifmultiaddr *ifma;
2853         uint32_t crc;
2854         uint32_t mchash[2];
2855         uint32_t rxcfg;
2856
2857         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2858
2859         rxcfg = CSR_READ_4(sc, JME_RXMAC);
2860         rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2861             RXMAC_ALLMULTI);
2862
2863         /*
2864          * Always accept frames destined to our station address.
2865          * Always accept broadcast frames.
2866          */
2867         rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2868
2869         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2870                 if (ifp->if_flags & IFF_PROMISC)
2871                         rxcfg |= RXMAC_PROMISC;
2872                 if (ifp->if_flags & IFF_ALLMULTI)
2873                         rxcfg |= RXMAC_ALLMULTI;
2874                 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2875                 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2876                 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2877                 return;
2878         }
2879
2880         /*
2881          * Set up the multicast address filter by passing all multicast
2882          * addresses through a CRC generator, and then using the low-order
2883          * 6 bits as an index into the 64 bit multicast hash table.  The
2884          * high order bits select the register, while the rest of the bits
2885          * select the bit within the register.
2886          */
2887         rxcfg |= RXMAC_MULTICAST;
2888         bzero(mchash, sizeof(mchash));
2889
2890         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2891                 if (ifma->ifma_addr->sa_family != AF_LINK)
2892                         continue;
2893                 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2894                     ifma->ifma_addr), ETHER_ADDR_LEN);
2895
2896                 /* Just want the 6 least significant bits. */
2897                 crc &= 0x3f;
2898
2899                 /* Set the corresponding bit in the hash table. */
2900                 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2901         }
2902
2903         CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2904         CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2905         CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2906 }
2907
2908 static int
2909 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2910 {
2911         struct jme_softc *sc = arg1;
2912         struct ifnet *ifp = &sc->arpcom.ac_if;
2913         int error, v;
2914
2915         ifnet_serialize_all(ifp);
2916
2917         v = sc->jme_tx_coal_to;
2918         error = sysctl_handle_int(oidp, &v, 0, req);
2919         if (error || req->newptr == NULL)
2920                 goto back;
2921
2922         if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2923                 error = EINVAL;
2924                 goto back;
2925         }
2926
2927         if (v != sc->jme_tx_coal_to) {
2928                 sc->jme_tx_coal_to = v;
2929                 if (ifp->if_flags & IFF_RUNNING)
2930                         jme_set_tx_coal(sc);
2931         }
2932 back:
2933         ifnet_deserialize_all(ifp);
2934         return error;
2935 }
2936
2937 static int
2938 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2939 {
2940         struct jme_softc *sc = arg1;
2941         struct ifnet *ifp = &sc->arpcom.ac_if;
2942         int error, v;
2943
2944         ifnet_serialize_all(ifp);
2945
2946         v = sc->jme_tx_coal_pkt;
2947         error = sysctl_handle_int(oidp, &v, 0, req);
2948         if (error || req->newptr == NULL)
2949                 goto back;
2950
2951         if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2952                 error = EINVAL;
2953                 goto back;
2954         }
2955
2956         if (v != sc->jme_tx_coal_pkt) {
2957                 sc->jme_tx_coal_pkt = v;
2958                 if (ifp->if_flags & IFF_RUNNING)
2959                         jme_set_tx_coal(sc);
2960         }
2961 back:
2962         ifnet_deserialize_all(ifp);
2963         return error;
2964 }
2965
2966 static int
2967 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2968 {
2969         struct jme_softc *sc = arg1;
2970         struct ifnet *ifp = &sc->arpcom.ac_if;
2971         int error, v;
2972
2973         ifnet_serialize_all(ifp);
2974
2975         v = sc->jme_rx_coal_to;
2976         error = sysctl_handle_int(oidp, &v, 0, req);
2977         if (error || req->newptr == NULL)
2978                 goto back;
2979
2980         if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2981                 error = EINVAL;
2982                 goto back;
2983         }
2984
2985         if (v != sc->jme_rx_coal_to) {
2986                 sc->jme_rx_coal_to = v;
2987                 if (ifp->if_flags & IFF_RUNNING)
2988                         jme_set_rx_coal(sc);
2989         }
2990 back:
2991         ifnet_deserialize_all(ifp);
2992         return error;
2993 }
2994
2995 static int
2996 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2997 {
2998         struct jme_softc *sc = arg1;
2999         struct ifnet *ifp = &sc->arpcom.ac_if;
3000         int error, v;
3001
3002         ifnet_serialize_all(ifp);
3003
3004         v = sc->jme_rx_coal_pkt;
3005         error = sysctl_handle_int(oidp, &v, 0, req);
3006         if (error || req->newptr == NULL)
3007                 goto back;
3008
3009         if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3010                 error = EINVAL;
3011                 goto back;
3012         }
3013
3014         if (v != sc->jme_rx_coal_pkt) {
3015                 sc->jme_rx_coal_pkt = v;
3016                 if (ifp->if_flags & IFF_RUNNING)
3017                         jme_set_rx_coal(sc);
3018         }
3019 back:
3020         ifnet_deserialize_all(ifp);
3021         return error;
3022 }
3023
3024 static void
3025 jme_set_tx_coal(struct jme_softc *sc)
3026 {
3027         uint32_t reg;
3028
3029         reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3030             PCCTX_COAL_TO_MASK;
3031         reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3032             PCCTX_COAL_PKT_MASK;
3033         reg |= PCCTX_COAL_TXQ0;
3034         CSR_WRITE_4(sc, JME_PCCTX, reg);
3035 }
3036
3037 static void
3038 jme_set_rx_coal(struct jme_softc *sc)
3039 {
3040         uint32_t reg;
3041         int r;
3042
3043         reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3044             PCCRX_COAL_TO_MASK;
3045         reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3046             PCCRX_COAL_PKT_MASK;
3047         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3048                 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3049 }
3050
3051 #ifdef DEVICE_POLLING
3052
/*
 * DEVICE_POLLING handler; called with the main serializer held.
 *
 * POLL_REGISTER/POLL_DEREGISTER toggle the chip's interrupt mask so
 * interrupts stay off while polling is active.  The poll cases drain
 * every RX ring, restart the RX engine if it ran out of descriptors,
 * then reap finished TX descriptors and restart transmission.
 */
static void
jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct jme_softc *sc = ifp->if_softc;
        uint32_t status;
        int r;

        ASSERT_SERIALIZED(&sc->jme_serialize);

        switch (cmd) {
        case POLL_REGISTER:
                /* Polling takes over: mask all device interrupts. */
                CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
                break;

        case POLL_DEREGISTER:
                /* Back to interrupt mode: unmask device interrupts. */
                CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
                break;

        case POLL_AND_CHECK_STATUS:
        case POLL_ONLY:
                status = CSR_READ_4(sc, JME_INTR_STATUS);

                /* Drain up to 'count' frames from each RX ring. */
                for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                        struct jme_rxdata *rdata =
                            &sc->jme_cdata.jme_rx_data[r];

                        lwkt_serialize_enter(&rdata->jme_rx_serialize);
                        jme_rxeof(rdata, count);
                        lwkt_serialize_exit(&rdata->jme_rx_serialize);
                }

                /* Restart RX if a queue ran out of descriptors. */
                if (status & INTR_RXQ_DESC_EMPTY) {
                        CSR_WRITE_4(sc, JME_INTR_STATUS, status);
                        CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
                            RXCSR_RX_ENB | RXCSR_RXQ_START);
                }

                /* Reap completed transmissions; refill the TX queue. */
                lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
                jme_txeof(sc);
                if (!ifq_is_empty(&ifp->if_snd))
                        if_devstart(ifp);
                lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
                break;
        }
}
3098
3099 #endif  /* DEVICE_POLLING */
3100
3101 static int
3102 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3103 {
3104         bus_dmamem_t dmem;
3105         int error, asize;
3106
3107         asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3108         error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3109                         JME_RX_RING_ALIGN, 0,
3110                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3111                         asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3112         if (error) {
3113                 device_printf(rdata->jme_sc->jme_dev,
3114                     "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3115                 return error;
3116         }
3117         rdata->jme_rx_ring_tag = dmem.dmem_tag;
3118         rdata->jme_rx_ring_map = dmem.dmem_map;
3119         rdata->jme_rx_ring = dmem.dmem_addr;
3120         rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3121
3122         return 0;
3123 }
3124
/*
 * Create the DMA tag and per-descriptor DMA maps (plus one spare
 * map) for one RX ring's buffers.  On any failure every map created
 * so far and the tag itself are destroyed before returning, leaving
 * @rdata with jme_rx_tag == NULL.
 * Returns 0 on success or a bus_dma error code.
 */
static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
        int i, error;

        /* Create tag for Rx buffers. */
        error = bus_dma_tag_create(
            rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
            JME_RX_BUF_ALIGN, 0,        /* algnmnt, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES,                   /* maxsize */
            1,                          /* nsegments */
            MCLBYTES,                   /* maxsegsize */
            BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
            &rdata->jme_rx_tag);
        if (error) {
                device_printf(rdata->jme_sc->jme_dev,
                    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
                return error;
        }

        /* Create DMA maps for Rx buffers. */
        error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
                                  &rdata->jme_rx_sparemap);
        if (error) {
                device_printf(rdata->jme_sc->jme_dev,
                    "could not create %dth spare Rx dmamap.\n",
                    rdata->jme_rx_idx);
                bus_dma_tag_destroy(rdata->jme_rx_tag);
                rdata->jme_rx_tag = NULL;
                return error;
        }
        for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
                struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

                error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
                                          &rxd->rx_dmamap);
                if (error) {
                        int j;

                        device_printf(rdata->jme_sc->jme_dev,
                            "could not create %dth Rx dmamap "
                            "for %dth RX ring.\n", i, rdata->jme_rx_idx);

                        /* Unwind: destroy the maps created so far. */
                        for (j = 0; j < i; ++j) {
                                rxd = &rdata->jme_rxdesc[j];
                                bus_dmamap_destroy(rdata->jme_rx_tag,
                                                   rxd->rx_dmamap);
                        }
                        bus_dmamap_destroy(rdata->jme_rx_tag,
                                           rdata->jme_rx_sparemap);
                        bus_dma_tag_destroy(rdata->jme_rx_tag);
                        rdata->jme_rx_tag = NULL;
                        return error;
                }
        }
        return 0;
}
3185
3186 static void
3187 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3188 {
3189         int r;
3190
3191         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3192                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3193
3194                 if (status & rdata->jme_rx_coal) {
3195                         lwkt_serialize_enter(&rdata->jme_rx_serialize);
3196                         jme_rxeof(rdata, -1);
3197                         lwkt_serialize_exit(&rdata->jme_rx_serialize);
3198                 }
3199         }
3200 }
3201
3202 static void
3203 jme_enable_rss(struct jme_softc *sc)
3204 {
3205         uint32_t rssc, ind;
3206         uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3207         int i;
3208
3209         KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3210                 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3211                 ("%s: invalid # of RX rings (%d)",
3212                  sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3213
3214         rssc = RSSC_HASH_64_ENTRY;
3215         rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3216         rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3217         JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3218         CSR_WRITE_4(sc, JME_RSSC, rssc);
3219
3220         toeplitz_get_key(key, sizeof(key));
3221         for (i = 0; i < RSSKEY_NREGS; ++i) {
3222                 uint32_t keyreg;
3223
3224                 keyreg = RSSKEY_REGVAL(key, i);
3225                 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3226
3227                 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3228         }
3229
3230         /*
3231          * Create redirect table in following fashion:
3232          * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3233          */
3234         ind = 0;
3235         for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3236                 int q;
3237
3238                 q = i % sc->jme_cdata.jme_rx_ring_cnt;
3239                 ind |= q << (i * 8);
3240         }
3241         JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3242
3243         for (i = 0; i < RSSTBL_NREGS; ++i)
3244                 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3245 }
3246
/*
 * Disable RSS so that received packets are no longer spread across
 * multiple RX rings.
 */
static void
jme_disable_rss(struct jme_softc *sc)
{
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}
3252
3253 static void
3254 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3255 {
3256         struct jme_softc *sc = ifp->if_softc;
3257
3258         switch (slz) {
3259         case IFNET_SERIALIZE_ALL:
3260                 lwkt_serialize_array_enter(sc->jme_serialize_arr,
3261                     sc->jme_serialize_cnt, 0);
3262                 break;
3263
3264         case IFNET_SERIALIZE_MAIN:
3265                 lwkt_serialize_enter(&sc->jme_serialize);
3266                 break;
3267
3268         case IFNET_SERIALIZE_TX:
3269                 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3270                 break;
3271
3272         case IFNET_SERIALIZE_RX(0):
3273                 lwkt_serialize_enter(
3274                     &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3275                 break;
3276
3277         case IFNET_SERIALIZE_RX(1):
3278                 lwkt_serialize_enter(
3279                     &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3280                 break;
3281
3282         case IFNET_SERIALIZE_RX(2):
3283                 lwkt_serialize_enter(
3284                     &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3285                 break;
3286
3287         case IFNET_SERIALIZE_RX(3):
3288                 lwkt_serialize_enter(
3289                     &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3290                 break;
3291
3292         default:
3293                 panic("%s unsupported serialize type", ifp->if_xname);
3294         }
3295 }
3296
3297 static void
3298 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3299 {
3300         struct jme_softc *sc = ifp->if_softc;
3301
3302         switch (slz) {
3303         case IFNET_SERIALIZE_ALL:
3304                 lwkt_serialize_array_exit(sc->jme_serialize_arr,
3305                     sc->jme_serialize_cnt, 0);
3306                 break;
3307
3308         case IFNET_SERIALIZE_MAIN:
3309                 lwkt_serialize_exit(&sc->jme_serialize);
3310                 break;
3311
3312         case IFNET_SERIALIZE_TX:
3313                 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3314                 break;
3315
3316         case IFNET_SERIALIZE_RX(0):
3317                 lwkt_serialize_exit(
3318                     &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3319                 break;
3320
3321         case IFNET_SERIALIZE_RX(1):
3322                 lwkt_serialize_exit(
3323                     &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3324                 break;
3325
3326         case IFNET_SERIALIZE_RX(2):
3327                 lwkt_serialize_exit(
3328                     &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3329                 break;
3330
3331         case IFNET_SERIALIZE_RX(3):
3332                 lwkt_serialize_exit(
3333                     &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3334                 break;
3335
3336         default:
3337                 panic("%s unsupported serialize type", ifp->if_xname);
3338         }
3339 }
3340
3341 static int
3342 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3343 {
3344         struct jme_softc *sc = ifp->if_softc;
3345
3346         switch (slz) {
3347         case IFNET_SERIALIZE_ALL:
3348                 return lwkt_serialize_array_try(sc->jme_serialize_arr,
3349                     sc->jme_serialize_cnt, 0);
3350
3351         case IFNET_SERIALIZE_MAIN:
3352                 return lwkt_serialize_try(&sc->jme_serialize);
3353
3354         case IFNET_SERIALIZE_TX:
3355                 return lwkt_serialize_try(&sc->jme_cdata.jme_tx_serialize);
3356
3357         case IFNET_SERIALIZE_RX(0):
3358                 return lwkt_serialize_try(
3359                     &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3360
3361         case IFNET_SERIALIZE_RX(1):
3362                 return lwkt_serialize_try(
3363                     &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3364
3365         case IFNET_SERIALIZE_RX(2):
3366                 return lwkt_serialize_try(
3367                     &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3368
3369         case IFNET_SERIALIZE_RX(3):
3370                 return lwkt_serialize_try(
3371                     &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3372
3373         default:
3374                 panic("%s unsupported serialize type", ifp->if_xname);
3375         }
3376 }
3377
3378 #ifdef INVARIANTS
3379
/*
 * INVARIANTS-only ifnet hook: assert that the serializer(s) covered by
 * 'slz' are currently held ('serialized' == TRUE) or not held by the
 * caller.  The case structure mirrors jme_serialize()/jme_deserialize().
 */
static void
jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct jme_softc *sc = ifp->if_softc;
	struct jme_rxdata *rdata;
	int i;

	switch (slz) {
	case IFNET_SERIALIZE_ALL:
		/* Every serializer in the combined array must match. */
		if (serialized) {
			for (i = 0; i < sc->jme_serialize_cnt; ++i)
				ASSERT_SERIALIZED(sc->jme_serialize_arr[i]);
		} else {
			for (i = 0; i < sc->jme_serialize_cnt; ++i)
				ASSERT_NOT_SERIALIZED(sc->jme_serialize_arr[i]);
		}
		break;

	case IFNET_SERIALIZE_MAIN:
		if (serialized)
			ASSERT_SERIALIZED(&sc->jme_serialize);
		else
			ASSERT_NOT_SERIALIZED(&sc->jme_serialize);
		break;

	case IFNET_SERIALIZE_TX:
		if (serialized)
			ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
		else
			ASSERT_NOT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
		break;

	/* One case per possible RX ring; up to 4 rings are supported. */
	case IFNET_SERIALIZE_RX(0):
		rdata = &sc->jme_cdata.jme_rx_data[0];
		if (serialized)
			ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
		else
			ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
		break;

	case IFNET_SERIALIZE_RX(1):
		rdata = &sc->jme_cdata.jme_rx_data[1];
		if (serialized)
			ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
		else
			ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
		break;

	case IFNET_SERIALIZE_RX(2):
		rdata = &sc->jme_cdata.jme_rx_data[2];
		if (serialized)
			ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
		else
			ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
		break;

	case IFNET_SERIALIZE_RX(3):
		rdata = &sc->jme_cdata.jme_rx_data[3];
		if (serialized)
			ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
		else
			ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
		break;

	default:
		panic("%s unsupported serialize type", ifp->if_xname);
	}
}
3449
3450 #endif  /* INVARIANTS */
3451
/*
 * Try to switch the device to MSI-X: one vector for TX plus one vector
 * per enabled RX ring.  On success sc->jme_irq_type is set to
 * PCI_INTR_TYPE_MSIX; on any failure every MSI-X resource acquired so
 * far is released and the irq type is left untouched, so the caller
 * falls back to MSI/legacy interrupt allocation.
 */
static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;

	/* One TX vector plus one vector per RX ring. */
	msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt;
	KKASSERT(msix_count <= JME_NMSIX);

	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	/* Mark every slot unallocated so error cleanup is always safe. */
	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/* Slot 0: TX interrupt, serviced under the TX serializer. */
	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = 0;		/* XXX Put TX to cpu0 */
	msix->jme_msix_arg = &sc->jme_cdata;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/* Slots 1..N: one RX interrupt per ring, under that ring's serializer. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = r;	/* XXX Put RX to cpuX */
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d", device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Setup jme_msix_cnt early, so we could cleanup */
	sc->jme_msix_cnt = msix_count;

	/* Allocate and activate the actual vectors (vector 0 stays unused). */
	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		msix->jme_msix_vector = i + 1;
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	/*
	 * Build the cached MSINUM mapping: for every interrupt source in
	 * JME_INTRS record, in a 4-bit field, the MSI-X vector of the
	 * handler that claimed that source.  Written to hw elsewhere
	 * (see jme_set_msinum()).
	 */
	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;

back:
	/* error == 0 here on the success path; only clean up on failure. */
	if (error)
		jme_msix_free(dev);
}
3563
3564 static int
3565 jme_intr_alloc(device_t dev)
3566 {
3567         struct jme_softc *sc = device_get_softc(dev);
3568         u_int irq_flags;
3569
3570         jme_msix_try_alloc(dev);
3571
3572         if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3573                 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3574                     &sc->jme_irq_rid, &irq_flags);
3575
3576                 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3577                     &sc->jme_irq_rid, irq_flags);
3578                 if (sc->jme_irq_res == NULL) {
3579                         device_printf(dev, "can't allocate irq\n");
3580                         return ENXIO;
3581                 }
3582         }
3583         return 0;
3584 }
3585
3586 static void
3587 jme_msix_free(device_t dev)
3588 {
3589         struct jme_softc *sc = device_get_softc(dev);
3590         int i;
3591
3592         KKASSERT(sc->jme_msix_cnt > 1);
3593
3594         for (i = 0; i < sc->jme_msix_cnt; ++i) {
3595                 struct jme_msix_data *msix = &sc->jme_msix[i];
3596
3597                 if (msix->jme_msix_res != NULL) {
3598                         bus_release_resource(dev, SYS_RES_IRQ,
3599                             msix->jme_msix_rid, msix->jme_msix_res);
3600                         msix->jme_msix_res = NULL;
3601                 }
3602                 if (msix->jme_msix_rid >= 0) {
3603                         pci_release_msix_vector(dev, msix->jme_msix_rid);
3604                         msix->jme_msix_rid = -1;
3605                 }
3606         }
3607         pci_teardown_msix(dev);
3608 }
3609
3610 static void
3611 jme_intr_free(device_t dev)
3612 {
3613         struct jme_softc *sc = device_get_softc(dev);
3614
3615         if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3616                 if (sc->jme_irq_res != NULL) {
3617                         bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3618                                              sc->jme_irq_res);
3619                 }
3620                 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3621                         pci_release_msi(dev);
3622         } else {
3623                 jme_msix_free(dev);
3624         }
3625 }
3626
/*
 * MSI-X handler for the TX vector.  Entered with the TX serializer
 * held (it was registered as this vector's serializer in
 * jme_msix_try_alloc()).
 */
static void
jme_msix_tx(void *xcd)
{
	struct jme_chain_data *cd = xcd;
	struct jme_softc *sc = cd->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&cd->jme_tx_serialize);

	/* Mask the TX coalescing interrupts while servicing them. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);

	/* Ack the pending TX interrupt sources. */
	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Reclaim completed TX descriptors, then restart TX. */
		jme_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	/* Re-enable the TX coalescing interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
}
3649
/*
 * MSI-X handler for one RX ring's vector.  Entered with that ring's
 * RX serializer held.  Handles the ring's coalescing interrupt and
 * its "RX ring empty" interrupt.
 */
static void
jme_msix_rx(void *xrdata)
{
	struct jme_rxdata *rdata = xrdata;
	struct jme_softc *sc = rdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	/* Mask this ring's interrupts while servicing them. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR,
	    (rdata->jme_rx_coal | rdata->jme_rx_empty));

	/* Only look at the sources owned by this ring's vector. */
	status = CSR_READ_4(sc, JME_INTR_STATUS);
	status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);

	/*
	 * When the coalescing interrupt fired, also ack the ring's
	 * completion bit along with it.
	 */
	if (status & rdata->jme_rx_coal)
		status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & rdata->jme_rx_coal)
			jme_rxeof(rdata, -1);

		/* Ring ran dry: kick RX again with the saved RXCSR value. */
		if (status & rdata->jme_rx_empty) {
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}
	}

	/* Re-enable this ring's interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET,
	    (rdata->jme_rx_coal | rdata->jme_rx_empty));
}
3683
3684 static void
3685 jme_set_msinum(struct jme_softc *sc)
3686 {
3687         int i;
3688
3689         for (i = 0; i < JME_MSINUM_CNT; ++i)
3690                 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3691 }
3692
3693 static int
3694 jme_intr_setup(device_t dev)
3695 {
3696         struct jme_softc *sc = device_get_softc(dev);
3697         struct ifnet *ifp = &sc->arpcom.ac_if;
3698         int error;
3699
3700         if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3701                 return jme_msix_setup(dev);
3702
3703         error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3704             jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3705         if (error) {
3706                 device_printf(dev, "could not set up interrupt handler.\n");
3707                 return error;
3708         }
3709
3710         ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3711         KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3712         return 0;
3713 }
3714
3715 static void
3716 jme_intr_teardown(device_t dev)
3717 {
3718         struct jme_softc *sc = device_get_softc(dev);
3719
3720         if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3721                 jme_msix_teardown(dev, sc->jme_msix_cnt);
3722         else
3723                 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3724 }
3725
3726 static int
3727 jme_msix_setup(device_t dev)
3728 {
3729         struct jme_softc *sc = device_get_softc(dev);
3730         struct ifnet *ifp = &sc->arpcom.ac_if;
3731         int x;
3732
3733         for (x = 0; x < sc->jme_msix_cnt; ++x) {
3734                 struct jme_msix_data *msix = &sc->jme_msix[x];
3735                 int error;
3736
3737                 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3738                     INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3739                     &msix->jme_msix_handle, msix->jme_msix_serialize,
3740                     msix->jme_msix_desc);
3741                 if (error) {
3742                         device_printf(dev, "could not set up %s "
3743                             "interrupt handler.\n", msix->jme_msix_desc);
3744                         jme_msix_teardown(dev, x);
3745                         return error;
3746                 }
3747         }
3748         ifp->if_cpuid = 0; /* XXX */
3749         return 0;
3750 }
3751
3752 static void
3753 jme_msix_teardown(device_t dev, int msix_count)
3754 {
3755         struct jme_softc *sc = device_get_softc(dev);
3756         int x;
3757
3758         for (x = 0; x < msix_count; ++x) {
3759                 struct jme_msix_data *msix = &sc->jme_msix[x];
3760
3761                 bus_teardown_intr(dev, msix->jme_msix_res,
3762                     msix->jme_msix_handle);
3763         }
3764 }