0745632261b00e9d019b279f55c10a19e7fc434f
[dragonfly.git] / sys / dev / netif / jme / if_jme.c
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29
30 #include "opt_polling.h"
31 #include "opt_jme.h"
32
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/toeplitz.h>
55 #include <net/toeplitz2.h>
56 #include <net/vlan/if_vlan_var.h>
57 #include <net/vlan/if_vlan_ether.h>
58
59 #include <netinet/in.h>
60
61 #include <dev/netif/mii_layer/miivar.h>
62 #include <dev/netif/mii_layer/jmphyreg.h>
63
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66 #include <bus/pci/pcidevs.h>
67
68 #include <dev/netif/jme/if_jmereg.h>
69 #include <dev/netif/jme/if_jmevar.h>
70
71 #include "miibus_if.h"
72
73 /* Define the following to disable printing Rx errors. */
74 #undef  JME_SHOW_ERRORS
75
76 #define JME_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
77
78 #ifdef JME_RSS_DEBUG
79 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
80 do { \
81         if ((sc)->jme_rss_debug >= (lvl)) \
82                 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
83 } while (0)
84 #else   /* !JME_RSS_DEBUG */
85 #define JME_RSS_DPRINTF(sc, lvl, fmt, ...)      ((void)0)
86 #endif  /* JME_RSS_DEBUG */
87
88 static int      jme_probe(device_t);
89 static int      jme_attach(device_t);
90 static int      jme_detach(device_t);
91 static int      jme_shutdown(device_t);
92 static int      jme_suspend(device_t);
93 static int      jme_resume(device_t);
94
95 static int      jme_miibus_readreg(device_t, int, int);
96 static int      jme_miibus_writereg(device_t, int, int, int);
97 static void     jme_miibus_statchg(device_t);
98
99 static void     jme_init(void *);
100 static int      jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
101 static void     jme_start(struct ifnet *);
102 static void     jme_watchdog(struct ifnet *);
103 static void     jme_mediastatus(struct ifnet *, struct ifmediareq *);
104 static int      jme_mediachange(struct ifnet *);
105 #ifdef DEVICE_POLLING
106 static void     jme_poll(struct ifnet *, enum poll_cmd, int);
107 #endif
108 static void     jme_serialize(struct ifnet *, enum ifnet_serialize);
109 static void     jme_deserialize(struct ifnet *, enum ifnet_serialize);
110 static int      jme_tryserialize(struct ifnet *, enum ifnet_serialize);
111 #ifdef INVARIANTS
112 static void     jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
113                     boolean_t);
114 #endif
115
116 static void     jme_intr(void *);
117 static void     jme_msix_tx(void *);
118 static void     jme_msix_rx(void *);
119 static void     jme_txeof(struct jme_softc *);
120 static void     jme_rxeof(struct jme_rxdata *, int);
121 static void     jme_rx_intr(struct jme_softc *, uint32_t);
122
123 static int      jme_msix_setup(device_t);
124 static void     jme_msix_teardown(device_t, int);
125 static int      jme_intr_setup(device_t);
126 static void     jme_intr_teardown(device_t);
127 static void     jme_msix_try_alloc(device_t);
128 static void     jme_msix_free(device_t);
129 static int      jme_intr_alloc(device_t);
130 static void     jme_intr_free(device_t);
131 static int      jme_dma_alloc(struct jme_softc *);
132 static void     jme_dma_free(struct jme_softc *);
133 static int      jme_init_rx_ring(struct jme_rxdata *);
134 static void     jme_init_tx_ring(struct jme_softc *);
135 static void     jme_init_ssb(struct jme_softc *);
136 static int      jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
137 static int      jme_encap(struct jme_softc *, struct mbuf **);
138 static void     jme_rxpkt(struct jme_rxdata *);
139 static int      jme_rxring_dma_alloc(struct jme_rxdata *);
140 static int      jme_rxbuf_dma_alloc(struct jme_rxdata *);
141
142 static void     jme_tick(void *);
143 static void     jme_stop(struct jme_softc *);
144 static void     jme_reset(struct jme_softc *);
145 static void     jme_set_msinum(struct jme_softc *);
146 static void     jme_set_vlan(struct jme_softc *);
147 static void     jme_set_filter(struct jme_softc *);
148 static void     jme_stop_tx(struct jme_softc *);
149 static void     jme_stop_rx(struct jme_softc *);
150 static void     jme_mac_config(struct jme_softc *);
151 static void     jme_reg_macaddr(struct jme_softc *, uint8_t[]);
152 static int      jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
153 static int      jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
154 #ifdef notyet
155 static void     jme_setwol(struct jme_softc *);
156 static void     jme_setlinkspeed(struct jme_softc *);
157 #endif
158 static void     jme_set_tx_coal(struct jme_softc *);
159 static void     jme_set_rx_coal(struct jme_softc *);
160 static void     jme_enable_rss(struct jme_softc *);
161 static void     jme_disable_rss(struct jme_softc *);
162
163 static void     jme_sysctl_node(struct jme_softc *);
164 static int      jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
165 static int      jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
166 static int      jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
167 static int      jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
168
169 /*
170  * Devices supported by this driver.
171  */
static const struct jme_dev {
	uint16_t	jme_vendorid;	/* PCI vendor ID */
	uint16_t	jme_deviceid;	/* PCI device ID */
	uint32_t	jme_caps;	/* initial JME_CAP_* capability flags */
	const char	*jme_name;	/* probe description string */
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);

/*
 * Per-Rx-ring interrupt status bits, indexed by ring number.  Each Rx
 * ring has its own coalescing, completion and descriptor-empty bits in
 * the interrupt status/mask registers.
 */
static const struct {
	uint32_t	jme_coal;	/* coalescing + coalescing-timeout bits */
	uint32_t	jme_comp;	/* completion bit */
	uint32_t	jme_empty;	/* descriptor-empty bit */
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};

/* Driver-wide defaults, overridable at boot via loader tunables (hw.jme.*). */
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 1;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
247
248 /*
249  *      Read a PHY register on the MII of the JMC250.
250  */
251 static int
252 jme_miibus_readreg(device_t dev, int phy, int reg)
253 {
254         struct jme_softc *sc = device_get_softc(dev);
255         uint32_t val;
256         int i;
257
258         /* For FPGA version, PHY address 0 should be ignored. */
259         if (sc->jme_caps & JME_CAP_FPGA) {
260                 if (phy == 0)
261                         return (0);
262         } else {
263                 if (sc->jme_phyaddr != phy)
264                         return (0);
265         }
266
267         CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
268             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
269
270         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
271                 DELAY(1);
272                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
273                         break;
274         }
275         if (i == 0) {
276                 device_printf(sc->jme_dev, "phy read timeout: "
277                               "phy %d, reg %d\n", phy, reg);
278                 return (0);
279         }
280
281         return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
282 }
283
284 /*
285  *      Write a PHY register on the MII of the JMC250.
286  */
287 static int
288 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
289 {
290         struct jme_softc *sc = device_get_softc(dev);
291         int i;
292
293         /* For FPGA version, PHY address 0 should be ignored. */
294         if (sc->jme_caps & JME_CAP_FPGA) {
295                 if (phy == 0)
296                         return (0);
297         } else {
298                 if (sc->jme_phyaddr != phy)
299                         return (0);
300         }
301
302         CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
303             ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
304             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
305
306         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
307                 DELAY(1);
308                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
309                         break;
310         }
311         if (i == 0) {
312                 device_printf(sc->jme_dev, "phy write timeout: "
313                               "phy %d, reg %d\n", phy, reg);
314         }
315
316         return (0);
317 }
318
319 /*
320  *      Callback from MII layer when media changes.
321  */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Nothing to do unless the interface is up and running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	/*
	 * Re-derive the link state from the MII status.  1000baseT only
	 * counts as link on parts that are not fast-ethernet-only.
	 */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources.  Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain every Rx ring and reset its software state. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}

	/* Reclaim completed Tx descriptors, free anything still queued. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			/* Select ring r before programming its base address. */
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	/* Mark the driver running again and restart the periodic tick. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
455
456 /*
457  *      Get the current interface media status.
458  */
459 static void
460 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
461 {
462         struct jme_softc *sc = ifp->if_softc;
463         struct mii_data *mii = device_get_softc(sc->jme_miibus);
464
465         ASSERT_IFNET_SERIALIZED_ALL(ifp);
466
467         mii_pollstat(mii);
468         ifmr->ifm_status = mii->mii_media_status;
469         ifmr->ifm_active = mii->mii_media_active;
470 }
471
472 /*
473  *      Set hardware to newly-selected media.
474  */
475 static int
476 jme_mediachange(struct ifnet *ifp)
477 {
478         struct jme_softc *sc = ifp->if_softc;
479         struct mii_data *mii = device_get_softc(sc->jme_miibus);
480         int error;
481
482         ASSERT_IFNET_SERIALIZED_ALL(ifp);
483
484         if (mii->mii_instance != 0) {
485                 struct mii_softc *miisc;
486
487                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
488                         mii_phy_reset(miisc);
489         }
490         error = mii_mediachg(mii);
491
492         return (error);
493 }
494
495 static int
496 jme_probe(device_t dev)
497 {
498         const struct jme_dev *sp;
499         uint16_t vid, did;
500
501         vid = pci_get_vendor(dev);
502         did = pci_get_device(dev);
503         for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
504                 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
505                         struct jme_softc *sc = device_get_softc(dev);
506
507                         sc->jme_caps = sp->jme_caps;
508                         device_set_desc(dev, sp->jme_name);
509                         return (0);
510                 }
511         }
512         return (ENXIO);
513 }
514
515 static int
516 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
517 {
518         uint32_t reg;
519         int i;
520
521         *val = 0;
522         for (i = JME_TIMEOUT; i > 0; i--) {
523                 reg = CSR_READ_4(sc, JME_SMBCSR);
524                 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
525                         break;
526                 DELAY(1);
527         }
528
529         if (i == 0) {
530                 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
531                 return (ETIMEDOUT);
532         }
533
534         reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
535         CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
536         for (i = JME_TIMEOUT; i > 0; i--) {
537                 DELAY(1);
538                 reg = CSR_READ_4(sc, JME_SMBINTF);
539                 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
540                         break;
541         }
542
543         if (i == 0) {
544                 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
545                 return (ETIMEDOUT);
546         }
547
548         reg = CSR_READ_4(sc, JME_SMBINTF);
549         *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
550
551         return (0);
552 }
553
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	/* Verify the two-byte EEPROM signature before parsing anything. */
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		/* First byte of each descriptor holds function/page flags. */
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/*
		 * Descriptors for function 0 / page BAR1 carry a
		 * register/value pair; collect the ones that target the
		 * PAR0..PAR0+5 station address registers into eaddr[].
		 */
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	/* Succeed only if all six address bytes were found. */
	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
597
598 static void
599 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
600 {
601         uint32_t par0, par1;
602
603         /* Read station address. */
604         par0 = CSR_READ_4(sc, JME_PAR0);
605         par1 = CSR_READ_4(sc, JME_PAR1);
606         par1 &= 0xFFFF;
607         if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
608                 device_printf(sc->jme_dev,
609                     "generating fake ethernet address.\n");
610                 par0 = karc4random();
611                 /* Set OUI to JMicron. */
612                 eaddr[0] = 0x00;
613                 eaddr[1] = 0x1B;
614                 eaddr[2] = 0x8C;
615                 eaddr[3] = (par0 >> 16) & 0xff;
616                 eaddr[4] = (par0 >> 8) & 0xff;
617                 eaddr[5] = par0 & 0xff;
618         } else {
619                 eaddr[0] = (par0 >> 0) & 0xFF;
620                 eaddr[1] = (par0 >> 8) & 0xFF;
621                 eaddr[2] = (par0 >> 16) & 0xFF;
622                 eaddr[3] = (par0 >> 24) & 0xFF;
623                 eaddr[4] = (par1 >> 0) & 0xFF;
624                 eaddr[5] = (par1 >> 8) & 0xFF;
625         }
626 }
627
628 static int
629 jme_attach(device_t dev)
630 {
631         struct jme_softc *sc = device_get_softc(dev);
632         struct ifnet *ifp = &sc->arpcom.ac_if;
633         uint32_t reg;
634         uint16_t did;
635         uint8_t pcie_ptr, rev;
636         int error = 0, i, j, rx_desc_cnt;
637         uint8_t eaddr[ETHER_ADDR_LEN];
638
639         lwkt_serialize_init(&sc->jme_serialize);
640         lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
641         for (i = 0; i < JME_NRXRING_MAX; ++i) {
642                 lwkt_serialize_init(
643                     &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
644         }
645
646         rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
647             jme_rx_desc_count);
648         rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
649         if (rx_desc_cnt > JME_NDESC_MAX)
650                 rx_desc_cnt = JME_NDESC_MAX;
651
652         sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
653             jme_tx_desc_count);
654         sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
655             JME_NDESC_ALIGN);
656         if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
657                 sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
658
659         /*
660          * Calculate rx rings
661          */
662         sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
663             jme_rx_ring_count);
664         sc->jme_cdata.jme_rx_ring_cnt =
665             if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
666
667         i = 0;
668         sc->jme_serialize_arr[i++] = &sc->jme_serialize;
669         sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
670         for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
671                 sc->jme_serialize_arr[i++] =
672                     &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
673         }
674         KKASSERT(i <= JME_NSERIALIZE);
675         sc->jme_serialize_cnt = i;
676
677         sc->jme_cdata.jme_sc = sc;
678         for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
679                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
680
681                 rdata->jme_sc = sc;
682                 rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
683                 rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
684                 rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
685                 rdata->jme_rx_idx = i;
686                 rdata->jme_rx_desc_cnt = rx_desc_cnt;
687         }
688
689         sc->jme_dev = dev;
690         sc->jme_lowaddr = BUS_SPACE_MAXADDR;
691
692         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
693
694         callout_init(&sc->jme_tick_ch);
695
696 #ifndef BURN_BRIDGES
697         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
698                 uint32_t irq, mem;
699
700                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
701                 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
702
703                 device_printf(dev, "chip is in D%d power mode "
704                     "-- setting to D0\n", pci_get_powerstate(dev));
705
706                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
707
708                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
709                 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
710         }
711 #endif  /* !BURN_BRIDGE */
712
713         /* Enable bus mastering */
714         pci_enable_busmaster(dev);
715
716         /*
717          * Allocate IO memory
718          *
719          * JMC250 supports both memory mapped and I/O register space
720          * access.  Because I/O register access should use different
 * BARs to access registers it's a waste of time to use I/O
 * register space access.  JMC250 uses 16K to map entire memory
723          * space.
724          */
725         sc->jme_mem_rid = JME_PCIR_BAR;
726         sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
727                                                  &sc->jme_mem_rid, RF_ACTIVE);
728         if (sc->jme_mem_res == NULL) {
729                 device_printf(dev, "can't allocate IO memory\n");
730                 return ENXIO;
731         }
732         sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
733         sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
734
735         /*
736          * Allocate IRQ
737          */
738         error = jme_intr_alloc(dev);
739         if (error)
740                 goto fail;
741
742         /*
743          * Extract revisions
744          */
745         reg = CSR_READ_4(sc, JME_CHIPMODE);
746         if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
747             CHIPMODE_NOT_FPGA) {
748                 sc->jme_caps |= JME_CAP_FPGA;
749                 if (bootverbose) {
750                         device_printf(dev, "FPGA revision: 0x%04x\n",
751                                       (reg & CHIPMODE_FPGA_REV_MASK) >>
752                                       CHIPMODE_FPGA_REV_SHIFT);
753                 }
754         }
755
756         /* NOTE: FM revision is put in the upper 4 bits */
757         rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
758         rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
759         if (bootverbose)
760                 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
761
762         did = pci_get_device(dev);
763         switch (did) {
764         case PCI_PRODUCT_JMICRON_JMC250:
765                 if (rev == JME_REV1_A2)
766                         sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
767                 break;
768
769         case PCI_PRODUCT_JMICRON_JMC260:
770                 if (rev == JME_REV2)
771                         sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
772                 break;
773
774         default:
775                 panic("unknown device id 0x%04x\n", did);
776         }
777         if (rev >= JME_REV2) {
778                 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
779                 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
780                                       GHC_TXMAC_CLKSRC_1000;
781         }
782
783         /* Reset the ethernet controller. */
784         jme_reset(sc);
785
786         /* Map MSI/MSI-X vectors */
787         jme_set_msinum(sc);
788
789         /* Get station address. */
790         reg = CSR_READ_4(sc, JME_SMBCSR);
791         if (reg & SMBCSR_EEPROM_PRESENT)
792                 error = jme_eeprom_macaddr(sc, eaddr);
793         if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
794                 if (error != 0 && (bootverbose)) {
795                         device_printf(dev, "ethernet hardware address "
796                                       "not found in EEPROM.\n");
797                 }
798                 jme_reg_macaddr(sc, eaddr);
799         }
800
801         /*
802          * Save PHY address.
803          * Integrated JR0211 has fixed PHY address whereas FPGA version
804          * requires PHY probing to get correct PHY address.
805          */
806         if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
807                 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
808                     GPREG0_PHY_ADDR_MASK;
809                 if (bootverbose) {
810                         device_printf(dev, "PHY is at address %d.\n",
811                             sc->jme_phyaddr);
812                 }
813         } else {
814                 sc->jme_phyaddr = 0;
815         }
816
817         /* Set max allowable DMA size. */
818         pcie_ptr = pci_get_pciecap_ptr(dev);
819         if (pcie_ptr != 0) {
820                 uint16_t ctrl;
821
822                 sc->jme_caps |= JME_CAP_PCIE;
823                 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
824                 if (bootverbose) {
825                         device_printf(dev, "Read request size : %d bytes.\n",
826                             128 << ((ctrl >> 12) & 0x07));
827                         device_printf(dev, "TLP payload size : %d bytes.\n",
828                             128 << ((ctrl >> 5) & 0x07));
829                 }
830                 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
831                 case PCIEM_DEVCTL_MAX_READRQ_128:
832                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
833                         break;
834                 case PCIEM_DEVCTL_MAX_READRQ_256:
835                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
836                         break;
837                 default:
838                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
839                         break;
840                 }
841                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
842         } else {
843                 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
844                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
845         }
846
847 #ifdef notyet
848         if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
849                 sc->jme_caps |= JME_CAP_PMCAP;
850 #endif
851
852         /*
853          * Create sysctl tree
854          */
855         jme_sysctl_node(sc);
856
857         /* Allocate DMA stuffs */
858         error = jme_dma_alloc(sc);
859         if (error)
860                 goto fail;
861
862         ifp->if_softc = sc;
863         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
864         ifp->if_init = jme_init;
865         ifp->if_ioctl = jme_ioctl;
866         ifp->if_start = jme_start;
867 #ifdef DEVICE_POLLING
868         ifp->if_poll = jme_poll;
869 #endif
870         ifp->if_watchdog = jme_watchdog;
871         ifp->if_serialize = jme_serialize;
872         ifp->if_deserialize = jme_deserialize;
873         ifp->if_tryserialize = jme_tryserialize;
874 #ifdef INVARIANTS
875         ifp->if_serialize_assert = jme_serialize_assert;
876 #endif
877         ifq_set_maxlen(&ifp->if_snd,
878             sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
879         ifq_set_ready(&ifp->if_snd);
880
881         /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
882         ifp->if_capabilities = IFCAP_HWCSUM |
883                                IFCAP_VLAN_MTU |
884                                IFCAP_VLAN_HWTAGGING;
885         if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
886                 ifp->if_capabilities |= IFCAP_RSS;
887         ifp->if_capenable = ifp->if_capabilities;
888
889         /*
890          * Disable TXCSUM by default to improve bulk data
891          * transmit performance (+20Mbps improvement).
892          */
893         ifp->if_capenable &= ~IFCAP_TXCSUM;
894
895         if (ifp->if_capenable & IFCAP_TXCSUM)
896                 ifp->if_hwassist = JME_CSUM_FEATURES;
897
898         /* Set up MII bus. */
899         error = mii_phy_probe(dev, &sc->jme_miibus,
900                               jme_mediachange, jme_mediastatus);
901         if (error) {
902                 device_printf(dev, "no PHY found!\n");
903                 goto fail;
904         }
905
906         /*
907          * Save PHYADDR for FPGA mode PHY.
908          */
909         if (sc->jme_caps & JME_CAP_FPGA) {
910                 struct mii_data *mii = device_get_softc(sc->jme_miibus);
911
912                 if (mii->mii_instance != 0) {
913                         struct mii_softc *miisc;
914
915                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
916                                 if (miisc->mii_phy != 0) {
917                                         sc->jme_phyaddr = miisc->mii_phy;
918                                         break;
919                                 }
920                         }
921                         if (sc->jme_phyaddr != 0) {
922                                 device_printf(sc->jme_dev,
923                                     "FPGA PHY is at %d\n", sc->jme_phyaddr);
924                                 /* vendor magic. */
925                                 jme_miibus_writereg(dev, sc->jme_phyaddr,
926                                     JMPHY_CONF, JMPHY_CONF_DEFFIFO);
927
928                                 /* XXX should we clear JME_WA_EXTFIFO */
929                         }
930                 }
931         }
932
933         ether_ifattach(ifp, eaddr, NULL);
934
935         /* Tell the upper layer(s) we support long frames. */
936         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
937
938         error = jme_intr_setup(dev);
939         if (error) {
940                 ether_ifdetach(ifp);
941                 goto fail;
942         }
943
944         return 0;
945 fail:
946         jme_detach(dev);
947         return (error);
948 }
949
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	/*
	 * Detach hook.  Also used as the unwind path by the attach code
	 * (its 'fail:' label jumps here), so every release below must
	 * tolerate partially initialized state - hence the NULL checks.
	 */
	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/*
		 * Stop the hardware and tear down the interrupt handlers
		 * while holding all serializers, then unhook the interface
		 * from the network stack.
		 */
		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* Free the sysctl tree created by jme_sysctl_node(). */
	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	/* Detach the MII bus and any remaining children. */
	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	/* Release interrupt resources (MSI/MSI-X/legacy). */
	jme_intr_free(dev);

	/* Release the register window mapping. */
	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	/* Free DMA resources; safe on partial allocation. */
	jme_dma_free(sc);

	return (0);
}
984
985 static void
986 jme_sysctl_node(struct jme_softc *sc)
987 {
988         int coal_max;
989 #ifdef JME_RSS_DEBUG
990         int r;
991 #endif
992
993         sysctl_ctx_init(&sc->jme_sysctl_ctx);
994         sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
995                                 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
996                                 device_get_nameunit(sc->jme_dev),
997                                 CTLFLAG_RD, 0, "");
998         if (sc->jme_sysctl_tree == NULL) {
999                 device_printf(sc->jme_dev, "can't add sysctl node\n");
1000                 return;
1001         }
1002
1003         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1004             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1005             "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1006             sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
1007
1008         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1009             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1010             "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1011             sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
1012
1013         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1014             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1015             "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1016             sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
1017
1018         SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1019             SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1020             "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1021             sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
1022
1023         SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1024                        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1025                        "rx_desc_count", CTLFLAG_RD,
1026                        &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
1027                        0, "RX desc count");
1028         SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1029                        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1030                        "tx_desc_count", CTLFLAG_RD,
1031                        &sc->jme_cdata.jme_tx_desc_cnt,
1032                        0, "TX desc count");
1033         SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1034                        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1035                        "rx_ring_count", CTLFLAG_RD,
1036                        &sc->jme_cdata.jme_rx_ring_cnt,
1037                        0, "RX ring count");
1038 #ifdef JME_RSS_DEBUG
1039         SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1040                        SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1041                        "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
1042                        0, "RSS debug level");
1043         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1044                 char rx_ring_pkt[32];
1045
1046                 ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
1047                 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
1048                     SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1049                     rx_ring_pkt, CTLFLAG_RW,
1050                     &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
1051         }
1052 #endif
1053
1054         /*
1055          * Set default coalesce valves
1056          */
1057         sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1058         sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1059         sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1060         sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1061
1062         /*
1063          * Adjust coalesce valves, in case that the number of TX/RX
1064          * descs are set to small values by users.
1065          *
1066          * NOTE: coal_max will not be zero, since number of descs
1067          * must aligned by JME_NDESC_ALIGN (16 currently)
1068          */
1069         coal_max = sc->jme_cdata.jme_tx_desc_cnt / 6;
1070         if (coal_max < sc->jme_tx_coal_pkt)
1071                 sc->jme_tx_coal_pkt = coal_max;
1072
1073         coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 4;
1074         if (coal_max < sc->jme_rx_coal_pkt)
1075                 sc->jme_rx_coal_pkt = coal_max;
1076 }
1077
/*
 * Allocate all DMA resources: the software descriptor arrays, the TX
 * descriptor ring, the RX rings, the shadow status block, and the
 * TX/RX buffer DMA tags and maps.
 *
 * Error handling: on failure this simply returns the error; the attach
 * code unwinds through jme_detach() -> jme_dma_free(), which copes
 * with partially allocated state.
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i;

	/* Software state for every TX descriptor (M_WAITOK cannot fail). */
	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		/* Software state for every RX descriptor of this ring. */
		rdata->jme_rxdesc =
		kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
			JME_TX_RING_ALIGN, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_TX_RING_SIZE(sc),
			BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	/* Record tag/map/KVA/bus address of the coherent TX ring memory. */
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
			JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			JME_SSB_SIZE, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_JUMBO_FRAMELEN,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
				BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				&txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			/*
			 * Destroy the maps created so far and the tag
			 * itself here, since jme_dma_free() only walks
			 * the maps when jme_tx_tag is non-NULL.
			 */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
1225
/*
 * Free every DMA resource allocated by jme_dma_alloc() (and the
 * per-ring helpers).  Every section checks its tag/pointer for NULL
 * and NULLs it after freeing, so this is safe to call on a partially
 * completed allocation and is effectively idempotent.
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			/* The spare map is allocated alongside the others. */
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags are destroyed last, after all derived tags. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	/* Software descriptor bookkeeping arrays. */
	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
1318
1319 /*
1320  *      Make sure the interface is stopped at reboot time.
1321  */
1322 static int
1323 jme_shutdown(device_t dev)
1324 {
1325         return jme_suspend(dev);
1326 }
1327
1328 #ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires
 * explicitly resetting the link speed to 10/100Mbps, as a gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation, but we don't know whether that operation
 * will succeed as we have no control after powering
 * off.  If the renegotiation fails, WOL may not work.  Running
 * at 1Gbps draws more power than the 375mA at 3.3V which is
 * specified in the PCI specification, and that would result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 *  Save current negotiated media speed/duplex/flow-control
 *  to softc and restore the same link again after resuming.
 *  PHY handling such as power down/resetting to 100Mbps
 *  may be better handled in suspend method in phy driver.
 */
1347 static void
1348 jme_setlinkspeed(struct jme_softc *sc)
1349 {
1350         struct mii_data *mii;
1351         int aneg, i;
1352
1353         JME_LOCK_ASSERT(sc);
1354
1355         mii = device_get_softc(sc->jme_miibus);
1356         mii_pollstat(mii);
1357         aneg = 0;
1358         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1359                 switch IFM_SUBTYPE(mii->mii_media_active) {
1360                 case IFM_10_T:
1361                 case IFM_100_TX:
1362                         return;
1363                 case IFM_1000_T:
1364                         aneg++;
1365                 default:
1366                         break;
1367                 }
1368         }
1369         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1370         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1371             ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1372         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1373             BMCR_AUTOEN | BMCR_STARTNEG);
1374         DELAY(1000);
1375         if (aneg != 0) {
1376                 /* Poll link state until jme(4) get a 10/100 link. */
1377                 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1378                         mii_pollstat(mii);
1379                         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1380                                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1381                                 case IFM_10_T:
1382                                 case IFM_100_TX:
1383                                         jme_mac_config(sc);
1384                                         return;
1385                                 default:
1386                                         break;
1387                                 }
1388                         }
1389                         JME_UNLOCK(sc);
1390                         pause("jmelnk", hz);
1391                         JME_LOCK(sc);
1392                 }
1393                 if (i == MII_ANEGTICKS_GIGE)
1394                         device_printf(sc->jme_dev, "establishing link failed, "
1395                             "WOL may not work!");
1396         }
1397         /*
1398          * No link, force MAC to have 100Mbps, full-duplex link.
1399          * This is the last resort and may/may not work.
1400          */
1401         mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1402         mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1403         jme_mac_config(sc);
1404 }
1405
/*
 * Program the hardware for wake-on-LAN before entering a low power
 * state; when WOL is not enabled (or not possible) power down the PHY
 * instead.  Currently compiled out (#ifdef notyet).
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	/* Start from current registers with WOL bits cleared. */
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1448 #endif
1449
/*
 * Suspend hook; also used by jme_shutdown().  Stops the chip with all
 * serializers held.  WOL arming (jme_setwol) is not yet enabled.
 */
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}
1465
/*
 * Resume hook: re-initialize the chip if the interface was up at
 * suspend time.  Clearing the PCI PME status (the #ifdef notyet
 * section) is not yet enabled.
 */
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	ifnet_serialize_all(ifp);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}
1497
/*
 * Load the mbuf chain *m_head into the TX descriptor ring.
 *
 * Returns 0 on success; *m_head may then point to a defragmented
 * replacement chain.  On DMA load failure the mbuf is freed, *m_head
 * is set to NULL and the error is returned.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/*
	 * The 64bit descriptor chain format needs one extra leading
	 * "symbol" descriptor that carries no payload (see below).
	 */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	/* Descriptors we may consume, always keeping JME_TXD_RSVD free. */
	maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));

	/* May defragment the chain to fit into maxsegs segments. */
	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
			txd->tx_dmamap, m_head,
			txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	/* First descriptor: addr_hi carries the total packet length. */
	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);

	/* Remaining payload segments; chip owns them immediately. */
	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	/* Total consumed: nsegs payload descs (+1 symbol desc for 64bit). */
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
1620
/*
 * Transmit-start handler: drain the interface send queue into the TX
 * descriptor ring and kick the hardware.  Runs with the TX serializer
 * held.  Sets IFF_OACTIVE and stops when the ring fills up.
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;	/* number of frames queued to the ring this call */

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	/* Without link there is nowhere to send; drop everything queued. */
	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim completed descriptors early if the ring is nearly full. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			/* jme_encap() frees the mbuf on failure. */
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
1689
/*
 * Watchdog timeout handler; fires when a queued transmit has not
 * completed within JME_TX_TIMEOUT seconds.  Applies progressively
 * heavier recovery: full reinit when the link is gone, a simple TX
 * restart when completions were merely missed, otherwise reinit.
 */
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	/* Reclaim finished descriptors before judging how stuck we are. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		/* All frames actually completed; only interrupts were lost. */
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	/* TX really is wedged: reset the chip and restart transmission. */
	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1719
/*
 * ioctl handler: MTU, interface flags, multicast filter, media and
 * capability changes.  Called with all interface serializers held.
 * Returns 0 on success or an errno value.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		/* MTUs above JME_MAX_MTU need the jumbo-frame capability. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			/* Reinitialize so the new MTU takes effect. */
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Only reprogram the RX filter when the
				 * PROMISC/ALLMULTI state actually changed.
				 */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		/* Remember the flags for the next delta check above. */
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		/* 'mask' holds the capability bits being toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* TX csum cannot be toggled on when MTU exceeds TX FIFO. */
		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			/* Mirror the new RX csum setting into hardware. */
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1819
/*
 * Reprogram the MACs after the PHY resolved a new speed/duplex.
 * Configures duplex-dependent collision/flow-control behavior and
 * applies chip workarounds (extended PHY FIFO at 100Mbps, half-duplex
 * GPREG1 workaround) where the softc says they are needed.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	/* Pulse the MAC reset before reprogramming. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		/* Half duplex: collision detection and backoff required. */
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only chips skip gigabit configuration. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	/* Apply workarounds only on chip revisions that need them. */
	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
1914
/*
 * Interrupt handler.  Masks further interrupts, acknowledges the
 * pending status bits, dispatches RX and TX completion processing,
 * then unmasks.  Runs with sc->jme_serialize held.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	/* 0xFFFFFFFF reads back when the device has disappeared. */
	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	/* Acking a coalescing event also requires acking its COMP bit. */
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			/* TX completion runs under the TX serializer. */
			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
1979
/*
 * Reclaim TX descriptors for frames the hardware finished sending:
 * update statistics, free the mbufs, zero the descriptor flags and
 * make the slots available again.  Clears the watchdog when the ring
 * drains and lifts IFF_OACTIVE once enough descriptors are free.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;		/* ring is empty */

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		/* Stop at the first descriptor still owned by hardware. */
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				/* Collision count is reported in buflen. */
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so the driver has to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* Everything pending is done; cancel the TX watchdog. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	/* Re-allow transmission once enough descriptors are free. */
	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
2046
2047 static __inline void
2048 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2049 {
2050         int i;
2051
2052         for (i = 0; i < count; ++i) {
2053                 struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2054
2055                 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2056                 desc->buflen = htole32(MCLBYTES);
2057                 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2058         }
2059 }
2060
2061 static __inline struct pktinfo *
2062 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2063 {
2064         if (flags & JME_RD_IPV4)
2065                 pi->pi_netisr = NETISR_IP;
2066         else if (flags & JME_RD_IPV6)
2067                 pi->pi_netisr = NETISR_IPV6;
2068         else
2069                 return NULL;
2070
2071         pi->pi_flags = 0;
2072         pi->pi_l3proto = IPPROTO_UNKNOWN;
2073
2074         if (flags & JME_RD_MORE_FRAG)
2075                 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2076         else if (flags & JME_RD_TCP)
2077                 pi->pi_l3proto = IPPROTO_TCP;
2078         else if (flags & JME_RD_UDP)
2079                 pi->pi_l3proto = IPPROTO_UDP;
2080         else
2081                 pi = NULL;
2082         return pi;
2083 }
2084
/*
 * Receive one frame: walk its descriptor chain off the RX ring,
 * chain the segment mbufs together, fill in checksum/VLAN/RSS
 * metadata and hand the completed packet to the network stack.
 */
static void
jme_rxpkt(struct jme_rxdata *rdata)
{
	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	hash = le32toh(desc->addr_hi);		/* RSS hash value */
	hashinfo = le32toh(desc->addr_lo);	/* RSS hash function info */
	nsegs = JME_RX_NSEGS(status);

	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
			"hash 0x%08x, hash info 0x%08x\n",
			rdata->jme_rx_idx, flags, hash, hashinfo);

	/* Drop errored frames and recycle their descriptors. */
	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(rdata, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		if_printf(ifp, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(rdata, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(rdata, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				/* Toss the partially assembled chain. */
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(rdata);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				/* L4 csum only counts for unfragmented TCP/UDP. */
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			/* Attach the RSS hash if hardware computed one. */
			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
				m->m_flags |= M_HASH;
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(rdata->jme_sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			/* Pass it on. */
			ether_input_pkt(ifp, m, pi);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(rdata);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_pkt++;
#endif
		}
	}

	/* Advance the consumer index past this frame's descriptors. */
	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
}
2246
2247 static void
2248 jme_rxeof(struct jme_rxdata *rdata, int count)
2249 {
2250         struct jme_desc *desc;
2251         int nsegs, pktlen;
2252
2253         for (;;) {
2254 #ifdef DEVICE_POLLING
2255                 if (count >= 0 && count-- == 0)
2256                         break;
2257 #endif
2258                 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2259                 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2260                         break;
2261                 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2262                         break;
2263
2264                 /*
2265                  * Check number of segments against received bytes.
2266                  * Non-matching value would indicate that hardware
2267                  * is still trying to update Rx descriptors. I'm not
2268                  * sure whether this check is needed.
2269                  */
2270                 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2271                 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2272                 if (nsegs != howmany(pktlen, MCLBYTES)) {
2273                         if_printf(&rdata->jme_sc->arpcom.ac_if,
2274                             "RX fragment count(%d) and "
2275                             "packet size(%d) mismach\n", nsegs, pktlen);
2276                         break;
2277                 }
2278
2279                 /* Received a frame. */
2280                 jme_rxpkt(rdata);
2281         }
2282 }
2283
/*
 * Periodic (once per second) timer: tick the PHY state machine and
 * reschedule ourselves.  Takes all interface serializers for the
 * duration of the MII tick.
 */
static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ifnet_serialize_all(ifp);

	mii_tick(mii);
	/* Re-arm for the next second. */
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	ifnet_deserialize_all(ifp);
}
2298
/*
 * Reset the chip to a known quiescent state.  Follows the prescribed
 * sequence of asserting/deasserting GHC_RESET while alternately
 * disabling and enabling the TXMAC/TXOFL and RXMAC clock sources,
 * with register read-backs to flush posted writes.  TX and RX are
 * left stopped on return.
 */
static void
jme_reset(struct jme_softc *sc)
{
	uint32_t val;

	/* Make sure that TX and RX are stopped */
	jme_stop_tx(sc);
	jme_stop_rx(sc);

	/* Start reset */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(20);

	/*
	 * Hold reset bit before stop reset
	 */

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop reset */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/*
	 * Clear reset bit after stop reset
	 */

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Enable TX and RX */
	val = CSR_READ_4(sc, JME_TXCSR);
	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
	val = CSR_READ_4(sc, JME_RXCSR);
	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
	/* Flush */
	CSR_READ_4(sc, JME_TXCSR);
	CSR_READ_4(sc, JME_RXCSR);

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop TX and RX */
	jme_stop_tx(sc);
	jme_stop_rx(sc);
}
2370
2371 static void
2372 jme_init(void *xsc)
2373 {
2374         struct jme_softc *sc = xsc;
2375         struct ifnet *ifp = &sc->arpcom.ac_if;
2376         struct mii_data *mii;
2377         uint8_t eaddr[ETHER_ADDR_LEN];
2378         bus_addr_t paddr;
2379         uint32_t reg;
2380         int error, r;
2381
2382         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2383
2384         /*
2385          * Cancel any pending I/O.
2386          */
2387         jme_stop(sc);
2388
2389         /*
2390          * Reset the chip to a known state.
2391          */
2392         jme_reset(sc);
2393
2394         /*
2395          * Setup MSI/MSI-X vectors to interrupts mapping
2396          */
2397         jme_set_msinum(sc);
2398
2399         sc->jme_txd_spare =
2400         howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2401         KKASSERT(sc->jme_txd_spare >= 1);
2402
2403         /*
2404          * If we use 64bit address mode for transmitting, each Tx request
2405          * needs one more symbol descriptor.
2406          */
2407         if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2408                 sc->jme_txd_spare += 1;
2409
2410         if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
2411                 jme_enable_rss(sc);
2412         else
2413                 jme_disable_rss(sc);
2414
2415         /* Init RX descriptors */
2416         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2417                 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2418                 if (error) {
2419                         if_printf(ifp, "initialization failed: "
2420                                   "no memory for %dth RX ring.\n", r);
2421                         jme_stop(sc);
2422                         return;
2423                 }
2424         }
2425
2426         /* Init TX descriptors */
2427         jme_init_tx_ring(sc);
2428
2429         /* Initialize shadow status block. */
2430         jme_init_ssb(sc);
2431
2432         /* Reprogram the station address. */
2433         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2434         CSR_WRITE_4(sc, JME_PAR0,
2435             eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2436         CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2437
2438         /*
2439          * Configure Tx queue.
2440          *  Tx priority queue weight value : 0
2441          *  Tx FIFO threshold for processing next packet : 16QW
2442          *  Maximum Tx DMA length : 512
2443          *  Allow Tx DMA burst.
2444          */
2445         sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2446         sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2447         sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2448         sc->jme_txcsr |= sc->jme_tx_dma_size;
2449         sc->jme_txcsr |= TXCSR_DMA_BURST;
2450         CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2451
2452         /* Set Tx descriptor counter. */
2453         CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
2454
2455         /* Set Tx ring address to the hardware. */
2456         paddr = sc->jme_cdata.jme_tx_ring_paddr;
2457         CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2458         CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2459
2460         /* Configure TxMAC parameters. */
2461         reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2462         reg |= TXMAC_THRESH_1_PKT;
2463         reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2464         CSR_WRITE_4(sc, JME_TXMAC, reg);
2465
2466         /*
2467          * Configure Rx queue.
2468          *  FIFO full threshold for transmitting Tx pause packet : 128T
2469          *  FIFO threshold for processing next packet : 128QW
2470          *  Rx queue 0 select
2471          *  Max Rx DMA length : 128
2472          *  Rx descriptor retry : 32
2473          *  Rx descriptor retry time gap : 256ns
2474          *  Don't receive runt/bad frame.
2475          */
2476         sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2477 #if 0
2478         /*
2479          * Since Rx FIFO size is 4K bytes, receiving frames larger
2480          * than 4K bytes will suffer from Rx FIFO overruns. So
2481          * decrease FIFO threshold to reduce the FIFO overruns for
2482          * frames larger than 4000 bytes.
2483          * For best performance of standard MTU sized frames use
2484          * maximum allowable FIFO threshold, 128QW.
2485          */
2486         if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2487             JME_RX_FIFO_SIZE)
2488                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2489         else
2490                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2491 #else
2492         /* Improve PCI Express compatibility */
2493         sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2494 #endif
2495         sc->jme_rxcsr |= sc->jme_rx_dma_size;
2496         sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2497         sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2498         /* XXX TODO DROP_BAD */
2499
2500         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2501                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2502
2503                 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2504
2505                 /* Set Rx descriptor counter. */
2506                 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2507
2508                 /* Set Rx ring address to the hardware. */
2509                 paddr = rdata->jme_rx_ring_paddr;
2510                 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2511                 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2512         }
2513
2514         /* Clear receive filter. */
2515         CSR_WRITE_4(sc, JME_RXMAC, 0);
2516
2517         /* Set up the receive filter. */
2518         jme_set_filter(sc);
2519         jme_set_vlan(sc);
2520
2521         /*
2522          * Disable all WOL bits as WOL can interfere normal Rx
2523          * operation. Also clear WOL detection status bits.
2524          */
2525         reg = CSR_READ_4(sc, JME_PMCS);
2526         reg &= ~PMCS_WOL_ENB_MASK;
2527         CSR_WRITE_4(sc, JME_PMCS, reg);
2528
2529         /*
2530          * Pad 10bytes right before received frame. This will greatly
2531          * help Rx performance on strict-alignment architectures as
2532          * it does not need to copy the frame to align the payload.
2533          */
2534         reg = CSR_READ_4(sc, JME_RXMAC);
2535         reg |= RXMAC_PAD_10BYTES;
2536
2537         if (ifp->if_capenable & IFCAP_RXCSUM)
2538                 reg |= RXMAC_CSUM_ENB;
2539         CSR_WRITE_4(sc, JME_RXMAC, reg);
2540
2541         /* Configure general purpose reg0 */
2542         reg = CSR_READ_4(sc, JME_GPREG0);
2543         reg &= ~GPREG0_PCC_UNIT_MASK;
2544         /* Set PCC timer resolution to micro-seconds unit. */
2545         reg |= GPREG0_PCC_UNIT_US;
2546         /*
2547          * Disable all shadow register posting as we have to read
2548          * JME_INTR_STATUS register in jme_intr. Also it seems
2549          * that it's hard to synchronize interrupt status between
2550          * hardware and software with shadow posting due to
2551          * requirements of bus_dmamap_sync(9).
2552          */
2553         reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2554             GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2555             GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2556             GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2557         /* Disable posting of DW0. */
2558         reg &= ~GPREG0_POST_DW0_ENB;
2559         /* Clear PME message. */
2560         reg &= ~GPREG0_PME_ENB;
2561         /* Set PHY address. */
2562         reg &= ~GPREG0_PHY_ADDR_MASK;
2563         reg |= sc->jme_phyaddr;
2564         CSR_WRITE_4(sc, JME_GPREG0, reg);
2565
2566         /* Configure Tx queue 0 packet completion coalescing. */
2567         jme_set_tx_coal(sc);
2568
2569         /* Configure Rx queues packet completion coalescing. */
2570         jme_set_rx_coal(sc);
2571
2572         /* Configure shadow status block but don't enable posting. */
2573         paddr = sc->jme_cdata.jme_ssb_block_paddr;
2574         CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2575         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2576
2577         /* Disable Timer 1 and Timer 2. */
2578         CSR_WRITE_4(sc, JME_TIMER1, 0);
2579         CSR_WRITE_4(sc, JME_TIMER2, 0);
2580
2581         /* Configure retry transmit period, retry limit value. */
2582         CSR_WRITE_4(sc, JME_TXTRHD,
2583             ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2584             TXTRHD_RT_PERIOD_MASK) |
2585             ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2586             TXTRHD_RT_LIMIT_SHIFT));
2587
2588 #ifdef DEVICE_POLLING
2589         if (!(ifp->if_flags & IFF_POLLING))
2590 #endif
2591         /* Initialize the interrupt mask. */
2592         CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2593         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2594
2595         /*
2596          * Enabling Tx/Rx DMA engines and Rx queue processing is
2597          * done after detection of valid link in jme_miibus_statchg.
2598          */
2599         sc->jme_flags &= ~JME_FLAG_LINK;
2600
2601         /* Set the current media. */
2602         mii = device_get_softc(sc->jme_miibus);
2603         mii_mediachg(mii);
2604
2605         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2606
2607         ifp->if_flags |= IFF_RUNNING;
2608         ifp->if_flags &= ~IFF_OACTIVE;
2609 }
2610
/*
 * Stop the interface: mark it down, disable interrupts and shadow
 * status posting, halt both MACs, and release every mbuf / DMA map
 * still held by the RX and TX rings.  Must be called with the ifnet
 * fully serialized.  Safe to call on an already-stopped interface.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partial finished RX segments
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		/* Reset the in-progress reassembly state for this ring. */
		JME_RXCHAIN_RESET(rdata);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				/* Unload the DMA map before freeing the mbuf. */
				bus_dmamap_unload(rdata->jme_rx_tag,
						  rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}
2681
2682 static void
2683 jme_stop_tx(struct jme_softc *sc)
2684 {
2685         uint32_t reg;
2686         int i;
2687
2688         reg = CSR_READ_4(sc, JME_TXCSR);
2689         if ((reg & TXCSR_TX_ENB) == 0)
2690                 return;
2691         reg &= ~TXCSR_TX_ENB;
2692         CSR_WRITE_4(sc, JME_TXCSR, reg);
2693         for (i = JME_TIMEOUT; i > 0; i--) {
2694                 DELAY(1);
2695                 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2696                         break;
2697         }
2698         if (i == 0)
2699                 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2700 }
2701
2702 static void
2703 jme_stop_rx(struct jme_softc *sc)
2704 {
2705         uint32_t reg;
2706         int i;
2707
2708         reg = CSR_READ_4(sc, JME_RXCSR);
2709         if ((reg & RXCSR_RX_ENB) == 0)
2710                 return;
2711         reg &= ~RXCSR_RX_ENB;
2712         CSR_WRITE_4(sc, JME_RXCSR, reg);
2713         for (i = JME_TIMEOUT; i > 0; i--) {
2714                 DELAY(1);
2715                 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2716                         break;
2717         }
2718         if (i == 0)
2719                 device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2720 }
2721
2722 static void
2723 jme_init_tx_ring(struct jme_softc *sc)
2724 {
2725         struct jme_chain_data *cd;
2726         struct jme_txdesc *txd;
2727         int i;
2728
2729         sc->jme_cdata.jme_tx_prod = 0;
2730         sc->jme_cdata.jme_tx_cons = 0;
2731         sc->jme_cdata.jme_tx_cnt = 0;
2732
2733         cd = &sc->jme_cdata;
2734         bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2735         for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2736                 txd = &sc->jme_cdata.jme_txdesc[i];
2737                 txd->tx_m = NULL;
2738                 txd->tx_desc = &cd->jme_tx_ring[i];
2739                 txd->tx_ndesc = 0;
2740         }
2741 }
2742
2743 static void
2744 jme_init_ssb(struct jme_softc *sc)
2745 {
2746         struct jme_chain_data *cd;
2747
2748         cd = &sc->jme_cdata;
2749         bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2750 }
2751
2752 static int
2753 jme_init_rx_ring(struct jme_rxdata *rdata)
2754 {
2755         struct jme_rxdesc *rxd;
2756         int i;
2757
2758         KKASSERT(rdata->jme_rxhead == NULL &&
2759                  rdata->jme_rxtail == NULL &&
2760                  rdata->jme_rxlen == 0);
2761         rdata->jme_rx_cons = 0;
2762
2763         bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2764         for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2765                 int error;
2766
2767                 rxd = &rdata->jme_rxdesc[i];
2768                 rxd->rx_m = NULL;
2769                 rxd->rx_desc = &rdata->jme_rx_ring[i];
2770                 error = jme_newbuf(rdata, rxd, 1);
2771                 if (error)
2772                         return error;
2773         }
2774         return 0;
2775 }
2776
/*
 * Allocate a fresh mbuf cluster for an RX descriptor slot and publish
 * it to the hardware ring.
 *
 * rdata - RX ring the descriptor belongs to
 * rxd   - descriptor slot to (re)fill
 * init  - non-zero during ring initialization: allows the mbuf
 *         allocation to sleep and enables the diagnostic printf
 *
 * Returns 0 on success or an errno (ENOBUFS, DMA load error).  On
 * failure the slot's previous mbuf and mapping, if any, are left
 * intact.
 */
static int
jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
{
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Load the new mbuf into the spare map first, so the slot's
	 * existing mapping stays valid if the load fails.
	 */
	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
			BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "can't load RX mbuf\n");
		}
		return error;
	}

	/* Tear down the mapping of the mbuf being replaced, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the freshly-loaded spare map into the slot. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;

	/*
	 * Fill in the descriptor; JME_RD_OWN hands ownership of the
	 * descriptor to the chip.
	 */
	desc = rxd->rx_desc;
	desc->buflen = htole32(segs.ds_len);
	desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
	desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

	return 0;
}
2827
2828 static void
2829 jme_set_vlan(struct jme_softc *sc)
2830 {
2831         struct ifnet *ifp = &sc->arpcom.ac_if;
2832         uint32_t reg;
2833
2834         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2835
2836         reg = CSR_READ_4(sc, JME_RXMAC);
2837         reg &= ~RXMAC_VLAN_ENB;
2838         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2839                 reg |= RXMAC_VLAN_ENB;
2840         CSR_WRITE_4(sc, JME_RXMAC, reg);
2841 }
2842
2843 static void
2844 jme_set_filter(struct jme_softc *sc)
2845 {
2846         struct ifnet *ifp = &sc->arpcom.ac_if;
2847         struct ifmultiaddr *ifma;
2848         uint32_t crc;
2849         uint32_t mchash[2];
2850         uint32_t rxcfg;
2851
2852         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2853
2854         rxcfg = CSR_READ_4(sc, JME_RXMAC);
2855         rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2856             RXMAC_ALLMULTI);
2857
2858         /*
2859          * Always accept frames destined to our station address.
2860          * Always accept broadcast frames.
2861          */
2862         rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2863
2864         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2865                 if (ifp->if_flags & IFF_PROMISC)
2866                         rxcfg |= RXMAC_PROMISC;
2867                 if (ifp->if_flags & IFF_ALLMULTI)
2868                         rxcfg |= RXMAC_ALLMULTI;
2869                 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2870                 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2871                 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2872                 return;
2873         }
2874
2875         /*
2876          * Set up the multicast address filter by passing all multicast
2877          * addresses through a CRC generator, and then using the low-order
2878          * 6 bits as an index into the 64 bit multicast hash table.  The
2879          * high order bits select the register, while the rest of the bits
2880          * select the bit within the register.
2881          */
2882         rxcfg |= RXMAC_MULTICAST;
2883         bzero(mchash, sizeof(mchash));
2884
2885         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2886                 if (ifma->ifma_addr->sa_family != AF_LINK)
2887                         continue;
2888                 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2889                     ifma->ifma_addr), ETHER_ADDR_LEN);
2890
2891                 /* Just want the 6 least significant bits. */
2892                 crc &= 0x3f;
2893
2894                 /* Set the corresponding bit in the hash table. */
2895                 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2896         }
2897
2898         CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2899         CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2900         CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2901 }
2902
2903 static int
2904 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2905 {
2906         struct jme_softc *sc = arg1;
2907         struct ifnet *ifp = &sc->arpcom.ac_if;
2908         int error, v;
2909
2910         ifnet_serialize_all(ifp);
2911
2912         v = sc->jme_tx_coal_to;
2913         error = sysctl_handle_int(oidp, &v, 0, req);
2914         if (error || req->newptr == NULL)
2915                 goto back;
2916
2917         if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2918                 error = EINVAL;
2919                 goto back;
2920         }
2921
2922         if (v != sc->jme_tx_coal_to) {
2923                 sc->jme_tx_coal_to = v;
2924                 if (ifp->if_flags & IFF_RUNNING)
2925                         jme_set_tx_coal(sc);
2926         }
2927 back:
2928         ifnet_deserialize_all(ifp);
2929         return error;
2930 }
2931
2932 static int
2933 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2934 {
2935         struct jme_softc *sc = arg1;
2936         struct ifnet *ifp = &sc->arpcom.ac_if;
2937         int error, v;
2938
2939         ifnet_serialize_all(ifp);
2940
2941         v = sc->jme_tx_coal_pkt;
2942         error = sysctl_handle_int(oidp, &v, 0, req);
2943         if (error || req->newptr == NULL)
2944                 goto back;
2945
2946         if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2947                 error = EINVAL;
2948                 goto back;
2949         }
2950
2951         if (v != sc->jme_tx_coal_pkt) {
2952                 sc->jme_tx_coal_pkt = v;
2953                 if (ifp->if_flags & IFF_RUNNING)
2954                         jme_set_tx_coal(sc);
2955         }
2956 back:
2957         ifnet_deserialize_all(ifp);
2958         return error;
2959 }
2960
2961 static int
2962 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2963 {
2964         struct jme_softc *sc = arg1;
2965         struct ifnet *ifp = &sc->arpcom.ac_if;
2966         int error, v;
2967
2968         ifnet_serialize_all(ifp);
2969
2970         v = sc->jme_rx_coal_to;
2971         error = sysctl_handle_int(oidp, &v, 0, req);
2972         if (error || req->newptr == NULL)
2973                 goto back;
2974
2975         if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2976                 error = EINVAL;
2977                 goto back;
2978         }
2979
2980         if (v != sc->jme_rx_coal_to) {
2981                 sc->jme_rx_coal_to = v;
2982                 if (ifp->if_flags & IFF_RUNNING)
2983                         jme_set_rx_coal(sc);
2984         }
2985 back:
2986         ifnet_deserialize_all(ifp);
2987         return error;
2988 }
2989
2990 static int
2991 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2992 {
2993         struct jme_softc *sc = arg1;
2994         struct ifnet *ifp = &sc->arpcom.ac_if;
2995         int error, v;
2996
2997         ifnet_serialize_all(ifp);
2998
2999         v = sc->jme_rx_coal_pkt;
3000         error = sysctl_handle_int(oidp, &v, 0, req);
3001         if (error || req->newptr == NULL)
3002                 goto back;
3003
3004         if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3005                 error = EINVAL;
3006                 goto back;
3007         }
3008
3009         if (v != sc->jme_rx_coal_pkt) {
3010                 sc->jme_rx_coal_pkt = v;
3011                 if (ifp->if_flags & IFF_RUNNING)
3012                         jme_set_rx_coal(sc);
3013         }
3014 back:
3015         ifnet_deserialize_all(ifp);
3016         return error;
3017 }
3018
3019 static void
3020 jme_set_tx_coal(struct jme_softc *sc)
3021 {
3022         uint32_t reg;
3023
3024         reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3025             PCCTX_COAL_TO_MASK;
3026         reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3027             PCCTX_COAL_PKT_MASK;
3028         reg |= PCCTX_COAL_TXQ0;
3029         CSR_WRITE_4(sc, JME_PCCTX, reg);
3030 }
3031
3032 static void
3033 jme_set_rx_coal(struct jme_softc *sc)
3034 {
3035         uint32_t reg;
3036         int r;
3037
3038         reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3039             PCCRX_COAL_TO_MASK;
3040         reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3041             PCCRX_COAL_PKT_MASK;
3042         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3043                 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3044 }
3045
3046 #ifdef DEVICE_POLLING
3047
/*
 * DEVICE_POLLING entry point.  REGISTER/DEREGISTER mask or unmask the
 * device interrupts; POLL_ONLY / POLL_AND_CHECK_STATUS drain up to
 * 'count' RX frames per ring and reap completed TX descriptors.
 * Called with the main (jme_serialize) lock held; the per-ring RX and
 * the TX serializers are taken individually below.
 */
static void
jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct jme_softc *sc = ifp->if_softc;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	switch (cmd) {
	case POLL_REGISTER:
		/* Polling takes over; mask device interrupts. */
		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
		break;

	case POLL_DEREGISTER:
		/* Back to interrupt mode; unmask device interrupts. */
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
		break;

	case POLL_AND_CHECK_STATUS:
	case POLL_ONLY:
		status = CSR_READ_4(sc, JME_INTR_STATUS);

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[r];

			lwkt_serialize_enter(&rdata->jme_rx_serialize);
			jme_rxeof(rdata, count);
			lwkt_serialize_exit(&rdata->jme_rx_serialize);
		}

		/* Restart the RX queues if they ran out of descriptors. */
		if (status & INTR_RXQ_DESC_EMPTY) {
			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		/* Reap finished TX descriptors and restart the send queue. */
		lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
		jme_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		break;
	}
}
3093
3094 #endif  /* DEVICE_POLLING */
3095
3096 static int
3097 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3098 {
3099         bus_dmamem_t dmem;
3100         int error;
3101
3102         error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3103                         JME_RX_RING_ALIGN, 0,
3104                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3105                         JME_RX_RING_SIZE(rdata),
3106                         BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3107         if (error) {
3108                 device_printf(rdata->jme_sc->jme_dev,
3109                     "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3110                 return error;
3111         }
3112         rdata->jme_rx_ring_tag = dmem.dmem_tag;
3113         rdata->jme_rx_ring_map = dmem.dmem_map;
3114         rdata->jme_rx_ring = dmem.dmem_addr;
3115         rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3116
3117         return 0;
3118 }
3119
/*
 * Create the DMA tag, the spare map and one map per descriptor for
 * the RX buffers of one ring.
 *
 * Returns 0 on success or an errno.  On any failure, everything
 * created so far (maps, spare map, tag) is destroyed and
 * rdata->jme_rx_tag is reset to NULL, so partial state never leaks.
 */
static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
	int i, error;

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
				  &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n",
		    rdata->jme_rx_idx);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(rdata->jme_sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);

			/* Unwind the maps created so far, then the tag. */
			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}
3180
3181 static void
3182 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3183 {
3184         int r;
3185
3186         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3187                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3188
3189                 if (status & rdata->jme_rx_coal) {
3190                         lwkt_serialize_enter(&rdata->jme_rx_serialize);
3191                         jme_rxeof(rdata, -1);
3192                         lwkt_serialize_exit(&rdata->jme_rx_serialize);
3193                 }
3194         }
3195 }
3196
3197 static void
3198 jme_enable_rss(struct jme_softc *sc)
3199 {
3200         uint32_t rssc, ind;
3201         uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
3202         int i;
3203
3204         KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3205                 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
3206                 ("%s: invalid # of RX rings (%d)\n",
3207                  sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
3208
3209         rssc = RSSC_HASH_64_ENTRY;
3210         rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
3211         rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
3212         JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3213         CSR_WRITE_4(sc, JME_RSSC, rssc);
3214
3215         toeplitz_get_key(key, sizeof(key));
3216         for (i = 0; i < RSSKEY_NREGS; ++i) {
3217                 uint32_t keyreg;
3218
3219                 keyreg = RSSKEY_REGVAL(key, i);
3220                 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3221
3222                 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3223         }
3224
3225         /*
3226          * Create redirect table in following fashion:
3227          * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3228          */
3229         ind = 0;
3230         for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3231                 int q;
3232
3233                 q = i % sc->jme_cdata.jme_rx_ring_cnt;
3234                 ind |= q << (i * 8);
3235         }
3236         JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
3237
3238         for (i = 0; i < RSSTBL_NREGS; ++i)
3239                 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3240 }
3241
/*
 * Disable receive-side scaling by setting the RSS-disable bit in the
 * RSS control register.
 */
static void
jme_disable_rss(struct jme_softc *sc)
{
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}
3247
/*
 * ifnet serialize callback: acquire the serializer(s) selected by
 * 'slz'.  ALL takes every serializer in sc->jme_serialize_arr (in
 * array order, to keep lock ordering consistent); MAIN/TX/RX(n) take
 * the single corresponding serializer.  Unknown types panic.
 */
static void
jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct jme_softc *sc = ifp->if_softc;

	switch (slz) {
	case IFNET_SERIALIZE_ALL:
		lwkt_serialize_array_enter(sc->jme_serialize_arr,
		    sc->jme_serialize_cnt, 0);
		break;

	case IFNET_SERIALIZE_MAIN:
		lwkt_serialize_enter(&sc->jme_serialize);
		break;

	case IFNET_SERIALIZE_TX:
		lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
		break;

	case IFNET_SERIALIZE_RX(0):
		lwkt_serialize_enter(
		    &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
		break;

	case IFNET_SERIALIZE_RX(1):
		lwkt_serialize_enter(
		    &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
		break;

	case IFNET_SERIALIZE_RX(2):
		lwkt_serialize_enter(
		    &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
		break;

	case IFNET_SERIALIZE_RX(3):
		lwkt_serialize_enter(
		    &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
		break;

	default:
		panic("%s unsupported serialize type\n", ifp->if_xname);
	}
}
3291
3292 static void
3293 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3294 {
3295         struct jme_softc *sc = ifp->if_softc;
3296
3297         switch (slz) {
3298         case IFNET_SERIALIZE_ALL:
3299                 lwkt_serialize_array_exit(sc->jme_serialize_arr,
3300                     sc->jme_serialize_cnt, 0);
3301                 break;
3302
3303         case IFNET_SERIALIZE_MAIN:
3304                 lwkt_serialize_exit(&sc->jme_serialize);
3305                 break;
3306
3307         case IFNET_SERIALIZE_TX:
3308                 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3309                 break;
3310
3311         case IFNET_SERIALIZE_RX(0):
3312                 lwkt_serialize_exit(
3313                     &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3314                 break;
3315
3316         case IFNET_SERIALIZE_RX(1):
3317                 lwkt_serialize_exit(
3318                     &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3319                 break;
3320
3321         case IFNET_SERIALIZE_RX(2):
3322                 lwkt_serialize_exit(
3323                     &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3324                 break;
3325
3326         case IFNET_SERIALIZE_RX(3):
3327                 lwkt_serialize_exit(
3328                     &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3329                 break;
3330
3331         default:
3332                 panic("%s unsupported serialize type\n", ifp->if_xname);
3333         }
3334 }
3335
3336 static int
3337 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3338 {
3339         struct jme_softc *sc = ifp->if_softc;
3340
3341         switch (slz) {
3342         case IFNET_SERIALIZE_ALL:
3343                 return lwkt_serialize_array_try(sc->jme_serialize_arr,
3344                     sc->jme_serialize_cnt, 0);
3345
3346         case IFNET_SERIALIZE_MAIN:
3347                 return lwkt_serialize_try(&sc->jme_serialize);
3348
3349         case IFNET_SERIALIZE_TX:
3350                 return lwkt_serialize_try(&sc->jme_cdata.jme_tx_serialize);
3351
3352         case IFNET_SERIALIZE_RX(0):
3353                 return lwkt_serialize_try(
3354                     &sc->jme_cdata.jme_rx_data[0].jme_rx_serialize);
3355
3356         case IFNET_SERIALIZE_RX(1):
3357                 return lwkt_serialize_try(
3358                     &sc->jme_cdata.jme_rx_data[1].jme_rx_serialize);
3359
3360         case IFNET_SERIALIZE_RX(2):
3361                 return lwkt_serialize_try(
3362                     &sc->jme_cdata.jme_rx_data[2].jme_rx_serialize);
3363
3364         case IFNET_SERIALIZE_RX(3):
3365                 return lwkt_serialize_try(
3366                     &sc->jme_cdata.jme_rx_data[3].jme_rx_serialize);
3367
3368         default:
3369                 panic("%s unsupported serialize type\n", ifp->if_xname);
3370         }
3371 }
3372
3373 #ifdef INVARIANTS
3374
3375 static void
3376 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3377     boolean_t serialized)
3378 {
3379         struct jme_softc *sc = ifp->if_softc;
3380         struct jme_rxdata *rdata;
3381         int i;
3382
3383         switch (slz) {
3384         case IFNET_SERIALIZE_ALL:
3385                 if (serialized) {
3386                         for (i = 0; i < sc->jme_serialize_cnt; ++i)
3387                                 ASSERT_SERIALIZED(sc->jme_serialize_arr[i]);
3388                 } else {
3389                         for (i = 0; i < sc->jme_serialize_cnt; ++i)
3390                                 ASSERT_NOT_SERIALIZED(sc->jme_serialize_arr[i]);
3391                 }
3392                 break;
3393
3394         case IFNET_SERIALIZE_MAIN:
3395                 if (serialized)
3396                         ASSERT_SERIALIZED(&sc->jme_serialize);
3397                 else
3398                         ASSERT_NOT_SERIALIZED(&sc->jme_serialize);
3399                 break;
3400
3401         case IFNET_SERIALIZE_TX:
3402                 if (serialized)
3403                         ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3404                 else
3405                         ASSERT_NOT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3406                 break;
3407
3408         case IFNET_SERIALIZE_RX(0):
3409                 rdata = &sc->jme_cdata.jme_rx_data[0];
3410                 if (serialized)
3411                         ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3412                 else
3413                         ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3414                 break;
3415
3416         case IFNET_SERIALIZE_RX(1):
3417                 rdata = &sc->jme_cdata.jme_rx_data[1];
3418                 if (serialized)
3419                         ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3420                 else
3421                         ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3422                 break;
3423
3424         case IFNET_SERIALIZE_RX(2):
3425                 rdata = &sc->jme_cdata.jme_rx_data[2];
3426                 if (serialized)
3427                         ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3428                 else
3429                         ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3430                 break;
3431
3432         case IFNET_SERIALIZE_RX(3):
3433                 rdata = &sc->jme_cdata.jme_rx_data[3];
3434                 if (serialized)
3435                         ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3436                 else
3437                         ASSERT_NOT_SERIALIZED(&rdata->jme_rx_serialize);
3438                 break;
3439
3440         default:
3441                 panic("%s unsupported serialize type\n", ifp->if_xname);
3442         }
3443 }
3444
3445 #endif  /* INVARIANTS */
3446
/*
 * Try to switch the controller to MSI-X: one vector for TX plus one
 * vector per RX ring.  On success sc->jme_irq_type is set to
 * PCI_INTR_TYPE_MSIX; on any failure everything allocated so far is
 * released via jme_msix_free() and the caller falls back to
 * MSI/legacy interrupt allocation.
 */
static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;

	/* One TX vector plus one vector per RX ring. */
	msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt;
	KKASSERT(msix_count <= JME_NMSIX);

	/* Device hint / tunable may disable MSI-X usage. */
	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	/* Invalidate all rids so partial allocation can be rolled back. */
	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/* Slot 0: TX coalescing/timeout interrupt. */
	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = 0;		/* XXX Put TX to cpu0 */
	msix->jme_msix_arg = &sc->jme_cdata;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/* Remaining slots: one per RX ring; ring r is serviced on cpu r. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = r;	/* XXX Put RX to cpuX */
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d", device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Setup jme_msix_cnt early, so we could cleanup */
	sc->jme_msix_cnt = msix_count;

	/* Allocate an MSI-X vector and an IRQ resource for each slot. */
	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		/* Vector 0 is intentionally left unused; start at 1. */
		msix->jme_msix_vector = i + 1;
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	/*
	 * Build the MSINUM register image: each enabled interrupt
	 * source gets the 4-bit number of the vector handling it.
	 */
	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		/* Find the vector that claims this interrupt source. */
		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;

back:
	if (error)
		jme_msix_free(dev);
}
3558
3559 static int
3560 jme_intr_alloc(device_t dev)
3561 {
3562         struct jme_softc *sc = device_get_softc(dev);
3563         u_int irq_flags;
3564
3565         jme_msix_try_alloc(dev);
3566
3567         if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3568                 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3569                     &sc->jme_irq_rid, &irq_flags);
3570
3571                 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3572                     &sc->jme_irq_rid, irq_flags);
3573                 if (sc->jme_irq_res == NULL) {
3574                         device_printf(dev, "can't allocate irq\n");
3575                         return ENXIO;
3576                 }
3577         }
3578         return 0;
3579 }
3580
3581 static void
3582 jme_msix_free(device_t dev)
3583 {
3584         struct jme_softc *sc = device_get_softc(dev);
3585         int i;
3586
3587         KKASSERT(sc->jme_msix_cnt > 1);
3588
3589         for (i = 0; i < sc->jme_msix_cnt; ++i) {
3590                 struct jme_msix_data *msix = &sc->jme_msix[i];
3591
3592                 if (msix->jme_msix_res != NULL) {
3593                         bus_release_resource(dev, SYS_RES_IRQ,
3594                             msix->jme_msix_rid, msix->jme_msix_res);
3595                         msix->jme_msix_res = NULL;
3596                 }
3597                 if (msix->jme_msix_rid >= 0) {
3598                         pci_release_msix_vector(dev, msix->jme_msix_rid);
3599                         msix->jme_msix_rid = -1;
3600                 }
3601         }
3602         pci_teardown_msix(dev);
3603 }
3604
3605 static void
3606 jme_intr_free(device_t dev)
3607 {
3608         struct jme_softc *sc = device_get_softc(dev);
3609
3610         if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3611                 if (sc->jme_irq_res != NULL) {
3612                         bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3613                                              sc->jme_irq_res);
3614                 }
3615                 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3616                         pci_release_msi(dev);
3617         } else {
3618                 jme_msix_free(dev);
3619         }
3620 }
3621
/*
 * MSI-X TX interrupt handler; entered with the TX serializer held.
 * Masks TX interrupts, acks the pending sources, reclaims completed
 * TX descriptors and restarts the send queue, then unmasks.
 */
static void
jme_msix_tx(void *xcd)
{
	struct jme_chain_data *cd = xcd;
	struct jme_softc *sc = cd->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&cd->jme_tx_serialize);

	/* Mask TX interrupts while servicing. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);

	/* Ack coalescing, coalescing-timeout and completion sources. */
	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);

	if (ifp->if_flags & IFF_RUNNING) {
		jme_txeof(sc);
		/* Kick the queue again if packets are still pending. */
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	/* Re-enable TX interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
}
3644
/*
 * MSI-X RX interrupt handler for one RX ring; entered with that
 * ring's serializer held.  Masks the ring's interrupts, acks the
 * pending sources, receives packets and restarts the RX queue if it
 * ran empty, then unmasks.
 */
static void
jme_msix_rx(void *xrdata)
{
	struct jme_rxdata *rdata = xrdata;
	struct jme_softc *sc = rdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	/* Mask this ring's interrupts while servicing. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR,
	    (rdata->jme_rx_coal | rdata->jme_rx_empty));

	/* Only this ring's coalescing/empty bits are of interest. */
	status = CSR_READ_4(sc, JME_INTR_STATUS);
	status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);

	/* When coalescing fired, also ack the completion bit. */
	if (status & rdata->jme_rx_coal)
		status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & rdata->jme_rx_coal)
			jme_rxeof(rdata, -1);

		/* The ring ran dry; restart the RX queue. */
		if (status & rdata->jme_rx_empty) {
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}
	}

	/* Re-enable this ring's interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET,
	    (rdata->jme_rx_coal | rdata->jme_rx_empty));
}
3678
3679 static void
3680 jme_set_msinum(struct jme_softc *sc)
3681 {
3682         int i;
3683
3684         for (i = 0; i < JME_MSINUM_CNT; ++i)
3685                 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3686 }
3687
3688 static int
3689 jme_intr_setup(device_t dev)
3690 {
3691         struct jme_softc *sc = device_get_softc(dev);
3692         struct ifnet *ifp = &sc->arpcom.ac_if;
3693         int error;
3694
3695         if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3696                 return jme_msix_setup(dev);
3697
3698         error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3699             jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3700         if (error) {
3701                 device_printf(dev, "could not set up interrupt handler.\n");
3702                 return error;
3703         }
3704
3705         ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3706         KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3707         return 0;
3708 }
3709
3710 static void
3711 jme_intr_teardown(device_t dev)
3712 {
3713         struct jme_softc *sc = device_get_softc(dev);
3714
3715         if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3716                 jme_msix_teardown(dev, sc->jme_msix_cnt);
3717         else
3718                 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3719 }
3720
3721 static int
3722 jme_msix_setup(device_t dev)
3723 {
3724         struct jme_softc *sc = device_get_softc(dev);
3725         struct ifnet *ifp = &sc->arpcom.ac_if;
3726         int x;
3727
3728         for (x = 0; x < sc->jme_msix_cnt; ++x) {
3729                 struct jme_msix_data *msix = &sc->jme_msix[x];
3730                 int error;
3731
3732                 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3733                     INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3734                     &msix->jme_msix_handle, msix->jme_msix_serialize,
3735                     msix->jme_msix_desc);
3736                 if (error) {
3737                         device_printf(dev, "could not set up %s "
3738                             "interrupt handler.\n", msix->jme_msix_desc);
3739                         jme_msix_teardown(dev, x);
3740                         return error;
3741                 }
3742         }
3743         ifp->if_cpuid = 0; /* XXX */
3744         return 0;
3745 }
3746
3747 static void
3748 jme_msix_teardown(device_t dev, int msix_count)
3749 {
3750         struct jme_softc *sc = device_get_softc(dev);
3751         int x;
3752
3753         for (x = 0; x < msix_count; ++x) {
3754                 struct jme_msix_data *msix = &sc->jme_msix[x];
3755
3756                 bus_teardown_intr(dev, msix->jme_msix_res,
3757                     msix->jme_msix_handle);
3758         }
3759 }