jme: Utilize ifnet_serialize_array_ functions
[dragonfly.git] / sys / dev / netif / jme / if_jme.c
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  */
29
30 #include "opt_polling.h"
31 #include "opt_jme.h"
32
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/serialize2.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/sysctl.h>
46
47 #include <net/ethernet.h>
48 #include <net/if.h>
49 #include <net/bpf.h>
50 #include <net/if_arp.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/ifq_var.h>
54 #include <net/toeplitz.h>
55 #include <net/toeplitz2.h>
56 #include <net/vlan/if_vlan_var.h>
57 #include <net/vlan/if_vlan_ether.h>
58
59 #include <netinet/in.h>
60
61 #include <dev/netif/mii_layer/miivar.h>
62 #include <dev/netif/mii_layer/jmphyreg.h>
63
64 #include <bus/pci/pcireg.h>
65 #include <bus/pci/pcivar.h>
66 #include <bus/pci/pcidevs.h>
67
68 #include <dev/netif/jme/if_jmereg.h>
69 #include <dev/netif/jme/if_jmevar.h>
70
71 #include "miibus_if.h"
72
/* Define the following to disable printing Rx errors. */
#undef	JME_SHOW_ERRORS

/*
 * Fixed slot indices of the TX and first RX serializer in
 * sc->jme_serialize_arr[] (slot 0 is the main sc->jme_serialize);
 * asserted with KKASSERT when the array is built in jme_attach().
 */
#define JME_TX_SERIALIZE	1
#define JME_RX_SERIALIZE	2

/* Checksum offloads requested from the hardware for transmit. */
#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

#ifdef JME_RSS_DEBUG
/* Emit an RSS debug message when the sysctl debug level is >= lvl. */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if ((sc)->jme_rss_debug >= (lvl)) \
		if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !JME_RSS_DEBUG */
#define JME_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* JME_RSS_DEBUG */
90
91 static int      jme_probe(device_t);
92 static int      jme_attach(device_t);
93 static int      jme_detach(device_t);
94 static int      jme_shutdown(device_t);
95 static int      jme_suspend(device_t);
96 static int      jme_resume(device_t);
97
98 static int      jme_miibus_readreg(device_t, int, int);
99 static int      jme_miibus_writereg(device_t, int, int, int);
100 static void     jme_miibus_statchg(device_t);
101
102 static void     jme_init(void *);
103 static int      jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
104 static void     jme_start(struct ifnet *);
105 static void     jme_watchdog(struct ifnet *);
106 static void     jme_mediastatus(struct ifnet *, struct ifmediareq *);
107 static int      jme_mediachange(struct ifnet *);
108 #ifdef DEVICE_POLLING
109 static void     jme_poll(struct ifnet *, enum poll_cmd, int);
110 #endif
111 static void     jme_serialize(struct ifnet *, enum ifnet_serialize);
112 static void     jme_deserialize(struct ifnet *, enum ifnet_serialize);
113 static int      jme_tryserialize(struct ifnet *, enum ifnet_serialize);
114 #ifdef INVARIANTS
115 static void     jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
116                     boolean_t);
117 #endif
118
119 static void     jme_intr(void *);
120 static void     jme_msix_tx(void *);
121 static void     jme_msix_rx(void *);
122 static void     jme_txeof(struct jme_softc *);
123 static void     jme_rxeof(struct jme_rxdata *, int);
124 static void     jme_rx_intr(struct jme_softc *, uint32_t);
125
126 static int      jme_msix_setup(device_t);
127 static void     jme_msix_teardown(device_t, int);
128 static int      jme_intr_setup(device_t);
129 static void     jme_intr_teardown(device_t);
130 static void     jme_msix_try_alloc(device_t);
131 static void     jme_msix_free(device_t);
132 static int      jme_intr_alloc(device_t);
133 static void     jme_intr_free(device_t);
134 static int      jme_dma_alloc(struct jme_softc *);
135 static void     jme_dma_free(struct jme_softc *);
136 static int      jme_init_rx_ring(struct jme_rxdata *);
137 static void     jme_init_tx_ring(struct jme_softc *);
138 static void     jme_init_ssb(struct jme_softc *);
139 static int      jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
140 static int      jme_encap(struct jme_softc *, struct mbuf **);
141 static void     jme_rxpkt(struct jme_rxdata *);
142 static int      jme_rxring_dma_alloc(struct jme_rxdata *);
143 static int      jme_rxbuf_dma_alloc(struct jme_rxdata *);
144
145 static void     jme_tick(void *);
146 static void     jme_stop(struct jme_softc *);
147 static void     jme_reset(struct jme_softc *);
148 static void     jme_set_msinum(struct jme_softc *);
149 static void     jme_set_vlan(struct jme_softc *);
150 static void     jme_set_filter(struct jme_softc *);
151 static void     jme_stop_tx(struct jme_softc *);
152 static void     jme_stop_rx(struct jme_softc *);
153 static void     jme_mac_config(struct jme_softc *);
154 static void     jme_reg_macaddr(struct jme_softc *, uint8_t[]);
155 static int      jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
156 static int      jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
157 #ifdef notyet
158 static void     jme_setwol(struct jme_softc *);
159 static void     jme_setlinkspeed(struct jme_softc *);
160 #endif
161 static void     jme_set_tx_coal(struct jme_softc *);
162 static void     jme_set_rx_coal(struct jme_softc *);
163 static void     jme_enable_rss(struct jme_softc *);
164 static void     jme_disable_rss(struct jme_softc *);
165
166 static void     jme_sysctl_node(struct jme_softc *);
167 static int      jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
168 static int      jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
169 static int      jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
170 static int      jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
171
/*
 * Devices supported by this driver.  Matched against PCI vendor/device
 * IDs in jme_probe(); jme_caps seeds the softc capability flags.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;	/* PCI vendor ID */
	uint16_t	jme_deviceid;	/* PCI device ID */
	uint32_t	jme_caps;	/* initial JME_CAP_* flags */
	const char	*jme_name;	/* probe description string */
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }	/* list terminator */
};
189
/* Device, bus and MII method dispatch table for the jme driver. */
static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

/* Module glue: attach to the PCI bus, depend on the miibus PHY layer. */
DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
223
/*
 * Per-RX-ring interrupt status bits, indexed by ring number.
 * Used to recognize and acknowledge coalescing, completion and
 * descriptor-empty events for each of the (up to 4) RX queues.
 */
static const struct {
	uint32_t	jme_coal;	/* coalescing (pkt count/timeout) bits */
	uint32_t	jme_comp;	/* completion bit */
	uint32_t	jme_empty;	/* descriptor-ring-empty bit */
} jme_rx_status[JME_NRXRING_MAX] = {
	{ INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
	  INTR_RXQ0_DESC_EMPTY },
	{ INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
	  INTR_RXQ1_DESC_EMPTY },
	{ INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
	  INTR_RXQ2_DESC_EMPTY },
	{ INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
	  INTR_RXQ3_DESC_EMPTY }
};
238
/*
 * Driver-wide defaults; overridable at boot through the hw.jme.*
 * loader tunables below, and per-device via device_getenv_int()
 * in jme_attach().
 */
static int	jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int	jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
static int	jme_rx_ring_count = 1;
static int	jme_msi_enable = 1;
static int	jme_msix_enable = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
250
251 static __inline void
252 jme_setup_rxdesc(struct jme_rxdesc *rxd)
253 {
254         struct jme_desc *desc;
255
256         desc = rxd->rx_desc;
257         desc->buflen = htole32(MCLBYTES);
258         desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
259         desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
260         desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
261 }
262
263 /*
264  *      Read a PHY register on the MII of the JMC250.
265  */
266 static int
267 jme_miibus_readreg(device_t dev, int phy, int reg)
268 {
269         struct jme_softc *sc = device_get_softc(dev);
270         uint32_t val;
271         int i;
272
273         /* For FPGA version, PHY address 0 should be ignored. */
274         if (sc->jme_caps & JME_CAP_FPGA) {
275                 if (phy == 0)
276                         return (0);
277         } else {
278                 if (sc->jme_phyaddr != phy)
279                         return (0);
280         }
281
282         CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
283             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
284
285         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
286                 DELAY(1);
287                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
288                         break;
289         }
290         if (i == 0) {
291                 device_printf(sc->jme_dev, "phy read timeout: "
292                               "phy %d, reg %d\n", phy, reg);
293                 return (0);
294         }
295
296         return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
297 }
298
299 /*
300  *      Write a PHY register on the MII of the JMC250.
301  */
302 static int
303 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
304 {
305         struct jme_softc *sc = device_get_softc(dev);
306         int i;
307
308         /* For FPGA version, PHY address 0 should be ignored. */
309         if (sc->jme_caps & JME_CAP_FPGA) {
310                 if (phy == 0)
311                         return (0);
312         } else {
313                 if (sc->jme_phyaddr != phy)
314                         return (0);
315         }
316
317         CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
318             ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
319             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
320
321         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
322                 DELAY(1);
323                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
324                         break;
325         }
326         if (i == 0) {
327                 device_printf(sc->jme_dev, "phy write timeout: "
328                               "phy %d, reg %d\n", phy, reg);
329         }
330
331         return (0);
332 }
333
/*
 *	Callback from MII layer when media changes.
 *
 *	Runs with all ifnet serializers held.  A link change requires a
 *	full MAC restart: interrupts are masked, Rx/Tx are stopped and
 *	drained, the rings are reinitialized and, if the link came up,
 *	the MAC is reprogrammed with the resolved speed/duplex settings
 *	and restarted.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Nothing to do unless the interface is up and running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	/* Re-derive the link flag from the current MII status. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* Fast-ethernet-only parts cannot link at 1000T. */
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs has a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources.  Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain every Rx ring and reset its chain/consumer state. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	/* Reclaim completed Tx descriptors; drop anything still queued. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				/* Dropped frames count as output errors. */
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			/* Select ring r before writing its base address. */
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
474
475 /*
476  *      Get the current interface media status.
477  */
478 static void
479 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
480 {
481         struct jme_softc *sc = ifp->if_softc;
482         struct mii_data *mii = device_get_softc(sc->jme_miibus);
483
484         ASSERT_IFNET_SERIALIZED_ALL(ifp);
485
486         mii_pollstat(mii);
487         ifmr->ifm_status = mii->mii_media_status;
488         ifmr->ifm_active = mii->mii_media_active;
489 }
490
491 /*
492  *      Set hardware to newly-selected media.
493  */
494 static int
495 jme_mediachange(struct ifnet *ifp)
496 {
497         struct jme_softc *sc = ifp->if_softc;
498         struct mii_data *mii = device_get_softc(sc->jme_miibus);
499         int error;
500
501         ASSERT_IFNET_SERIALIZED_ALL(ifp);
502
503         if (mii->mii_instance != 0) {
504                 struct mii_softc *miisc;
505
506                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
507                         mii_phy_reset(miisc);
508         }
509         error = mii_mediachg(mii);
510
511         return (error);
512 }
513
514 static int
515 jme_probe(device_t dev)
516 {
517         const struct jme_dev *sp;
518         uint16_t vid, did;
519
520         vid = pci_get_vendor(dev);
521         did = pci_get_device(dev);
522         for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
523                 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
524                         struct jme_softc *sc = device_get_softc(dev);
525
526                         sc->jme_caps = sp->jme_caps;
527                         device_set_desc(dev, sp->jme_name);
528                         return (0);
529                 }
530         }
531         return (ENXIO);
532 }
533
534 static int
535 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
536 {
537         uint32_t reg;
538         int i;
539
540         *val = 0;
541         for (i = JME_TIMEOUT; i > 0; i--) {
542                 reg = CSR_READ_4(sc, JME_SMBCSR);
543                 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
544                         break;
545                 DELAY(1);
546         }
547
548         if (i == 0) {
549                 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
550                 return (ETIMEDOUT);
551         }
552
553         reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
554         CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
555         for (i = JME_TIMEOUT; i > 0; i--) {
556                 DELAY(1);
557                 reg = CSR_READ_4(sc, JME_SMBINTF);
558                 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
559                         break;
560         }
561
562         if (i == 0) {
563                 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
564                 return (ETIMEDOUT);
565         }
566
567         reg = CSR_READ_4(sc, JME_SMBINTF);
568         *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
569
570         return (0);
571 }
572
/*
 * Try to extract the station address from the EEPROM.
 *
 * After verifying the two-byte EEPROM signature, walk the descriptor
 * list looking for function-0 writes into the BAR1 (register) page
 * that target the PAR0..PAR5 station-address registers, collecting
 * each byte into eaddr[].  Returns 0 once all ETHER_ADDR_LEN bytes
 * are found, ENOENT when the signature is missing, a read fails, or
 * the address is incomplete.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	/* Verify the EEPROM signature bytes first. */
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Only function-0 writes to the register page matter. */
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			/* Collect bytes targeted at PAR0..PAR5. */
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
616
617 static void
618 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
619 {
620         uint32_t par0, par1;
621
622         /* Read station address. */
623         par0 = CSR_READ_4(sc, JME_PAR0);
624         par1 = CSR_READ_4(sc, JME_PAR1);
625         par1 &= 0xFFFF;
626         if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
627                 device_printf(sc->jme_dev,
628                     "generating fake ethernet address.\n");
629                 par0 = karc4random();
630                 /* Set OUI to JMicron. */
631                 eaddr[0] = 0x00;
632                 eaddr[1] = 0x1B;
633                 eaddr[2] = 0x8C;
634                 eaddr[3] = (par0 >> 16) & 0xff;
635                 eaddr[4] = (par0 >> 8) & 0xff;
636                 eaddr[5] = par0 & 0xff;
637         } else {
638                 eaddr[0] = (par0 >> 0) & 0xFF;
639                 eaddr[1] = (par0 >> 8) & 0xFF;
640                 eaddr[2] = (par0 >> 16) & 0xFF;
641                 eaddr[3] = (par0 >> 24) & 0xFF;
642                 eaddr[4] = (par1 >> 0) & 0xFF;
643                 eaddr[5] = (par1 >> 8) & 0xFF;
644         }
645 }
646
647 static int
648 jme_attach(device_t dev)
649 {
650         struct jme_softc *sc = device_get_softc(dev);
651         struct ifnet *ifp = &sc->arpcom.ac_if;
652         uint32_t reg;
653         uint16_t did;
654         uint8_t pcie_ptr, rev;
655         int error = 0, i, j, rx_desc_cnt;
656         uint8_t eaddr[ETHER_ADDR_LEN];
657
658         lwkt_serialize_init(&sc->jme_serialize);
659         lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
660         for (i = 0; i < JME_NRXRING_MAX; ++i) {
661                 lwkt_serialize_init(
662                     &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
663         }
664
665         rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
666             jme_rx_desc_count);
667         rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
668         if (rx_desc_cnt > JME_NDESC_MAX)
669                 rx_desc_cnt = JME_NDESC_MAX;
670
671         sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
672             jme_tx_desc_count);
673         sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
674             JME_NDESC_ALIGN);
675         if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
676                 sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
677
678         /*
679          * Calculate rx rings
680          */
681         sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
682             jme_rx_ring_count);
683         sc->jme_cdata.jme_rx_ring_cnt =
684             if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
685
686         i = 0;
687         sc->jme_serialize_arr[i++] = &sc->jme_serialize;
688
689         KKASSERT(i == JME_TX_SERIALIZE);
690         sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
691
692         KKASSERT(i == JME_RX_SERIALIZE);
693         for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
694                 sc->jme_serialize_arr[i++] =
695                     &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
696         }
697         KKASSERT(i <= JME_NSERIALIZE);
698         sc->jme_serialize_cnt = i;
699
700         sc->jme_cdata.jme_sc = sc;
701         for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
702                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
703
704                 rdata->jme_sc = sc;
705                 rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
706                 rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
707                 rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
708                 rdata->jme_rx_idx = i;
709                 rdata->jme_rx_desc_cnt = rx_desc_cnt;
710         }
711
712         sc->jme_dev = dev;
713         sc->jme_lowaddr = BUS_SPACE_MAXADDR;
714
715         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
716
717         callout_init(&sc->jme_tick_ch);
718
719 #ifndef BURN_BRIDGES
720         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
721                 uint32_t irq, mem;
722
723                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
724                 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
725
726                 device_printf(dev, "chip is in D%d power mode "
727                     "-- setting to D0\n", pci_get_powerstate(dev));
728
729                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
730
731                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
732                 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
733         }
734 #endif  /* !BURN_BRIDGE */
735
736         /* Enable bus mastering */
737         pci_enable_busmaster(dev);
738
739         /*
740          * Allocate IO memory
741          *
742          * JMC250 supports both memory mapped and I/O register space
743          * access.  Because I/O register access should use different
744          * BARs to access registers it's waste of time to use I/O
745          * register spce access.  JMC250 uses 16K to map entire memory
746          * space.
747          */
748         sc->jme_mem_rid = JME_PCIR_BAR;
749         sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
750                                                  &sc->jme_mem_rid, RF_ACTIVE);
751         if (sc->jme_mem_res == NULL) {
752                 device_printf(dev, "can't allocate IO memory\n");
753                 return ENXIO;
754         }
755         sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
756         sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
757
758         /*
759          * Allocate IRQ
760          */
761         error = jme_intr_alloc(dev);
762         if (error)
763                 goto fail;
764
765         /*
766          * Extract revisions
767          */
768         reg = CSR_READ_4(sc, JME_CHIPMODE);
769         if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
770             CHIPMODE_NOT_FPGA) {
771                 sc->jme_caps |= JME_CAP_FPGA;
772                 if (bootverbose) {
773                         device_printf(dev, "FPGA revision: 0x%04x\n",
774                                       (reg & CHIPMODE_FPGA_REV_MASK) >>
775                                       CHIPMODE_FPGA_REV_SHIFT);
776                 }
777         }
778
779         /* NOTE: FM revision is put in the upper 4 bits */
780         rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
781         rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
782         if (bootverbose)
783                 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
784
785         did = pci_get_device(dev);
786         switch (did) {
787         case PCI_PRODUCT_JMICRON_JMC250:
788                 if (rev == JME_REV1_A2)
789                         sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
790                 break;
791
792         case PCI_PRODUCT_JMICRON_JMC260:
793                 if (rev == JME_REV2)
794                         sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
795                 break;
796
797         default:
798                 panic("unknown device id 0x%04x", did);
799         }
800         if (rev >= JME_REV2) {
801                 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
802                 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
803                                       GHC_TXMAC_CLKSRC_1000;
804         }
805
806         /* Reset the ethernet controller. */
807         jme_reset(sc);
808
809         /* Map MSI/MSI-X vectors */
810         jme_set_msinum(sc);
811
812         /* Get station address. */
813         reg = CSR_READ_4(sc, JME_SMBCSR);
814         if (reg & SMBCSR_EEPROM_PRESENT)
815                 error = jme_eeprom_macaddr(sc, eaddr);
816         if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
817                 if (error != 0 && (bootverbose)) {
818                         device_printf(dev, "ethernet hardware address "
819                                       "not found in EEPROM.\n");
820                 }
821                 jme_reg_macaddr(sc, eaddr);
822         }
823
824         /*
825          * Save PHY address.
826          * Integrated JR0211 has fixed PHY address whereas FPGA version
827          * requires PHY probing to get correct PHY address.
828          */
829         if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
830                 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
831                     GPREG0_PHY_ADDR_MASK;
832                 if (bootverbose) {
833                         device_printf(dev, "PHY is at address %d.\n",
834                             sc->jme_phyaddr);
835                 }
836         } else {
837                 sc->jme_phyaddr = 0;
838         }
839
840         /* Set max allowable DMA size. */
841         pcie_ptr = pci_get_pciecap_ptr(dev);
842         if (pcie_ptr != 0) {
843                 uint16_t ctrl;
844
845                 sc->jme_caps |= JME_CAP_PCIE;
846                 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
847                 if (bootverbose) {
848                         device_printf(dev, "Read request size : %d bytes.\n",
849                             128 << ((ctrl >> 12) & 0x07));
850                         device_printf(dev, "TLP payload size : %d bytes.\n",
851                             128 << ((ctrl >> 5) & 0x07));
852                 }
853                 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
854                 case PCIEM_DEVCTL_MAX_READRQ_128:
855                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
856                         break;
857                 case PCIEM_DEVCTL_MAX_READRQ_256:
858                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
859                         break;
860                 default:
861                         sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
862                         break;
863                 }
864                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
865         } else {
866                 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
867                 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
868         }
869
870 #ifdef notyet
871         if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
872                 sc->jme_caps |= JME_CAP_PMCAP;
873 #endif
874
875         /*
876          * Create sysctl tree
877          */
878         jme_sysctl_node(sc);
879
880         /* Allocate DMA stuffs */
881         error = jme_dma_alloc(sc);
882         if (error)
883                 goto fail;
884
885         ifp->if_softc = sc;
886         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
887         ifp->if_init = jme_init;
888         ifp->if_ioctl = jme_ioctl;
889         ifp->if_start = jme_start;
890 #ifdef DEVICE_POLLING
891         ifp->if_poll = jme_poll;
892 #endif
893         ifp->if_watchdog = jme_watchdog;
894         ifp->if_serialize = jme_serialize;
895         ifp->if_deserialize = jme_deserialize;
896         ifp->if_tryserialize = jme_tryserialize;
897 #ifdef INVARIANTS
898         ifp->if_serialize_assert = jme_serialize_assert;
899 #endif
900         ifq_set_maxlen(&ifp->if_snd,
901             sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
902         ifq_set_ready(&ifp->if_snd);
903
904         /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
905         ifp->if_capabilities = IFCAP_HWCSUM |
906                                IFCAP_VLAN_MTU |
907                                IFCAP_VLAN_HWTAGGING;
908         if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
909                 ifp->if_capabilities |= IFCAP_RSS;
910         ifp->if_capenable = ifp->if_capabilities;
911
912         /*
913          * Disable TXCSUM by default to improve bulk data
914          * transmit performance (+20Mbps improvement).
915          */
916         ifp->if_capenable &= ~IFCAP_TXCSUM;
917
918         if (ifp->if_capenable & IFCAP_TXCSUM)
919                 ifp->if_hwassist = JME_CSUM_FEATURES;
920
921         /* Set up MII bus. */
922         error = mii_phy_probe(dev, &sc->jme_miibus,
923                               jme_mediachange, jme_mediastatus);
924         if (error) {
925                 device_printf(dev, "no PHY found!\n");
926                 goto fail;
927         }
928
929         /*
930          * Save PHYADDR for FPGA mode PHY.
931          */
932         if (sc->jme_caps & JME_CAP_FPGA) {
933                 struct mii_data *mii = device_get_softc(sc->jme_miibus);
934
935                 if (mii->mii_instance != 0) {
936                         struct mii_softc *miisc;
937
938                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
939                                 if (miisc->mii_phy != 0) {
940                                         sc->jme_phyaddr = miisc->mii_phy;
941                                         break;
942                                 }
943                         }
944                         if (sc->jme_phyaddr != 0) {
945                                 device_printf(sc->jme_dev,
946                                     "FPGA PHY is at %d\n", sc->jme_phyaddr);
947                                 /* vendor magic. */
948                                 jme_miibus_writereg(dev, sc->jme_phyaddr,
949                                     JMPHY_CONF, JMPHY_CONF_DEFFIFO);
950
951                                 /* XXX should we clear JME_WA_EXTFIFO */
952                         }
953                 }
954         }
955
956         ether_ifattach(ifp, eaddr, NULL);
957
958         /* Tell the upper layer(s) we support long frames. */
959         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
960
961         error = jme_intr_setup(dev);
962         if (error) {
963                 ether_ifdetach(ifp);
964                 goto fail;
965         }
966
967         return 0;
968 fail:
969         jme_detach(dev);
970         return (error);
971 }
972
973 static int
974 jme_detach(device_t dev)
975 {
976         struct jme_softc *sc = device_get_softc(dev);
977
978         if (device_is_attached(dev)) {
979                 struct ifnet *ifp = &sc->arpcom.ac_if;
980
981                 ifnet_serialize_all(ifp);
982                 jme_stop(sc);
983                 jme_intr_teardown(dev);
984                 ifnet_deserialize_all(ifp);
985
986                 ether_ifdetach(ifp);
987         }
988
989         if (sc->jme_sysctl_tree != NULL)
990                 sysctl_ctx_free(&sc->jme_sysctl_ctx);
991
992         if (sc->jme_miibus != NULL)
993                 device_delete_child(dev, sc->jme_miibus);
994         bus_generic_detach(dev);
995
996         jme_intr_free(dev);
997
998         if (sc->jme_mem_res != NULL) {
999                 bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
1000                                      sc->jme_mem_res);
1001         }
1002
1003         jme_dma_free(sc);
1004
1005         return (0);
1006 }
1007
/*
 * Build the per-device sysctl tree (hw.<devname>.*): writable
 * interrupt coalescing tunables, read-only descriptor/ring counts
 * and, under JME_RSS_DEBUG, per-ring RX packet counters.  Finally
 * install default coalescing parameters, clamped so they never
 * exceed what the configured descriptor counts can serve.
 */
static void
jme_sysctl_node(struct jme_softc *sc)
{
        int coal_max;
#ifdef JME_RSS_DEBUG
        int r;
#endif

        sysctl_ctx_init(&sc->jme_sysctl_ctx);
        sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
                                SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
                                device_get_nameunit(sc->jme_dev),
                                CTLFLAG_RD, 0, "");
        if (sc->jme_sysctl_tree == NULL) {
                device_printf(sc->jme_dev, "can't add sysctl node\n");
                return;
        }

        /* Runtime-writable TX/RX coalescing timeout and packet knobs. */
        SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

        SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

        SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

        SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
            SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
            "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
            sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

        /* Read-only descriptor/ring geometry, as configured at attach. */
        SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
                       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
                       "rx_desc_count", CTLFLAG_RD,
                       &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
                       0, "RX desc count");
        SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
                       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
                       "tx_desc_count", CTLFLAG_RD,
                       &sc->jme_cdata.jme_tx_desc_cnt,
                       0, "TX desc count");
        SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
                       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
                       "rx_ring_count", CTLFLAG_RD,
                       &sc->jme_cdata.jme_rx_ring_cnt,
                       0, "RX ring count");
#ifdef JME_RSS_DEBUG
        SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
                       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
                       "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
                       0, "RSS debug level");
        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                char rx_ring_pkt[32];

                ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
                SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
                    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
                    rx_ring_pkt, CTLFLAG_RW,
                    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
        }
#endif

        /*
         * Set default coalesce values
         */
        sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
        sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
        sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
        sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

        /*
         * Adjust coalesce values, in case the number of TX/RX
         * descs are set to small values by users.
         *
         * NOTE: coal_max will not be zero, since the number of descs
         * must be aligned to JME_NDESC_ALIGN (16 currently)
         */
        coal_max = sc->jme_cdata.jme_tx_desc_cnt / 6;
        if (coal_max < sc->jme_tx_coal_pkt)
                sc->jme_tx_coal_pkt = coal_max;

        coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 4;
        if (coal_max < sc->jme_rx_coal_pkt)
                sc->jme_rx_coal_pkt = coal_max;
}
1100
/*
 * Allocate all DMA resources: the software descriptor arrays, the
 * parent ring/buffer DMA tags, the TX descriptor ring, the per-ring
 * RX descriptor rings, the shadow status block and the per-packet
 * TX/RX DMA maps.
 *
 * Returns 0 or a bus_dma error code.  Except for the TX dmamap loop,
 * which unwinds its own partial work, error paths return immediately
 * and rely on the caller (jme_attach -> jme_detach -> jme_dma_free)
 * to release whatever was already allocated.
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
        struct jme_txdesc *txd;
        bus_dmamem_t dmem;
        int error, i, asize;

        /* Software descriptor state; M_WAITOK sleeps rather than failing. */
        sc->jme_cdata.jme_txdesc =
        kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
                M_DEVBUF, M_WAITOK | M_ZERO);
        for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
                struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

                rdata->jme_rxdesc =
                kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
                        M_DEVBUF, M_WAITOK | M_ZERO);
        }

        /*
         * Create parent ring tag.  The address limit (sc->jme_lowaddr)
         * and JME_RING_BOUNDARY constraint are inherited by the ring
         * allocations made from it below.
         */
        error = bus_dma_tag_create(NULL,/* parent */
            1, JME_RING_BOUNDARY,       /* algnmnt, boundary */
            sc->jme_lowaddr,            /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
            0,                          /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
            0,                          /* flags */
            &sc->jme_cdata.jme_ring_tag);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not create parent ring DMA tag.\n");
                return error;
        }

        /*
         * Create DMA stuffs for TX ring
         */
        asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
        error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
                        JME_TX_RING_ALIGN, 0,
                        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                        asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
        if (error) {
                device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
                return error;
        }
        sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
        sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
        sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
        sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

        /*
         * Create DMA stuffs for RX rings
         */
        for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
                error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
                if (error)
                        return error;
        }

        /* Create parent buffer tag; parent of the SSB and TX buffer tags. */
        error = bus_dma_tag_create(NULL,/* parent */
            1, 0,                       /* algnmnt, boundary */
            sc->jme_lowaddr,            /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
            0,                          /* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
            0,                          /* flags */
            &sc->jme_cdata.jme_buffer_tag);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not create parent buffer DMA tag.\n");
                return error;
        }

        /*
         * Create DMA stuffs for shadow status block
         */
        asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
        error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
                        JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                        asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not create shadow status block.\n");
                return error;
        }
        sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
        sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
        sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
        sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

        /*
         * Create DMA stuffs for TX buffers
         */

        /* Create tag for Tx buffers. */
        error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
            1, 0,                       /* algnmnt, boundary */
            BUS_SPACE_MAXADDR,          /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            JME_JUMBO_FRAMELEN,         /* maxsize */
            JME_MAXTXSEGS,              /* nsegments */
            JME_MAXSEGSIZE,             /* maxsegsize */
            BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
            &sc->jme_cdata.jme_tx_tag);
        if (error != 0) {
                device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
                return error;
        }

        /* Create DMA maps for Tx buffers. */
        for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
                txd = &sc->jme_cdata.jme_txdesc[i];
                error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
                                BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
                                &txd->tx_dmamap);
                if (error) {
                        int j;

                        device_printf(sc->jme_dev,
                            "could not create %dth Tx dmamap.\n", i);

                        /* Unwind the maps created so far, then the tag. */
                        for (j = 0; j < i; ++j) {
                                txd = &sc->jme_cdata.jme_txdesc[j];
                                bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
                                                   txd->tx_dmamap);
                        }
                        bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
                        sc->jme_cdata.jme_tx_tag = NULL;
                        return error;
                }
        }

        /*
         * Create DMA stuffs for RX buffers
         */
        for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
                error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
                if (error)
                        return error;
        }
        return 0;
}
1249
/*
 * Release every DMA resource created by jme_dma_alloc().  Each
 * release is guarded by a NULL check and the corresponding pointer
 * is cleared afterwards, so this is safe to call on a partially
 * completed allocation (attach failure path) and is idempotent.
 * Child tags/maps are destroyed before their parent tags.
 */
static void
jme_dma_free(struct jme_softc *sc)
{
        struct jme_txdesc *txd;
        struct jme_rxdesc *rxd;
        struct jme_rxdata *rdata;
        int i, r;

        /* Tx ring */
        if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
                bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
                    sc->jme_cdata.jme_tx_ring_map);
                bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
                    sc->jme_cdata.jme_tx_ring,
                    sc->jme_cdata.jme_tx_ring_map);
                bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
                sc->jme_cdata.jme_tx_ring_tag = NULL;
        }

        /* Rx ring */
        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                if (rdata->jme_rx_ring_tag != NULL) {
                        bus_dmamap_unload(rdata->jme_rx_ring_tag,
                                          rdata->jme_rx_ring_map);
                        bus_dmamem_free(rdata->jme_rx_ring_tag,
                                        rdata->jme_rx_ring,
                                        rdata->jme_rx_ring_map);
                        bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
                        rdata->jme_rx_ring_tag = NULL;
                }
        }

        /* Tx buffers */
        if (sc->jme_cdata.jme_tx_tag != NULL) {
                for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
                        txd = &sc->jme_cdata.jme_txdesc[i];
                        bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
                            txd->tx_dmamap);
                }
                bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
                sc->jme_cdata.jme_tx_tag = NULL;
        }

        /* Rx buffers (per-descriptor maps plus the spare map). */
        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                if (rdata->jme_rx_tag != NULL) {
                        for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
                                rxd = &rdata->jme_rxdesc[i];
                                bus_dmamap_destroy(rdata->jme_rx_tag,
                                                   rxd->rx_dmamap);
                        }
                        bus_dmamap_destroy(rdata->jme_rx_tag,
                                           rdata->jme_rx_sparemap);
                        bus_dma_tag_destroy(rdata->jme_rx_tag);
                        rdata->jme_rx_tag = NULL;
                }
        }

        /* Shadow status block. */
        if (sc->jme_cdata.jme_ssb_tag != NULL) {
                bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
                    sc->jme_cdata.jme_ssb_map);
                bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
                    sc->jme_cdata.jme_ssb_block,
                    sc->jme_cdata.jme_ssb_map);
                bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
                sc->jme_cdata.jme_ssb_tag = NULL;
        }

        /* Parent tags, after all children are gone. */
        if (sc->jme_cdata.jme_buffer_tag != NULL) {
                bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
                sc->jme_cdata.jme_buffer_tag = NULL;
        }
        if (sc->jme_cdata.jme_ring_tag != NULL) {
                bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
                sc->jme_cdata.jme_ring_tag = NULL;
        }

        /* Software descriptor arrays. */
        if (sc->jme_cdata.jme_txdesc != NULL) {
                kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
                sc->jme_cdata.jme_txdesc = NULL;
        }
        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                if (rdata->jme_rxdesc != NULL) {
                        kfree(rdata->jme_rxdesc, M_DEVBUF);
                        rdata->jme_rxdesc = NULL;
                }
        }
}
1342
1343 /*
1344  *      Make sure the interface is stopped at reboot time.
1345  */
1346 static int
1347 jme_shutdown(device_t dev)
1348 {
1349         return jme_suspend(dev);
1350 }
1351
1352 #ifdef notyet
1353 /*
1354  * Unlike other ethernet controllers, JMC250 requires
1355  * explicit resetting link speed to 10/100Mbps as gigabit
1356  * link will cunsume more power than 375mA.
1357  * Note, we reset the link speed to 10/100Mbps with
1358  * auto-negotiation but we don't know whether that operation
1359  * would succeed or not as we have no control after powering
1360  * off. If the renegotiation fail WOL may not work. Running
1361  * at 1Gbps draws more power than 375mA at 3.3V which is
1362  * specified in PCI specification and that would result in
1363  * complete shutdowning power to ethernet controller.
1364  *
1365  * TODO
1366  *  Save current negotiated media speed/duplex/flow-control
1367  *  to softc and restore the same link again after resuming.
1368  *  PHY handling such as power down/resetting to 100Mbps
1369  *  may be better handled in suspend method in phy driver.
1370  */
/*
 * Force the link down to 10/100Mbps before powering off (currently
 * compiled out under #ifdef notyet; the comment above explains why).
 *
 * Restarts autonegotiation advertising only 10/100 modes and, if the
 * link was running at 1000baseT, polls up to MII_ANEGTICKS_GIGE
 * iterations (one-second sleeps) for a 10/100 link.  As a last resort
 * the MAC is forced to a fixed 100Mbps full-duplex configuration.
 *
 * NOTE(review): this body still uses FreeBSD-style JME_LOCK macros
 * and pause(), while the active code in this file uses the ifnet
 * serializer functions — it must be adapted before being enabled.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
        struct mii_data *mii;
        int aneg, i;

        JME_LOCK_ASSERT(sc);

        mii = device_get_softc(sc->jme_miibus);
        mii_pollstat(mii);
        aneg = 0;
        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                /*
                 * NOTE: relies on IFM_SUBTYPE() expanding to a fully
                 * parenthesized expression (no extra parens here).
                 */
                switch IFM_SUBTYPE(mii->mii_media_active) {
                case IFM_10_T:
                case IFM_100_TX:
                        /* Already at 10/100; nothing to do. */
                        return;
                case IFM_1000_T:
                        aneg++;
                        /* FALLTHROUGH */
                default:
                        break;
                }
        }
        /* Advertise 10/100 only and restart autonegotiation. */
        jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
        jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
            ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
        jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
            BMCR_AUTOEN | BMCR_STARTNEG);
        DELAY(1000);
        if (aneg != 0) {
                /* Poll link state until jme(4) get a 10/100 link. */
                for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
                        mii_pollstat(mii);
                        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                                case IFM_10_T:
                                case IFM_100_TX:
                                        jme_mac_config(sc);
                                        return;
                                default:
                                        break;
                                }
                        }
                        /* Drop the lock while sleeping for one second. */
                        JME_UNLOCK(sc);
                        pause("jmelnk", hz);
                        JME_LOCK(sc);
                }
                if (i == MII_ANEGTICKS_GIGE)
                        device_printf(sc->jme_dev, "establishing link failed, "
                            "WOL may not work!");
        }
        /*
         * No link, force MAC to have 100Mbps, full-duplex link.
         * This is the last resort and may/may not work.
         */
        mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
        mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
        jme_mac_config(sc);
}
1429
/*
 * Program Wake-On-LAN state (currently compiled out under
 * #ifdef notyet).
 *
 * Without a PME capability the PHY is simply powered down.  Otherwise
 * magic-frame wakeup is enabled when IFCAP_WOL_MAGIC is set (with the
 * link dropped to 10/100 on gigabit parts, see jme_setlinkspeed()),
 * PME is requested through the PCI power-management registers, and
 * the PHY is powered down when WOL is disabled altogether.
 */
static void
jme_setwol(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t gpr, pmcs;
        uint16_t pmstat;
        int pmc;

        /* pci_find_extcap() returns 0 when the capability exists. */
        if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
                /* No PME capability, PHY power down. */
                jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
                    MII_BMCR, BMCR_PDOWN);
                return;
        }

        gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
        pmcs = CSR_READ_4(sc, JME_PMCS);
        pmcs &= ~PMCS_WOL_ENB_MASK;
        if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
                pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
                /* Enable PME message. */
                gpr |= GPREG0_PME_ENB;
                /* For gigabit controllers, reset link speed to 10/100. */
                if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
                        jme_setlinkspeed(sc);
        }

        CSR_WRITE_4(sc, JME_PMCS, pmcs);
        CSR_WRITE_4(sc, JME_GPREG0, gpr);

        /* Request PME. */
        pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
        pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
        if ((ifp->if_capenable & IFCAP_WOL) != 0)
                pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
        if ((ifp->if_capenable & IFCAP_WOL) == 0) {
                /* No WOL, PHY power down. */
                jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
                    MII_BMCR, BMCR_PDOWN);
        }
}
1472 #endif
1473
1474 static int
1475 jme_suspend(device_t dev)
1476 {
1477         struct jme_softc *sc = device_get_softc(dev);
1478         struct ifnet *ifp = &sc->arpcom.ac_if;
1479
1480         ifnet_serialize_all(ifp);
1481         jme_stop(sc);
1482 #ifdef notyet
1483         jme_setwol(sc);
1484 #endif
1485         ifnet_deserialize_all(ifp);
1486
1487         return (0);
1488 }
1489
1490 static int
1491 jme_resume(device_t dev)
1492 {
1493         struct jme_softc *sc = device_get_softc(dev);
1494         struct ifnet *ifp = &sc->arpcom.ac_if;
1495 #ifdef notyet
1496         int pmc;
1497 #endif
1498
1499         ifnet_serialize_all(ifp);
1500
1501 #ifdef notyet
1502         if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1503                 uint16_t pmstat;
1504
1505                 pmstat = pci_read_config(sc->jme_dev,
1506                     pmc + PCIR_POWER_STATUS, 2);
1507                 /* Disable PME clear PME status. */
1508                 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1509                 pci_write_config(sc->jme_dev,
1510                     pmc + PCIR_POWER_STATUS, pmstat, 2);
1511         }
1512 #endif
1513
1514         if (ifp->if_flags & IFF_UP)
1515                 jme_init(sc);
1516
1517         ifnet_deserialize_all(ifp);
1518
1519         return (0);
1520 }
1521
/*
 * Encapsulate one outbound mbuf chain into hardware TX descriptors.
 *
 * Two descriptor chain formats are used depending on the DMA address
 * width (sc->jme_lowaddr):
 *  - 64-bit format: the first descriptor is a "symbol" descriptor
 *    carrying the checksum/VLAN flags and total packet length but no
 *    payload; each data segment gets its own descriptor after it.
 *  - 32-bit format: the first descriptor also carries the first data
 *    segment.
 *
 * JME_TD_OWN on the first descriptor is set only after the entire
 * chain has been built, so the hardware never sees a partial chain.
 *
 * Returns 0 on success.  On failure the mbuf chain is freed and
 * *m_head is set to NULL.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
        struct jme_txdesc *txd;
        struct jme_desc *desc;
        struct mbuf *m;
        bus_dma_segment_t txsegs[JME_MAXTXSEGS];
        int maxsegs, nsegs;
        int error, i, prod, symbol_desc;
        uint32_t cflags, flag64;

        M_ASSERTPKTHDR((*m_head));

        prod = sc->jme_cdata.jme_tx_prod;
        txd = &sc->jme_cdata.jme_txdesc[prod];

        /* 64-bit chains consume one extra (symbol) descriptor. */
        if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
                symbol_desc = 1;
        else
                symbol_desc = 0;

        /*
         * Cap the segment count by the free descriptors, keeping
         * JME_TXD_RSVD (plus the symbol descriptor) in reserve.
         */
        maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
                  (JME_TXD_RSVD + symbol_desc);
        if (maxsegs > JME_MAXTXSEGS)
                maxsegs = JME_MAXTXSEGS;
        KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
                ("not enough segments %d", maxsegs));

        /* May defragment the chain so it fits into maxsegs segments. */
        error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
                        txd->tx_dmamap, m_head,
                        txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
        if (error)
                goto fail;

        bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
                        BUS_DMASYNC_PREWRITE);

        m = *m_head;
        cflags = 0;

        /* Configure checksum offload. */
        if (m->m_pkthdr.csum_flags & CSUM_IP)
                cflags |= JME_TD_IPCSUM;
        if (m->m_pkthdr.csum_flags & CSUM_TCP)
                cflags |= JME_TD_TCPCSUM;
        if (m->m_pkthdr.csum_flags & CSUM_UDP)
                cflags |= JME_TD_UDPCSUM;

        /* Configure VLAN. */
        if (m->m_flags & M_VLANTAG) {
                cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
                cflags |= JME_TD_VLAN_TAG;
        }

        /* First descriptor: flags, plus total packet length in addr_hi. */
        desc = &sc->jme_cdata.jme_tx_ring[prod];
        desc->flags = htole32(cflags);
        desc->addr_hi = htole32(m->m_pkthdr.len);
        if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
                /*
                 * Use 64bits TX desc chain format.
                 *
                 * The first TX desc of the chain, which is setup here,
                 * is just a symbol TX desc carrying no payload.
                 */
                flag64 = JME_TD_64BIT;
                desc->buflen = 0;
                desc->addr_lo = 0;

                /* No effective TX desc is consumed */
                i = 0;
        } else {
                /*
                 * Use 32bits TX desc chain format.
                 *
                 * The first TX desc of the chain, which is setup here,
                 * is an effective TX desc carrying the first segment of
                 * the mbuf chain.
                 */
                flag64 = 0;
                desc->buflen = htole32(txsegs[0].ds_len);
                desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

                /* One effective TX desc is consumed */
                i = 1;
        }
        sc->jme_cdata.jme_tx_cnt++;
        KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
                 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
        JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);

        /* tx_ndesc ends up as nsegs + symbol_desc (see += below). */
        txd->tx_ndesc = 1 - i;
        for (; i < nsegs; i++) {
                desc = &sc->jme_cdata.jme_tx_ring[prod];
                desc->buflen = htole32(txsegs[i].ds_len);
                desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
                desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
                desc->flags = htole32(JME_TD_OWN | flag64);

                sc->jme_cdata.jme_tx_cnt++;
                KKASSERT(sc->jme_cdata.jme_tx_cnt <=
                         sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
                JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
        }

        /* Update producer index. */
        sc->jme_cdata.jme_tx_prod = prod;
        /*
         * Finally request interrupt and give the first descriptor
         * ownership to hardware.
         */
        desc = txd->tx_desc;
        desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

        txd->tx_m = m;
        txd->tx_ndesc += nsegs;

        return 0;
fail:
        m_freem(*m_head);
        *m_head = NULL;
        return error;
}
1644
/*
 * Transmit start handler (ifnet if_start).
 *
 * Drains the interface send queue into the TX descriptor ring via
 * jme_encap() and kicks the transmitter once for the whole batch.
 * Runs with the TX serializer held; also reached through if_devstart()
 * from jme_intr() and jme_watchdog().
 */
static void
jme_start(struct ifnet *ifp)
{
        struct jme_softc *sc = ifp->if_softc;
        struct mbuf *m_head;
        int enq = 0;

        ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

        /* Without link there is no point in keeping queued packets. */
        if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
                ifq_purge(&ifp->if_snd);
                return;
        }

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                return;

        /* Ring is getting full; try to reclaim completed descriptors. */
        if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
                jme_txeof(sc);

        while (!ifq_is_empty(&ifp->if_snd)) {
                /*
                 * Check number of available TX descs, always
                 * leave JME_TXD_RSVD free TX descs.
                 */
                if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
                    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                m_head = ifq_dequeue(&ifp->if_snd, NULL);
                if (m_head == NULL)
                        break;

                /*
                 * Pack the data into the transmit ring. If we
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
                if (jme_encap(sc, &m_head)) {
                        /* jme_encap() frees the mbuf on failure. */
                        KKASSERT(m_head == NULL);
                        ifp->if_oerrors++;
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }
                enq++;

                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                ETHER_BPF_MTAP(ifp, m_head);
        }

        if (enq > 0) {
                /*
                 * Reading TXCSR takes very long time under heavy load
                 * so cache TXCSR value and writes the ORed value with
                 * the kick command to the TXCSR. This saves one register
                 * access cycle.
                 */
                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
                    TXCSR_TXQ_N_START(TXCSR_TXQ0));
                /* Set a timeout in case the chip goes out to lunch. */
                ifp->if_timer = JME_TX_TIMEOUT;
        }
}
1713
/*
 * TX watchdog (ifnet if_watchdog), fired when ifp->if_timer expires
 * before all queued frames completed.
 *
 * First tries the cheap recovery (reap finished descriptors in case a
 * TX completion interrupt was missed); if descriptors are genuinely
 * stuck, or the link is gone, the chip is reinitialized.
 * Called with all ifnet serializers held.
 */
static void
jme_watchdog(struct ifnet *ifp)
{
        struct jme_softc *sc = ifp->if_softc;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
                if_printf(ifp, "watchdog timeout (missed link)\n");
                ifp->if_oerrors++;
                jme_init(sc);
                return;
        }

        /* Maybe we only missed a TX completion interrupt. */
        jme_txeof(sc);
        if (sc->jme_cdata.jme_tx_cnt == 0) {
                if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
                          "-- recovering\n");
                if (!ifq_is_empty(&ifp->if_snd))
                        if_devstart(ifp);
                return;
        }

        /* Real TX hang: reset the chip and restart transmission. */
        if_printf(ifp, "watchdog timeout\n");
        ifp->if_oerrors++;
        jme_init(sc);
        if (!ifq_is_empty(&ifp->if_snd))
                if_devstart(ifp);
}
1743
/*
 * Ioctl handler (ifnet if_ioctl).  Runs with all ifnet serializers held.
 *
 * Handles MTU changes, interface flag changes, multicast filter
 * updates, media selection and capability toggling; anything else is
 * forwarded to ether_ioctl().  Returns 0 or an errno.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
        struct jme_softc *sc = ifp->if_softc;
        struct mii_data *mii = device_get_softc(sc->jme_miibus);
        struct ifreq *ifr = (struct ifreq *)data;
        int error = 0, mask;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        switch (cmd) {
        case SIOCSIFMTU:
                /* Jumbo MTU is only accepted on chips with JME_CAP_JUMBO. */
                if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
                    (!(sc->jme_caps & JME_CAP_JUMBO) &&
                     ifr->ifr_mtu > JME_MAX_MTU)) {
                        error = EINVAL;
                        break;
                }

                if (ifp->if_mtu != ifr->ifr_mtu) {
                        /*
                         * No special configuration is required when interface
                         * MTU is changed but availability of Tx checksum
                         * offload should be checked against new MTU size as
                         * FIFO size is just 2K.
                         */
                        if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
                                ifp->if_capenable &= ~IFCAP_TXCSUM;
                                ifp->if_hwassist &= ~JME_CSUM_FEATURES;
                        }
                        ifp->if_mtu = ifr->ifr_mtu;
                        /* Reinitialize so buffers match the new MTU. */
                        if (ifp->if_flags & IFF_RUNNING)
                                jme_init(sc);
                }
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if (ifp->if_flags & IFF_RUNNING) {
                                /*
                                 * Promiscuity/allmulti changes only need a
                                 * filter reprogram, not a full reinit.
                                 */
                                if ((ifp->if_flags ^ sc->jme_if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI))
                                        jme_set_filter(sc);
                        } else {
                                jme_init(sc);
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                jme_stop(sc);
                }
                /* Remember flags to detect changes next time around. */
                sc->jme_if_flags = ifp->if_flags;
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING)
                        jme_set_filter(sc);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;

        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;

                /* TX csum offload is unusable once MTU reaches FIFO size. */
                if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        if (IFCAP_TXCSUM & ifp->if_capenable)
                                ifp->if_hwassist |= JME_CSUM_FEATURES;
                        else
                                ifp->if_hwassist &= ~JME_CSUM_FEATURES;
                }
                if (mask & IFCAP_RXCSUM) {
                        uint32_t reg;

                        ifp->if_capenable ^= IFCAP_RXCSUM;
                        reg = CSR_READ_4(sc, JME_RXMAC);
                        reg &= ~RXMAC_CSUM_ENB;
                        if (ifp->if_capenable & IFCAP_RXCSUM)
                                reg |= RXMAC_CSUM_ENB;
                        CSR_WRITE_4(sc, JME_RXMAC, reg);
                }

                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        jme_set_vlan(sc);
                }

                if (mask & IFCAP_RSS)
                        ifp->if_capenable ^= IFCAP_RSS;
                break;

        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }
        return (error);
}
1843
/*
 * Reprogram the GHC/RXMAC/TXMAC/TXPFC registers for the speed and
 * duplex the PHY resolved (invoked on link-state changes).
 *
 * Also applies two chip workarounds when enabled in jme_workaround:
 * the extended PHY FIFO depth for 100TX (JME_WA_EXTFIFO) and the
 * half-duplex GPREG1 fixup (JME_WA_HDX).
 */
static void
jme_mac_config(struct jme_softc *sc)
{
        struct mii_data *mii;
        uint32_t ghc, rxmac, txmac, txpause, gp1;
        int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

        mii = device_get_softc(sc->jme_miibus);

        /* Pulse the MAC reset before reprogramming it. */
        CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
        DELAY(10);
        CSR_WRITE_4(sc, JME_GHC, 0);
        ghc = 0;
        rxmac = CSR_READ_4(sc, JME_RXMAC);
        rxmac &= ~RXMAC_FC_ENB;
        txmac = CSR_READ_4(sc, JME_TXMAC);
        txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
        txpause = CSR_READ_4(sc, JME_TXPFC);
        txpause &= ~TXPFC_PAUSE_ENB;
        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                /* Full duplex: no collision handling needed. */
                ghc |= GHC_FULL_DUPLEX;
                rxmac &= ~RXMAC_COLL_DET_ENB;
                txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
                    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
                    TXMAC_FRAME_BURST);
#ifdef notyet
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
                        txpause |= TXPFC_PAUSE_ENB;
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
                        rxmac |= RXMAC_FC_ENB;
#endif
                /* Disable retry transmit timer/retry limit. */
                CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
                    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
        } else {
                /* Half duplex: collision detection and backoff required. */
                rxmac |= RXMAC_COLL_DET_ENB;
                txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
                /* Enable retry transmit timer/retry limit. */
                CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
                    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
        }

        /*
         * Reprogram Tx/Rx MACs with resolved speed/duplex.
         */
        gp1 = CSR_READ_4(sc, JME_GPREG1);
        gp1 &= ~GPREG1_WA_HDX;

        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
                hdx = 1;

        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
                ghc |= GHC_SPEED_10 | sc->jme_clksrc;
                if (hdx)
                        gp1 |= GPREG1_WA_HDX;
                break;

        case IFM_100_TX:
                ghc |= GHC_SPEED_100 | sc->jme_clksrc;
                if (hdx)
                        gp1 |= GPREG1_WA_HDX;

                /*
                 * Use extended FIFO depth to workaround CRC errors
                 * emitted by chips before JMC250B
                 */
                phyconf = JMPHY_CONF_EXTFIFO;
                break;

        case IFM_1000_T:
                /* Fast-ethernet-only chips never resolve to 1000T. */
                if (sc->jme_caps & JME_CAP_FASTETH)
                        break;

                ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
                if (hdx)
                        txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
                break;

        default:
                break;
        }
        CSR_WRITE_4(sc, JME_GHC, ghc);
        CSR_WRITE_4(sc, JME_RXMAC, rxmac);
        CSR_WRITE_4(sc, JME_TXMAC, txmac);
        CSR_WRITE_4(sc, JME_TXPFC, txpause);

        if (sc->jme_workaround & JME_WA_EXTFIFO) {
                jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
                                    JMPHY_CONF, phyconf);
        }
        if (sc->jme_workaround & JME_WA_HDX)
                CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
1938
/*
 * Interrupt handler.
 *
 * Runs with sc->jme_serialize held.  Masks all interrupts on entry,
 * acknowledges the pending sources, dispatches RX/TX processing (TX
 * work takes the TX serializer), and re-enables interrupts on exit.
 */
static void
jme_intr(void *xsc)
{
        struct jme_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t status;
        int r;

        ASSERT_SERIALIZED(&sc->jme_serialize);

        /* 0xFFFFFFFF indicates the hardware has gone away. */
        status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
        if (status == 0 || status == 0xFFFFFFFF)
                return;

        /* Disable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

        status = CSR_READ_4(sc, JME_INTR_STATUS);
        if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
                goto back;

        /* Reset PCC counter/timer and Ack interrupts. */
        status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

        /* Coalescing events also ack the corresponding COMP bit. */
        if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
                status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

        for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
                if (status & jme_rx_status[r].jme_coal) {
                        status |= jme_rx_status[r].jme_coal |
                                  jme_rx_status[r].jme_comp;
                }
        }

        CSR_WRITE_4(sc, JME_INTR_STATUS, status);

        if (ifp->if_flags & IFF_RUNNING) {
                if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
                        jme_rx_intr(sc, status);

                if (status & INTR_RXQ_DESC_EMPTY) {
                        /*
                         * Notify hardware availability of new Rx buffers.
                         * Reading RXCSR takes very long time under heavy
                         * load so cache RXCSR value and writes the ORed
                         * value with the kick command to the RXCSR. This
                         * saves one register access cycle.
                         */
                        CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
                            RXCSR_RX_ENB | RXCSR_RXQ_START);
                }

                if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
                        /* TX completion runs under the TX serializer. */
                        lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
                        jme_txeof(sc);
                        if (!ifq_is_empty(&ifp->if_snd))
                                if_devstart(ifp);
                        lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
                }
        }
back:
        /* Reenable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
2003
/*
 * Reclaim completed TX descriptors.
 *
 * Walks the ring from the consumer index up to the producer index,
 * freeing the mbuf and unloading the DMA map of every frame whose
 * first descriptor the hardware has released (JME_TD_OWN cleared),
 * and updates the error/collision statistics.  Clears IFF_OACTIVE
 * once enough descriptors are free again.
 */
static void
jme_txeof(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct jme_txdesc *txd;
        uint32_t status;
        int cons, nsegs;

        cons = sc->jme_cdata.jme_tx_cons;
        if (cons == sc->jme_cdata.jme_tx_prod)
                return;

        /*
         * Go through our Tx list and free mbufs for those
         * frames which have been transmitted.
         */
        while (cons != sc->jme_cdata.jme_tx_prod) {
                txd = &sc->jme_cdata.jme_txdesc[cons];
                KASSERT(txd->tx_m != NULL,
                        ("%s: freeing NULL mbuf!", __func__));

                status = le32toh(txd->tx_desc->flags);
                if ((status & JME_TD_OWN) == JME_TD_OWN)
                        break;

                if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
                        ifp->if_oerrors++;
                } else {
                        ifp->if_opackets++;
                        if (status & JME_TD_COLLISION) {
                                ifp->if_collisions +=
                                    le32toh(txd->tx_desc->buflen) &
                                    JME_TD_BUF_LEN_MASK;
                        }
                }

                /*
                 * Only the first descriptor of multi-descriptor
                 * transmission is updated so driver have to skip entire
                 * chained buffers for the transmitted frame. In other
                 * words, JME_TD_OWN bit is valid only at the first
                 * descriptor of a multi-descriptor transmission.
                 */
                for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
                        sc->jme_cdata.jme_tx_ring[cons].flags = 0;
                        JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
                }

                /* Reclaim transferred mbufs. */
                bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
                m_freem(txd->tx_m);
                txd->tx_m = NULL;
                sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
                KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
                        ("%s: Active Tx desc counter was garbled", __func__));
                txd->tx_ndesc = 0;
        }
        sc->jme_cdata.jme_tx_cons = cons;

        /* Cancel the watchdog once the ring is fully drained. */
        if (sc->jme_cdata.jme_tx_cnt == 0)
                ifp->if_timer = 0;

        if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
            sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
                ifp->if_flags &= ~IFF_OACTIVE;
}
2070
2071 static __inline void
2072 jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
2073 {
2074         int i;
2075
2076         for (i = 0; i < count; ++i) {
2077                 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
2078                 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2079         }
2080 }
2081
2082 static __inline struct pktinfo *
2083 jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2084 {
2085         if (flags & JME_RD_IPV4)
2086                 pi->pi_netisr = NETISR_IP;
2087         else if (flags & JME_RD_IPV6)
2088                 pi->pi_netisr = NETISR_IPV6;
2089         else
2090                 return NULL;
2091
2092         pi->pi_flags = 0;
2093         pi->pi_l3proto = IPPROTO_UNKNOWN;
2094
2095         if (flags & JME_RD_MORE_FRAG)
2096                 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2097         else if (flags & JME_RD_TCP)
2098                 pi->pi_l3proto = IPPROTO_TCP;
2099         else if (flags & JME_RD_UDP)
2100                 pi->pi_l3proto = IPPROTO_UDP;
2101         else
2102                 pi = NULL;
2103         return pi;
2104 }
2105
/*
 * Receive one (possibly multi-segment) frame.
 *
 * Reads the status descriptor at the current consumer index, then
 * walks its `nsegs' buffer descriptors, chaining the attached mbufs
 * into a single packet.  Each consumed buffer is replaced via
 * jme_newbuf(); on replacement failure the whole frame is dropped and
 * the original buffers are recycled.  On the final segment, checksum,
 * VLAN and RSS-hash metadata are attached and the packet is handed to
 * ether_input_pkt().  Advances rdata->jme_rx_cons past the frame.
 */
static void
jme_rxpkt(struct jme_rxdata *rdata)
{
        struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
        struct jme_desc *desc;
        struct jme_rxdesc *rxd;
        struct mbuf *mp, *m;
        uint32_t flags, status, hash, hashinfo;
        int cons, count, nsegs;

        cons = rdata->jme_rx_cons;
        desc = &rdata->jme_rx_ring[cons];
        flags = le32toh(desc->flags);
        status = le32toh(desc->buflen);
        /* addr_hi/addr_lo are repurposed by hardware for RSS hash info. */
        hash = le32toh(desc->addr_hi);
        hashinfo = le32toh(desc->addr_lo);
        nsegs = JME_RX_NSEGS(status);

        JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
                        "hash 0x%08x, hash info 0x%08x\n",
                        rdata->jme_rx_idx, flags, hash, hashinfo);

        if (status & JME_RX_ERR_STAT) {
                /* Bad frame: recycle its buffers and skip past it. */
                ifp->if_ierrors++;
                jme_discard_rxbufs(rdata, cons, nsegs);
#ifdef JME_SHOW_ERRORS
                if_printf(ifp, "%s : receive error = 0x%b\n",
                    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
                rdata->jme_rx_cons += nsegs;
                rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
                return;
        }

        rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
        for (count = 0; count < nsegs; count++,
             JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
                rxd = &rdata->jme_rxdesc[cons];
                mp = rxd->rx_m;

                /* Add a new receive buffer to the ring. */
                if (jme_newbuf(rdata, rxd, 0) != 0) {
                        /* Allocation failed: drop frame, keep old buffers. */
                        ifp->if_iqdrops++;
                        /* Reuse buffer. */
                        jme_discard_rxbufs(rdata, cons, nsegs - count);
                        if (rdata->jme_rxhead != NULL) {
                                m_freem(rdata->jme_rxhead);
                                JME_RXCHAIN_RESET(rdata);
                        }
                        break;
                }

                /*
                 * Assume we've received a full sized frame.
                 * Actual size is fixed when we encounter the end of
                 * multi-segmented frame.
                 */
                mp->m_len = MCLBYTES;

                /* Chain received mbufs. */
                if (rdata->jme_rxhead == NULL) {
                        rdata->jme_rxhead = mp;
                        rdata->jme_rxtail = mp;
                } else {
                        /*
                         * Receive processor can receive a maximum frame
                         * size of 65535 bytes.
                         */
                        rdata->jme_rxtail->m_next = mp;
                        rdata->jme_rxtail = mp;
                }

                if (count == nsegs - 1) {
                        struct pktinfo pi0, *pi;

                        /* Last desc. for this frame. */
                        m = rdata->jme_rxhead;
                        m->m_pkthdr.len = rdata->jme_rxlen;
                        if (nsegs > 1) {
                                /* Set first mbuf size. */
                                m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
                                /* Set last mbuf size. */
                                mp->m_len = rdata->jme_rxlen -
                                    ((MCLBYTES - JME_RX_PAD_BYTES) +
                                    (MCLBYTES * (nsegs - 2)));
                        } else {
                                m->m_len = rdata->jme_rxlen;
                        }
                        m->m_pkthdr.rcvif = ifp;

                        /*
                         * Account for 10bytes auto padding which is used
                         * to align IP header on 32bit boundary. Also note,
                         * CRC bytes is automatically removed by the
                         * hardware.
                         */
                        m->m_data += JME_RX_PAD_BYTES;

                        /* Set checksum information. */
                        if ((ifp->if_capenable & IFCAP_RXCSUM) &&
                            (flags & JME_RD_IPV4)) {
                                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                                if (flags & JME_RD_IPCSUM)
                                        m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                                if ((flags & JME_RD_MORE_FRAG) == 0 &&
                                    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
                                     (JME_RD_TCP | JME_RD_TCPCSUM) ||
                                     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
                                     (JME_RD_UDP | JME_RD_UDPCSUM))) {
                                        m->m_pkthdr.csum_flags |=
                                            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                        m->m_pkthdr.csum_data = 0xffff;
                                }
                        }

                        /* Check for VLAN tagged packets. */
                        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
                            (flags & JME_RD_VLAN_TAG)) {
                                m->m_pkthdr.ether_vlantag =
                                    flags & JME_RD_VLAN_MASK;
                                m->m_flags |= M_VLANTAG;
                        }

                        ifp->if_ipackets++;

                        if (ifp->if_capenable & IFCAP_RSS)
                                pi = jme_pktinfo(&pi0, flags);
                        else
                                pi = NULL;

                        /* Only attach the hash if hardware computed one. */
                        if (pi != NULL &&
                            (hashinfo & JME_RD_HASH_FN_MASK) != 0) {
                                m->m_flags |= M_HASH;
                                m->m_pkthdr.hash = toeplitz_hash(hash);
                        }

#ifdef JME_RSS_DEBUG
                        if (pi != NULL) {
                                JME_RSS_DPRINTF(rdata->jme_sc, 10,
                                    "isr %d flags %08x, l3 %d %s\n",
                                    pi->pi_netisr, pi->pi_flags,
                                    pi->pi_l3proto,
                                    (m->m_flags & M_HASH) ? "hash" : "");
                        }
#endif

                        /* Pass it on. */
                        ether_input_pkt(ifp, m, pi);

                        /* Reset mbuf chains. */
                        JME_RXCHAIN_RESET(rdata);
#ifdef JME_RSS_DEBUG
                        rdata->jme_rx_pkt++;
#endif
                }
        }

        rdata->jme_rx_cons += nsegs;
        rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
}
2267
2268 static void
2269 jme_rxeof(struct jme_rxdata *rdata, int count)
2270 {
2271         struct jme_desc *desc;
2272         int nsegs, pktlen;
2273
2274         for (;;) {
2275 #ifdef DEVICE_POLLING
2276                 if (count >= 0 && count-- == 0)
2277                         break;
2278 #endif
2279                 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2280                 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2281                         break;
2282                 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2283                         break;
2284
2285                 /*
2286                  * Check number of segments against received bytes.
2287                  * Non-matching value would indicate that hardware
2288                  * is still trying to update Rx descriptors. I'm not
2289                  * sure whether this check is needed.
2290                  */
2291                 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2292                 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2293                 if (nsegs != howmany(pktlen, MCLBYTES)) {
2294                         if_printf(&rdata->jme_sc->arpcom.ac_if,
2295                             "RX fragment count(%d) and "
2296                             "packet size(%d) mismach\n", nsegs, pktlen);
2297                         break;
2298                 }
2299
2300                 /* Received a frame. */
2301                 jme_rxpkt(rdata);
2302         }
2303 }
2304
/*
 * Periodic (1 Hz) housekeeping callout: drives the MII link state
 * machine and reschedules itself.  Explicitly takes all ifnet
 * serializers around the work since the callout fires outside them.
 */
static void
jme_tick(void *xsc)
{
        struct jme_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii = device_get_softc(sc->jme_miibus);

        ifnet_serialize_all(ifp);

        mii_tick(mii);
        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

        ifnet_deserialize_all(ifp);
}
2319
/*
 * Hard-reset the chip.
 *
 * Performs the documented reset dance: stop TX/RX, assert GHC_RESET,
 * toggle the TXMAC/TXOFL and RXMAC clock sources while the reset bit
 * is held and released, briefly enable then stop the TX/RX engines,
 * and leave the chip quiescent with clocks running.  Each CSR_READ_4
 * after a write flushes the posted write to the device.
 */
static void
jme_reset(struct jme_softc *sc)
{
        uint32_t val;

        /* Make sure that TX and RX are stopped */
        jme_stop_tx(sc);
        jme_stop_rx(sc);

        /* Start reset */
        CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
        DELAY(20);

        /*
         * Hold reset bit before stop reset
         */

        /* Disable TXMAC and TXOFL clock sources */
        CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
        /* Disable RXMAC clock source */
        val = CSR_READ_4(sc, JME_GPREG1);
        CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /* Stop reset */
        CSR_WRITE_4(sc, JME_GHC, 0);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /*
         * Clear reset bit after stop reset
         */

        /* Enable TXMAC and TXOFL clock sources */
        CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
        /* Enable RXMAC clock source */
        val = CSR_READ_4(sc, JME_GPREG1);
        CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /* Disable TXMAC and TXOFL clock sources */
        CSR_WRITE_4(sc, JME_GHC, 0);
        /* Disable RXMAC clock source */
        val = CSR_READ_4(sc, JME_GPREG1);
        CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /* Enable TX and RX */
        val = CSR_READ_4(sc, JME_TXCSR);
        CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
        val = CSR_READ_4(sc, JME_RXCSR);
        CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
        /* Flush */
        CSR_READ_4(sc, JME_TXCSR);
        CSR_READ_4(sc, JME_RXCSR);

        /* Enable TXMAC and TXOFL clock sources */
        CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
        /* Enable RXMAC clock source */
        val = CSR_READ_4(sc, JME_GPREG1);
        CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
        /* Flush */
        CSR_READ_4(sc, JME_GHC);

        /* Stop TX and RX */
        jme_stop_tx(sc);
        jme_stop_rx(sc);
}
2391
2392 static void
2393 jme_init(void *xsc)
2394 {
2395         struct jme_softc *sc = xsc;
2396         struct ifnet *ifp = &sc->arpcom.ac_if;
2397         struct mii_data *mii;
2398         uint8_t eaddr[ETHER_ADDR_LEN];
2399         bus_addr_t paddr;
2400         uint32_t reg;
2401         int error, r;
2402
2403         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2404
2405         /*
2406          * Cancel any pending I/O.
2407          */
2408         jme_stop(sc);
2409
2410         /*
2411          * Reset the chip to a known state.
2412          */
2413         jme_reset(sc);
2414
2415         /*
2416          * Setup MSI/MSI-X vectors to interrupts mapping
2417          */
2418         jme_set_msinum(sc);
2419
2420         sc->jme_txd_spare =
2421         howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2422         KKASSERT(sc->jme_txd_spare >= 1);
2423
2424         /*
2425          * If we use 64bit address mode for transmitting, each Tx request
2426          * needs one more symbol descriptor.
2427          */
2428         if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2429                 sc->jme_txd_spare += 1;
2430
2431         if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
2432                 jme_enable_rss(sc);
2433         else
2434                 jme_disable_rss(sc);
2435
2436         /* Init RX descriptors */
2437         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2438                 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
2439                 if (error) {
2440                         if_printf(ifp, "initialization failed: "
2441                                   "no memory for %dth RX ring.\n", r);
2442                         jme_stop(sc);
2443                         return;
2444                 }
2445         }
2446
2447         /* Init TX descriptors */
2448         jme_init_tx_ring(sc);
2449
2450         /* Initialize shadow status block. */
2451         jme_init_ssb(sc);
2452
2453         /* Reprogram the station address. */
2454         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2455         CSR_WRITE_4(sc, JME_PAR0,
2456             eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2457         CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2458
2459         /*
2460          * Configure Tx queue.
2461          *  Tx priority queue weight value : 0
2462          *  Tx FIFO threshold for processing next packet : 16QW
2463          *  Maximum Tx DMA length : 512
2464          *  Allow Tx DMA burst.
2465          */
2466         sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2467         sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2468         sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2469         sc->jme_txcsr |= sc->jme_tx_dma_size;
2470         sc->jme_txcsr |= TXCSR_DMA_BURST;
2471         CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2472
2473         /* Set Tx descriptor counter. */
2474         CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
2475
2476         /* Set Tx ring address to the hardware. */
2477         paddr = sc->jme_cdata.jme_tx_ring_paddr;
2478         CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2479         CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2480
2481         /* Configure TxMAC parameters. */
2482         reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2483         reg |= TXMAC_THRESH_1_PKT;
2484         reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2485         CSR_WRITE_4(sc, JME_TXMAC, reg);
2486
2487         /*
2488          * Configure Rx queue.
2489          *  FIFO full threshold for transmitting Tx pause packet : 128T
2490          *  FIFO threshold for processing next packet : 128QW
2491          *  Rx queue 0 select
2492          *  Max Rx DMA length : 128
2493          *  Rx descriptor retry : 32
2494          *  Rx descriptor retry time gap : 256ns
2495          *  Don't receive runt/bad frame.
2496          */
2497         sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2498 #if 0
2499         /*
2500          * Since Rx FIFO size is 4K bytes, receiving frames larger
2501          * than 4K bytes will suffer from Rx FIFO overruns. So
2502          * decrease FIFO threshold to reduce the FIFO overruns for
2503          * frames larger than 4000 bytes.
2504          * For best performance of standard MTU sized frames use
2505          * maximum allowable FIFO threshold, 128QW.
2506          */
2507         if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2508             JME_RX_FIFO_SIZE)
2509                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2510         else
2511                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2512 #else
2513         /* Improve PCI Express compatibility */
2514         sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2515 #endif
2516         sc->jme_rxcsr |= sc->jme_rx_dma_size;
2517         sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2518         sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2519         /* XXX TODO DROP_BAD */
2520
2521         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2522                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2523
2524                 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2525
2526                 /* Set Rx descriptor counter. */
2527                 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
2528
2529                 /* Set Rx ring address to the hardware. */
2530                 paddr = rdata->jme_rx_ring_paddr;
2531                 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2532                 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2533         }
2534
2535         /* Clear receive filter. */
2536         CSR_WRITE_4(sc, JME_RXMAC, 0);
2537
2538         /* Set up the receive filter. */
2539         jme_set_filter(sc);
2540         jme_set_vlan(sc);
2541
2542         /*
2543          * Disable all WOL bits as WOL can interfere normal Rx
2544          * operation. Also clear WOL detection status bits.
2545          */
2546         reg = CSR_READ_4(sc, JME_PMCS);
2547         reg &= ~PMCS_WOL_ENB_MASK;
2548         CSR_WRITE_4(sc, JME_PMCS, reg);
2549
2550         /*
2551          * Pad 10bytes right before received frame. This will greatly
2552          * help Rx performance on strict-alignment architectures as
2553          * it does not need to copy the frame to align the payload.
2554          */
2555         reg = CSR_READ_4(sc, JME_RXMAC);
2556         reg |= RXMAC_PAD_10BYTES;
2557
2558         if (ifp->if_capenable & IFCAP_RXCSUM)
2559                 reg |= RXMAC_CSUM_ENB;
2560         CSR_WRITE_4(sc, JME_RXMAC, reg);
2561
2562         /* Configure general purpose reg0 */
2563         reg = CSR_READ_4(sc, JME_GPREG0);
2564         reg &= ~GPREG0_PCC_UNIT_MASK;
2565         /* Set PCC timer resolution to micro-seconds unit. */
2566         reg |= GPREG0_PCC_UNIT_US;
2567         /*
2568          * Disable all shadow register posting as we have to read
2569          * JME_INTR_STATUS register in jme_intr. Also it seems
2570          * that it's hard to synchronize interrupt status between
2571          * hardware and software with shadow posting due to
2572          * requirements of bus_dmamap_sync(9).
2573          */
2574         reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2575             GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2576             GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2577             GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2578         /* Disable posting of DW0. */
2579         reg &= ~GPREG0_POST_DW0_ENB;
2580         /* Clear PME message. */
2581         reg &= ~GPREG0_PME_ENB;
2582         /* Set PHY address. */
2583         reg &= ~GPREG0_PHY_ADDR_MASK;
2584         reg |= sc->jme_phyaddr;
2585         CSR_WRITE_4(sc, JME_GPREG0, reg);
2586
2587         /* Configure Tx queue 0 packet completion coalescing. */
2588         jme_set_tx_coal(sc);
2589
2590         /* Configure Rx queues packet completion coalescing. */
2591         jme_set_rx_coal(sc);
2592
2593         /* Configure shadow status block but don't enable posting. */
2594         paddr = sc->jme_cdata.jme_ssb_block_paddr;
2595         CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2596         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2597
2598         /* Disable Timer 1 and Timer 2. */
2599         CSR_WRITE_4(sc, JME_TIMER1, 0);
2600         CSR_WRITE_4(sc, JME_TIMER2, 0);
2601
2602         /* Configure retry transmit period, retry limit value. */
2603         CSR_WRITE_4(sc, JME_TXTRHD,
2604             ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2605             TXTRHD_RT_PERIOD_MASK) |
2606             ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2607             TXTRHD_RT_LIMIT_SHIFT));
2608
2609 #ifdef DEVICE_POLLING
2610         if (!(ifp->if_flags & IFF_POLLING))
2611 #endif
2612         /* Initialize the interrupt mask. */
2613         CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2614         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2615
2616         /*
2617          * Enabling Tx/Rx DMA engines and Rx queue processing is
2618          * done after detection of valid link in jme_miibus_statchg.
2619          */
2620         sc->jme_flags &= ~JME_FLAG_LINK;
2621
2622         /* Set the current media. */
2623         mii = device_get_softc(sc->jme_miibus);
2624         mii_mediachg(mii);
2625
2626         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2627
2628         ifp->if_flags |= IFF_RUNNING;
2629         ifp->if_flags &= ~IFF_OACTIVE;
2630 }
2631
/*
 * Bring the interface down: mark it not-running, stop the tick callout,
 * mask interrupts, halt the RX/TX engines and release every mbuf still
 * owned by the rings so a following jme_init() starts from a clean slate.
 *
 * Called with all ifnet serializers held.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_flags &= ~JME_FLAG_LINK;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partial finished RX segments
	 * (frames still being chained together when the ring stopped).
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 * Unload the DMA map before freeing each mbuf so the tag's
	 * bookkeeping stays consistent.
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				bus_dmamap_unload(rdata->jme_rx_tag,
						  rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}
2702
2703 static void
2704 jme_stop_tx(struct jme_softc *sc)
2705 {
2706         uint32_t reg;
2707         int i;
2708
2709         reg = CSR_READ_4(sc, JME_TXCSR);
2710         if ((reg & TXCSR_TX_ENB) == 0)
2711                 return;
2712         reg &= ~TXCSR_TX_ENB;
2713         CSR_WRITE_4(sc, JME_TXCSR, reg);
2714         for (i = JME_TIMEOUT; i > 0; i--) {
2715                 DELAY(1);
2716                 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2717                         break;
2718         }
2719         if (i == 0)
2720                 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2721 }
2722
2723 static void
2724 jme_stop_rx(struct jme_softc *sc)
2725 {
2726         uint32_t reg;
2727         int i;
2728
2729         reg = CSR_READ_4(sc, JME_RXCSR);
2730         if ((reg & RXCSR_RX_ENB) == 0)
2731                 return;
2732         reg &= ~RXCSR_RX_ENB;
2733         CSR_WRITE_4(sc, JME_RXCSR, reg);
2734         for (i = JME_TIMEOUT; i > 0; i--) {
2735                 DELAY(1);
2736                 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2737                         break;
2738         }
2739         if (i == 0)
2740                 device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2741 }
2742
2743 static void
2744 jme_init_tx_ring(struct jme_softc *sc)
2745 {
2746         struct jme_chain_data *cd;
2747         struct jme_txdesc *txd;
2748         int i;
2749
2750         sc->jme_cdata.jme_tx_prod = 0;
2751         sc->jme_cdata.jme_tx_cons = 0;
2752         sc->jme_cdata.jme_tx_cnt = 0;
2753
2754         cd = &sc->jme_cdata;
2755         bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2756         for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
2757                 txd = &sc->jme_cdata.jme_txdesc[i];
2758                 txd->tx_m = NULL;
2759                 txd->tx_desc = &cd->jme_tx_ring[i];
2760                 txd->tx_ndesc = 0;
2761         }
2762 }
2763
2764 static void
2765 jme_init_ssb(struct jme_softc *sc)
2766 {
2767         struct jme_chain_data *cd;
2768
2769         cd = &sc->jme_cdata;
2770         bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2771 }
2772
2773 static int
2774 jme_init_rx_ring(struct jme_rxdata *rdata)
2775 {
2776         struct jme_rxdesc *rxd;
2777         int i;
2778
2779         KKASSERT(rdata->jme_rxhead == NULL &&
2780                  rdata->jme_rxtail == NULL &&
2781                  rdata->jme_rxlen == 0);
2782         rdata->jme_rx_cons = 0;
2783
2784         bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2785         for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
2786                 int error;
2787
2788                 rxd = &rdata->jme_rxdesc[i];
2789                 rxd->rx_m = NULL;
2790                 rxd->rx_desc = &rdata->jme_rx_ring[i];
2791                 error = jme_newbuf(rdata, rxd, 1);
2792                 if (error)
2793                         return error;
2794         }
2795         return 0;
2796 }
2797
/*
 * Attach a fresh mbuf cluster to an RX descriptor.  The new mbuf is
 * DMA-loaded into the ring's spare map first; only on success are the
 * old mbuf's resources released and the maps swapped, so on failure the
 * descriptor keeps its previous buffer intact.
 *
 * 'init' selects blocking allocation (ring construction) vs. no-wait
 * (interrupt-time replenish).  Returns 0 or an errno.
 */
static int
jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map; the old mapping stays valid on error. */
	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
			rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
			BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "can't load RX mbuf\n");
		}
		return error;
	}

	/* Release the previous buffer, if any (sync before unload). */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the loaded spare map into the descriptor. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;
	rxd->rx_paddr = segs.ds_addr;

	jme_setup_rxdesc(rxd);
	return 0;
}
2843
2844 static void
2845 jme_set_vlan(struct jme_softc *sc)
2846 {
2847         struct ifnet *ifp = &sc->arpcom.ac_if;
2848         uint32_t reg;
2849
2850         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2851
2852         reg = CSR_READ_4(sc, JME_RXMAC);
2853         reg &= ~RXMAC_VLAN_ENB;
2854         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2855                 reg |= RXMAC_VLAN_ENB;
2856         CSR_WRITE_4(sc, JME_RXMAC, reg);
2857 }
2858
2859 static void
2860 jme_set_filter(struct jme_softc *sc)
2861 {
2862         struct ifnet *ifp = &sc->arpcom.ac_if;
2863         struct ifmultiaddr *ifma;
2864         uint32_t crc;
2865         uint32_t mchash[2];
2866         uint32_t rxcfg;
2867
2868         ASSERT_IFNET_SERIALIZED_ALL(ifp);
2869
2870         rxcfg = CSR_READ_4(sc, JME_RXMAC);
2871         rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2872             RXMAC_ALLMULTI);
2873
2874         /*
2875          * Always accept frames destined to our station address.
2876          * Always accept broadcast frames.
2877          */
2878         rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2879
2880         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2881                 if (ifp->if_flags & IFF_PROMISC)
2882                         rxcfg |= RXMAC_PROMISC;
2883                 if (ifp->if_flags & IFF_ALLMULTI)
2884                         rxcfg |= RXMAC_ALLMULTI;
2885                 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2886                 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2887                 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2888                 return;
2889         }
2890
2891         /*
2892          * Set up the multicast address filter by passing all multicast
2893          * addresses through a CRC generator, and then using the low-order
2894          * 6 bits as an index into the 64 bit multicast hash table.  The
2895          * high order bits select the register, while the rest of the bits
2896          * select the bit within the register.
2897          */
2898         rxcfg |= RXMAC_MULTICAST;
2899         bzero(mchash, sizeof(mchash));
2900
2901         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2902                 if (ifma->ifma_addr->sa_family != AF_LINK)
2903                         continue;
2904                 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2905                     ifma->ifma_addr), ETHER_ADDR_LEN);
2906
2907                 /* Just want the 6 least significant bits. */
2908                 crc &= 0x3f;
2909
2910                 /* Set the corresponding bit in the hash table. */
2911                 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2912         }
2913
2914         CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2915         CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2916         CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2917 }
2918
2919 static int
2920 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2921 {
2922         struct jme_softc *sc = arg1;
2923         struct ifnet *ifp = &sc->arpcom.ac_if;
2924         int error, v;
2925
2926         ifnet_serialize_all(ifp);
2927
2928         v = sc->jme_tx_coal_to;
2929         error = sysctl_handle_int(oidp, &v, 0, req);
2930         if (error || req->newptr == NULL)
2931                 goto back;
2932
2933         if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2934                 error = EINVAL;
2935                 goto back;
2936         }
2937
2938         if (v != sc->jme_tx_coal_to) {
2939                 sc->jme_tx_coal_to = v;
2940                 if (ifp->if_flags & IFF_RUNNING)
2941                         jme_set_tx_coal(sc);
2942         }
2943 back:
2944         ifnet_deserialize_all(ifp);
2945         return error;
2946 }
2947
2948 static int
2949 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2950 {
2951         struct jme_softc *sc = arg1;
2952         struct ifnet *ifp = &sc->arpcom.ac_if;
2953         int error, v;
2954
2955         ifnet_serialize_all(ifp);
2956
2957         v = sc->jme_tx_coal_pkt;
2958         error = sysctl_handle_int(oidp, &v, 0, req);
2959         if (error || req->newptr == NULL)
2960                 goto back;
2961
2962         if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2963                 error = EINVAL;
2964                 goto back;
2965         }
2966
2967         if (v != sc->jme_tx_coal_pkt) {
2968                 sc->jme_tx_coal_pkt = v;
2969                 if (ifp->if_flags & IFF_RUNNING)
2970                         jme_set_tx_coal(sc);
2971         }
2972 back:
2973         ifnet_deserialize_all(ifp);
2974         return error;
2975 }
2976
2977 static int
2978 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2979 {
2980         struct jme_softc *sc = arg1;
2981         struct ifnet *ifp = &sc->arpcom.ac_if;
2982         int error, v;
2983
2984         ifnet_serialize_all(ifp);
2985
2986         v = sc->jme_rx_coal_to;
2987         error = sysctl_handle_int(oidp, &v, 0, req);
2988         if (error || req->newptr == NULL)
2989                 goto back;
2990
2991         if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2992                 error = EINVAL;
2993                 goto back;
2994         }
2995
2996         if (v != sc->jme_rx_coal_to) {
2997                 sc->jme_rx_coal_to = v;
2998                 if (ifp->if_flags & IFF_RUNNING)
2999                         jme_set_rx_coal(sc);
3000         }
3001 back:
3002         ifnet_deserialize_all(ifp);
3003         return error;
3004 }
3005
3006 static int
3007 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3008 {
3009         struct jme_softc *sc = arg1;
3010         struct ifnet *ifp = &sc->arpcom.ac_if;
3011         int error, v;
3012
3013         ifnet_serialize_all(ifp);
3014
3015         v = sc->jme_rx_coal_pkt;
3016         error = sysctl_handle_int(oidp, &v, 0, req);
3017         if (error || req->newptr == NULL)
3018                 goto back;
3019
3020         if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3021                 error = EINVAL;
3022                 goto back;
3023         }
3024
3025         if (v != sc->jme_rx_coal_pkt) {
3026                 sc->jme_rx_coal_pkt = v;
3027                 if (ifp->if_flags & IFF_RUNNING)
3028                         jme_set_rx_coal(sc);
3029         }
3030 back:
3031         ifnet_deserialize_all(ifp);
3032         return error;
3033 }
3034
3035 static void
3036 jme_set_tx_coal(struct jme_softc *sc)
3037 {
3038         uint32_t reg;
3039
3040         reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3041             PCCTX_COAL_TO_MASK;
3042         reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3043             PCCTX_COAL_PKT_MASK;
3044         reg |= PCCTX_COAL_TXQ0;
3045         CSR_WRITE_4(sc, JME_PCCTX, reg);
3046 }
3047
3048 static void
3049 jme_set_rx_coal(struct jme_softc *sc)
3050 {
3051         uint32_t reg;
3052         int r;
3053
3054         reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3055             PCCRX_COAL_TO_MASK;
3056         reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3057             PCCRX_COAL_PKT_MASK;
3058         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
3059                 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3060 }
3061
3062 #ifdef DEVICE_POLLING
3063
3064 static void
3065 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3066 {
3067         struct jme_softc *sc = ifp->if_softc;
3068         uint32_t status;
3069         int r;
3070
3071         ASSERT_SERIALIZED(&sc->jme_serialize);
3072
3073         switch (cmd) {
3074         case POLL_REGISTER:
3075                 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3076                 break;
3077
3078         case POLL_DEREGISTER:
3079                 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3080                 break;
3081
3082         case POLL_AND_CHECK_STATUS:
3083         case POLL_ONLY:
3084                 status = CSR_READ_4(sc, JME_INTR_STATUS);
3085
3086                 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3087                         struct jme_rxdata *rdata =
3088                             &sc->jme_cdata.jme_rx_data[r];
3089
3090                         lwkt_serialize_enter(&rdata->jme_rx_serialize);
3091                         jme_rxeof(rdata, count);
3092                         lwkt_serialize_exit(&rdata->jme_rx_serialize);
3093                 }
3094
3095                 if (status & INTR_RXQ_DESC_EMPTY) {
3096                         CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3097                         CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3098                             RXCSR_RX_ENB | RXCSR_RXQ_START);
3099                 }
3100
3101                 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
3102                 jme_txeof(sc);
3103                 if (!ifq_is_empty(&ifp->if_snd))
3104                         if_devstart(ifp);
3105                 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
3106                 break;
3107         }
3108 }
3109
3110 #endif  /* DEVICE_POLLING */
3111
3112 static int
3113 jme_rxring_dma_alloc(struct jme_rxdata *rdata)
3114 {
3115         bus_dmamem_t dmem;
3116         int error, asize;
3117
3118         asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
3119         error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
3120                         JME_RX_RING_ALIGN, 0,
3121                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3122                         asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
3123         if (error) {
3124                 device_printf(rdata->jme_sc->jme_dev,
3125                     "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
3126                 return error;
3127         }
3128         rdata->jme_rx_ring_tag = dmem.dmem_tag;
3129         rdata->jme_rx_ring_map = dmem.dmem_map;
3130         rdata->jme_rx_ring = dmem.dmem_addr;
3131         rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
3132
3133         return 0;
3134 }
3135
/*
 * Create the per-ring RX buffer DMA tag, the spare dmamap used for
 * buffer replacement in jme_newbuf(), and one dmamap per descriptor.
 * On any failure every resource created so far is destroyed and the
 * tag pointer is cleared, so the caller need not unwind.
 *
 * Returns 0 or an errno from the bus_dma layer.
 */
static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
	int i, error;

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
				  &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n",
		    rdata->jme_rx_idx);
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
					  &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(rdata->jme_sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);

			/* Tear down the maps created so far, then the tag. */
			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}
3196
3197 static void
3198 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3199 {
3200         int r;
3201
3202         for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
3203                 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
3204
3205                 if (status & rdata->jme_rx_coal) {
3206                         lwkt_serialize_enter(&rdata->jme_rx_serialize);
3207                         jme_rxeof(rdata, -1);
3208                         lwkt_serialize_exit(&rdata->jme_rx_serialize);
3209                 }
3210         }
3211 }
3212
/*
 * Program the chip's RSS (receive side scaling) support:
 * hash configuration, Toeplitz key and redirect table.
 * Only 2 or 4 RX rings are supported (enforced below).
 */
static void
jme_enable_rss(struct jme_softc *sc)
{
	uint32_t rssc, ind;
	uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
	int i;

	KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
		sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
		("%s: invalid # of RX rings (%d)",
		 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));

	/*
	 * RSS control: 64-entry redirect table, hash on IPv4 and
	 * IPv4/TCP.  The low bits encode the ring count (ring_cnt >> 1
	 * yields 1 for 2 rings, 2 for 4 rings -- presumably the
	 * hardware's ring-count encoding; see chip docs to confirm).
	 */
	rssc = RSSC_HASH_64_ENTRY;
	rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
	rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
	JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
	CSR_WRITE_4(sc, JME_RSSC, rssc);

	/* Load the system-wide Toeplitz hash key, 32 bits per register. */
	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < RSSKEY_NREGS; ++i) {
		uint32_t keyreg;

		keyreg = RSSKEY_REGVAL(key, i);
		JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);

		CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
	}

	/*
	 * Create redirect table in following fashion:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	/* Pack one byte-wide ring index per table slot into 'ind'. */
	ind = 0;
	for (i = 0; i < RSSTBL_REGSIZE; ++i) {
		int q;

		q = i % sc->jme_cdata.jme_rx_ring_cnt;
		ind |= q << (i * 8);
	}
	JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);

	/* Same 4-entry pattern repeats across the whole table. */
	for (i = 0; i < RSSTBL_NREGS; ++i)
		CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
}
3257
/* Turn RSS off by writing the disable bit to the RSS control register. */
static void
jme_disable_rss(struct jme_softc *sc)
{
	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
}
3263
3264 static void
3265 jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3266 {
3267         struct jme_softc *sc = ifp->if_softc;
3268
3269         ifnet_serialize_array_enter(sc->jme_serialize_arr,
3270             sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3271 }
3272
3273 static void
3274 jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3275 {
3276         struct jme_softc *sc = ifp->if_softc;
3277
3278         ifnet_serialize_array_exit(sc->jme_serialize_arr,
3279             sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3280 }
3281
3282 static int
3283 jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3284 {
3285         struct jme_softc *sc = ifp->if_softc;
3286
3287         return ifnet_serialize_array_try(sc->jme_serialize_arr,
3288             sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
3289 }
3290
3291 #ifdef INVARIANTS
3292
3293 static void
3294 jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3295     boolean_t serialized)
3296 {
3297         struct jme_softc *sc = ifp->if_softc;
3298
3299         ifnet_serialize_array_assert(sc->jme_serialize_arr,
3300             sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
3301             slz, serialized);
3302 }
3303
3304 #endif  /* INVARIANTS */
3305
/*
 * Try to switch the device to MSI-X: one TX vector plus one vector
 * per RX ring.  On any failure everything allocated so far is undone
 * and the device stays on its previous interrupt type (the caller,
 * jme_intr_alloc(), then falls back to MSI/legacy).
 */
static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;

	/* One TX vector + one per RX ring. */
	msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt;
	KKASSERT(msix_count <= JME_NMSIX);

	/* Tunable may be overridden per-device via the environment. */
	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	/* -1 marks "no vector allocated" for the cleanup path. */
	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/* Vector 0 of our table: TX completion/coalescing interrupt. */
	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = 0;               /* XXX Put TX to cpu0 */
	msix->jme_msix_arg = &sc->jme_cdata;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/* One vector per RX ring, each pinned to its own CPU. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = r;       /* XXX Put RX to cpuX */
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d", device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Setup jme_msix_cnt early, so we could cleanup */
	sc->jme_msix_cnt = msix_count;

	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		/* Vector 0 is intentionally skipped (see comment above). */
		msix->jme_msix_vector = i + 1;
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	/*
	 * Build the MSINUM register shadow: for every interrupt source
	 * bit we service, record which MSI-X vector it is routed to.
	 * Each MSINUM register holds JME_MSINUM_FACTOR 4-bit entries.
	 */
	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;

back:
	/* Unified error unwind; jme_msix_free() handles partial state. */
	if (error)
		jme_msix_free(dev);
}
3417
3418 static int
3419 jme_intr_alloc(device_t dev)
3420 {
3421         struct jme_softc *sc = device_get_softc(dev);
3422         u_int irq_flags;
3423
3424         jme_msix_try_alloc(dev);
3425
3426         if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3427                 sc->jme_irq_type = pci_alloc_1intr(dev, jme_msi_enable,
3428                     &sc->jme_irq_rid, &irq_flags);
3429
3430                 sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
3431                     &sc->jme_irq_rid, irq_flags);
3432                 if (sc->jme_irq_res == NULL) {
3433                         device_printf(dev, "can't allocate irq\n");
3434                         return ENXIO;
3435                 }
3436         }
3437         return 0;
3438 }
3439
3440 static void
3441 jme_msix_free(device_t dev)
3442 {
3443         struct jme_softc *sc = device_get_softc(dev);
3444         int i;
3445
3446         KKASSERT(sc->jme_msix_cnt > 1);
3447
3448         for (i = 0; i < sc->jme_msix_cnt; ++i) {
3449                 struct jme_msix_data *msix = &sc->jme_msix[i];
3450
3451                 if (msix->jme_msix_res != NULL) {
3452                         bus_release_resource(dev, SYS_RES_IRQ,
3453                             msix->jme_msix_rid, msix->jme_msix_res);
3454                         msix->jme_msix_res = NULL;
3455                 }
3456                 if (msix->jme_msix_rid >= 0) {
3457                         pci_release_msix_vector(dev, msix->jme_msix_rid);
3458                         msix->jme_msix_rid = -1;
3459                 }
3460         }
3461         pci_teardown_msix(dev);
3462 }
3463
3464 static void
3465 jme_intr_free(device_t dev)
3466 {
3467         struct jme_softc *sc = device_get_softc(dev);
3468
3469         if (sc->jme_irq_type != PCI_INTR_TYPE_MSIX) {
3470                 if (sc->jme_irq_res != NULL) {
3471                         bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
3472                                              sc->jme_irq_res);
3473                 }
3474                 if (sc->jme_irq_type == PCI_INTR_TYPE_MSI)
3475                         pci_release_msi(dev);
3476         } else {
3477                 jme_msix_free(dev);
3478         }
3479 }
3480
/*
 * MSI-X handler for the TX vector.  Runs with the TX serializer held
 * (installed as the handler's serializer in jme_msix_setup()).
 * Register sequence is mask -> ack -> process -> unmask.
 */
static void
jme_msix_tx(void *xcd)
{
	struct jme_chain_data *cd = xcd;
	struct jme_softc *sc = cd->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(&cd->jme_tx_serialize);

	/* Mask TX interrupts while we service them. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);

	/* Ack the TX coalescing and completion status bits. */
	CSR_WRITE_4(sc, JME_INTR_STATUS,
	    INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP);

	if (ifp->if_flags & IFF_RUNNING) {
		jme_txeof(sc);
		/* Reclaimed descriptors may unblock pending transmits. */
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
	}

	/* Re-enable TX interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, INTR_TXQ_COAL | INTR_TXQ_COAL_TO);
}
3503
/*
 * MSI-X handler for one RX ring's vector.  Runs with that ring's
 * serializer held.  Handles both the RX coalescing interrupt and the
 * "RX ring empty" condition.  Register sequence is mask -> ack ->
 * process -> unmask.
 */
static void
jme_msix_rx(void *xrdata)
{
	struct jme_rxdata *rdata = xrdata;
	struct jme_softc *sc = rdata->jme_sc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;

	ASSERT_SERIALIZED(&rdata->jme_rx_serialize);

	/* Mask this ring's interrupts while we service them. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR,
	    (rdata->jme_rx_coal | rdata->jme_rx_empty));

	/* Only look at the bits this vector is responsible for. */
	status = CSR_READ_4(sc, JME_INTR_STATUS);
	status &= (rdata->jme_rx_coal | rdata->jme_rx_empty);

	/*
	 * If the coalescing interrupt fired, also ack the ring's RX
	 * completion status bit along with it.
	 */
	if (status & rdata->jme_rx_coal)
		status |= (rdata->jme_rx_coal | rdata->jme_rx_comp);
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & rdata->jme_rx_coal)
			jme_rxeof(rdata, -1);

		/* Ring ran dry: kick the RX queue to restart reception. */
		if (status & rdata->jme_rx_empty) {
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}
	}

	/* Re-enable this ring's interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET,
	    (rdata->jme_rx_coal | rdata->jme_rx_empty));
}
3537
3538 static void
3539 jme_set_msinum(struct jme_softc *sc)
3540 {
3541         int i;
3542
3543         for (i = 0; i < JME_MSINUM_CNT; ++i)
3544                 CSR_WRITE_4(sc, JME_MSINUM(i), sc->jme_msinum[i]);
3545 }
3546
3547 static int
3548 jme_intr_setup(device_t dev)
3549 {
3550         struct jme_softc *sc = device_get_softc(dev);
3551         struct ifnet *ifp = &sc->arpcom.ac_if;
3552         int error;
3553
3554         if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3555                 return jme_msix_setup(dev);
3556
3557         error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE,
3558             jme_intr, sc, &sc->jme_irq_handle, &sc->jme_serialize);
3559         if (error) {
3560                 device_printf(dev, "could not set up interrupt handler.\n");
3561                 return error;
3562         }
3563
3564         ifp->if_cpuid = rman_get_cpuid(sc->jme_irq_res);
3565         KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3566         return 0;
3567 }
3568
3569 static void
3570 jme_intr_teardown(device_t dev)
3571 {
3572         struct jme_softc *sc = device_get_softc(dev);
3573
3574         if (sc->jme_irq_type == PCI_INTR_TYPE_MSIX)
3575                 jme_msix_teardown(dev, sc->jme_msix_cnt);
3576         else
3577                 bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
3578 }
3579
3580 static int
3581 jme_msix_setup(device_t dev)
3582 {
3583         struct jme_softc *sc = device_get_softc(dev);
3584         struct ifnet *ifp = &sc->arpcom.ac_if;
3585         int x;
3586
3587         for (x = 0; x < sc->jme_msix_cnt; ++x) {
3588                 struct jme_msix_data *msix = &sc->jme_msix[x];
3589                 int error;
3590
3591                 error = bus_setup_intr_descr(dev, msix->jme_msix_res,
3592                     INTR_MPSAFE, msix->jme_msix_func, msix->jme_msix_arg,
3593                     &msix->jme_msix_handle, msix->jme_msix_serialize,
3594                     msix->jme_msix_desc);
3595                 if (error) {
3596                         device_printf(dev, "could not set up %s "
3597                             "interrupt handler.\n", msix->jme_msix_desc);
3598                         jme_msix_teardown(dev, x);
3599                         return error;
3600                 }
3601         }
3602         ifp->if_cpuid = 0; /* XXX */
3603         return 0;
3604 }
3605
3606 static void
3607 jme_msix_teardown(device_t dev, int msix_count)
3608 {
3609         struct jme_softc *sc = device_get_softc(dev);
3610         int x;
3611
3612         for (x = 0; x < msix_count; ++x) {
3613                 struct jme_msix_data *msix = &sc->jme_msix[x];
3614
3615                 bus_teardown_intr(dev, msix->jme_msix_res,
3616                     msix->jme_msix_handle);
3617         }
3618 }