First step toward multiple RX ring support
[dragonfly.git] / sys / dev / netif / jme / if_jme.c
1 /*-
2  * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
28  * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.12 2008/11/26 11:55:18 sephe Exp $
29  */
30
31 #include "opt_polling.h"
32
33 #include <sys/param.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/interrupt.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/rman.h>
41 #include <sys/serialize.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45
46 #include <net/ethernet.h>
47 #include <net/if.h>
48 #include <net/bpf.h>
49 #include <net/if_arp.h>
50 #include <net/if_dl.h>
51 #include <net/if_media.h>
52 #include <net/ifq_var.h>
53 #include <net/vlan/if_vlan_var.h>
54 #include <net/vlan/if_vlan_ether.h>
55
56 #include <dev/netif/mii_layer/miivar.h>
57 #include <dev/netif/mii_layer/jmphyreg.h>
58
59 #include <bus/pci/pcireg.h>
60 #include <bus/pci/pcivar.h>
61 #include <bus/pci/pcidevs.h>
62
63 #include <dev/netif/jme/if_jmereg.h>
64 #include <dev/netif/jme/if_jmevar.h>
65
66 #include "miibus_if.h"
67
68 /* Define the following to disable printing Rx errors. */
69 #undef  JME_SHOW_ERRORS
70
71 #define JME_CSUM_FEATURES       (CSUM_IP | CSUM_TCP | CSUM_UDP)
72
73 static int      jme_probe(device_t);
74 static int      jme_attach(device_t);
75 static int      jme_detach(device_t);
76 static int      jme_shutdown(device_t);
77 static int      jme_suspend(device_t);
78 static int      jme_resume(device_t);
79
80 static int      jme_miibus_readreg(device_t, int, int);
81 static int      jme_miibus_writereg(device_t, int, int, int);
82 static void     jme_miibus_statchg(device_t);
83
84 static void     jme_init(void *);
85 static int      jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
86 static void     jme_start(struct ifnet *);
87 static void     jme_watchdog(struct ifnet *);
88 static void     jme_mediastatus(struct ifnet *, struct ifmediareq *);
89 static int      jme_mediachange(struct ifnet *);
90 #ifdef DEVICE_POLLING
91 static void     jme_poll(struct ifnet *, enum poll_cmd, int);
92 #endif
93
94 static void     jme_intr(void *);
95 static void     jme_txeof(struct jme_softc *);
96 static void     jme_rxeof(struct jme_softc *, int, int);
97 static void     jme_rx_intr(struct jme_softc *, uint32_t);
98
99 static int      jme_dma_alloc(struct jme_softc *);
100 static void     jme_dma_free(struct jme_softc *, int);
101 static void     jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
102 static void     jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
103                                   bus_size_t, int);
104 static int      jme_init_rx_ring(struct jme_softc *, int);
105 static void     jme_init_tx_ring(struct jme_softc *);
106 static void     jme_init_ssb(struct jme_softc *);
107 static int      jme_newbuf(struct jme_softc *, int, struct jme_rxdesc *, int);
108 static int      jme_encap(struct jme_softc *, struct mbuf **);
109 static void     jme_rxpkt(struct jme_softc *, int, struct mbuf_chain *);
110 static int      jme_rxring_dma_alloc(struct jme_softc *, bus_addr_t, int);
111 static int      jme_rxbuf_dma_alloc(struct jme_softc *, int);
112
113 static void     jme_tick(void *);
114 static void     jme_stop(struct jme_softc *);
115 static void     jme_reset(struct jme_softc *);
116 static void     jme_set_vlan(struct jme_softc *);
117 static void     jme_set_filter(struct jme_softc *);
118 static void     jme_stop_tx(struct jme_softc *);
119 static void     jme_stop_rx(struct jme_softc *);
120 static void     jme_mac_config(struct jme_softc *);
121 static void     jme_reg_macaddr(struct jme_softc *, uint8_t[]);
122 static int      jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
123 static int      jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
124 #ifdef notyet
125 static void     jme_setwol(struct jme_softc *);
126 static void     jme_setlinkspeed(struct jme_softc *);
127 #endif
128 static void     jme_set_tx_coal(struct jme_softc *);
129 static void     jme_set_rx_coal(struct jme_softc *);
130
131 static void     jme_sysctl_node(struct jme_softc *);
132 static int      jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
133 static int      jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
134 static int      jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
135 static int      jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
136
/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
        uint16_t        jme_vendorid;   /* PCI vendor ID to match */
        uint16_t        jme_deviceid;   /* PCI device ID to match */
        uint32_t        jme_caps;       /* initial JME_CAP_* flags for chip */
        const char      *jme_name;      /* device description string */
} jme_devs[] = {
        { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
            JME_CAP_JUMBO,
            "JMicron Inc, JMC250 Gigabit Ethernet" },
        { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
            JME_CAP_FASTETH,
            "JMicron Inc, JMC260 Fast Ethernet" },
        { 0, 0, 0, NULL }       /* sentinel: terminates probe loop */
};
154
/* newbus method dispatch table for the jme driver. */
static device_method_t jme_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe,         jme_probe),
        DEVMETHOD(device_attach,        jme_attach),
        DEVMETHOD(device_detach,        jme_detach),
        DEVMETHOD(device_shutdown,      jme_shutdown),
        DEVMETHOD(device_suspend,       jme_suspend),
        DEVMETHOD(device_resume,        jme_resume),

        /* Bus interface. */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface: register access + link-state change callback. */
        DEVMETHOD(miibus_readreg,       jme_miibus_readreg),
        DEVMETHOD(miibus_writereg,      jme_miibus_writereg),
        DEVMETHOD(miibus_statchg,       jme_miibus_statchg),

        { NULL, NULL }  /* sentinel */
};
175
/* Driver declaration and module glue. */
static driver_t jme_driver = {
        "jme",
        jme_methods,
        sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
/* jme depends on miibus for PHY management; attach both to the tree. */
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
188
/*
 * Per-RX-ring interrupt status bits, indexed by ring number.
 * jme_coal: coalescing and coalescing-timeout bits for the ring;
 * jme_comp: completion bit for the ring.
 */
static const struct {
        uint32_t        jme_coal;
        uint32_t        jme_comp;
} jme_rx_status[JME_NRXRING_MAX] = {
        { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP },
        { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP },
        { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP },
        { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP }
};
198
/* Defaults; desc counts are overridable via loader tunables below. */
static int      jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
static int      jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
/* RX ring count has no tunable yet; fixed at 1 in this first step. */
static int      jme_rx_ring_count = 1;

TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
205
206 /*
207  *      Read a PHY register on the MII of the JMC250.
208  */
209 static int
210 jme_miibus_readreg(device_t dev, int phy, int reg)
211 {
212         struct jme_softc *sc = device_get_softc(dev);
213         uint32_t val;
214         int i;
215
216         /* For FPGA version, PHY address 0 should be ignored. */
217         if (sc->jme_caps & JME_CAP_FPGA) {
218                 if (phy == 0)
219                         return (0);
220         } else {
221                 if (sc->jme_phyaddr != phy)
222                         return (0);
223         }
224
225         CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
226             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
227
228         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
229                 DELAY(1);
230                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
231                         break;
232         }
233         if (i == 0) {
234                 device_printf(sc->jme_dev, "phy read timeout: "
235                               "phy %d, reg %d\n", phy, reg);
236                 return (0);
237         }
238
239         return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
240 }
241
242 /*
243  *      Write a PHY register on the MII of the JMC250.
244  */
245 static int
246 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
247 {
248         struct jme_softc *sc = device_get_softc(dev);
249         int i;
250
251         /* For FPGA version, PHY address 0 should be ignored. */
252         if (sc->jme_caps & JME_CAP_FPGA) {
253                 if (phy == 0)
254                         return (0);
255         } else {
256                 if (sc->jme_phyaddr != phy)
257                         return (0);
258         }
259
260         CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
261             ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
262             SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
263
264         for (i = JME_PHY_TIMEOUT; i > 0; i--) {
265                 DELAY(1);
266                 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
267                         break;
268         }
269         if (i == 0) {
270                 device_printf(sc->jme_dev, "phy write timeout: "
271                               "phy %d, reg %d\n", phy, reg);
272         }
273
274         return (0);
275 }
276
/*
 *      Callback from MII layer when media changes.
 *      Re-derives link state, fully stops the MAC, reclaims all TX/RX
 *      resources, then reprograms ring addresses and restarts the MAC
 *      with the resolved speed/duplex settings.
 */
static void
jme_miibus_statchg(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii;
        struct jme_txdesc *txd;
        bus_addr_t paddr;
        int i, r;

        ASSERT_SERIALIZED(ifp->if_serializer);

        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return;

        mii = device_get_softc(sc->jme_miibus);

        /* Re-derive link state from the current media status. */
        sc->jme_flags &= ~JME_FLAG_LINK;
        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->jme_flags |= JME_FLAG_LINK;
                        break;
                case IFM_1000_T:
                        /* Fast-ethernet-only parts cannot do 1000baseT. */
                        if (sc->jme_caps & JME_CAP_FASTETH)
                                break;
                        sc->jme_flags |= JME_FLAG_LINK;
                        break;
                default:
                        break;
                }
        }

        /*
         * Disabling Rx/Tx MACs have a side-effect of resetting
         * JME_TXNDA/JME_RXNDA register to the first address of
         * Tx/Rx descriptor address. So driver should reset its
         * internal producer/consumer pointer and reclaim any
         * allocated resources.  Note, just saving the value of
         * JME_TXNDA and JME_RXNDA registers before stopping MAC
         * and restoring JME_TXNDA/JME_RXNDA register is not
         * sufficient to make sure correct MAC state because
         * stopping MAC operation can take a while and hardware
         * might have updated JME_TXNDA/JME_RXNDA registers
         * during the stop operation.
         */

        /* Disable interrupts */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

        /* Stop driver */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;
        callout_stop(&sc->jme_tick_ch);

        /* Stop receiver/transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);

        /* Drain and reset every RX ring currently in use. */
        for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
                struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

                jme_rxeof(sc, r, -1);
                if (rdata->jme_rxhead != NULL)
                        m_freem(rdata->jme_rxhead);
                JME_RXCHAIN_RESET(sc, r);

                /*
                 * Reuse configured Rx descriptors and reset
                 * producer/consumer index.
                 */
                rdata->jme_rx_cons = 0;
        }

        /* Reclaim completed TX descriptors, then drop anything queued. */
        jme_txeof(sc);
        if (sc->jme_cdata.jme_tx_cnt != 0) {
                /* Remove queued packets for transmit. */
                for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
                        txd = &sc->jme_cdata.jme_txdesc[i];
                        if (txd->tx_m != NULL) {
                                bus_dmamap_unload(
                                    sc->jme_cdata.jme_tx_tag,
                                    txd->tx_dmamap);
                                m_freem(txd->tx_m);
                                txd->tx_m = NULL;
                                txd->tx_ndesc = 0;
                                ifp->if_oerrors++;
                        }
                }
        }
        jme_init_tx_ring(sc);

        /* Initialize shadow status block. */
        jme_init_ssb(sc);

        /* Program MAC with resolved speed/duplex/flow-control. */
        if (sc->jme_flags & JME_FLAG_LINK) {
                jme_mac_config(sc);

                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

                /* Set Tx ring address to the hardware. */
                paddr = sc->jme_cdata.jme_tx_ring_paddr;
                CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
                CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

                for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
                        /* Select ring r before writing its base address. */
                        CSR_WRITE_4(sc, JME_RXCSR,
                            sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

                        /* Set Rx ring address to the hardware. */
                        paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
                        CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
                        CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
                }

                /* Restart receiver/transmitter. */
                CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
                    RXCSR_RXQ_START);
                CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
        }

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
        if (!(ifp->if_flags & IFF_POLLING))
#endif
        /* Reenable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
413
414 /*
415  *      Get the current interface media status.
416  */
417 static void
418 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
419 {
420         struct jme_softc *sc = ifp->if_softc;
421         struct mii_data *mii = device_get_softc(sc->jme_miibus);
422
423         ASSERT_SERIALIZED(ifp->if_serializer);
424
425         mii_pollstat(mii);
426         ifmr->ifm_status = mii->mii_media_status;
427         ifmr->ifm_active = mii->mii_media_active;
428 }
429
430 /*
431  *      Set hardware to newly-selected media.
432  */
433 static int
434 jme_mediachange(struct ifnet *ifp)
435 {
436         struct jme_softc *sc = ifp->if_softc;
437         struct mii_data *mii = device_get_softc(sc->jme_miibus);
438         int error;
439
440         ASSERT_SERIALIZED(ifp->if_serializer);
441
442         if (mii->mii_instance != 0) {
443                 struct mii_softc *miisc;
444
445                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
446                         mii_phy_reset(miisc);
447         }
448         error = mii_mediachg(mii);
449
450         return (error);
451 }
452
453 static int
454 jme_probe(device_t dev)
455 {
456         const struct jme_dev *sp;
457         uint16_t vid, did;
458
459         vid = pci_get_vendor(dev);
460         did = pci_get_device(dev);
461         for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
462                 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
463                         struct jme_softc *sc = device_get_softc(dev);
464
465                         sc->jme_caps = sp->jme_caps;
466                         device_set_desc(dev, sp->jme_name);
467                         return (0);
468                 }
469         }
470         return (ENXIO);
471 }
472
473 static int
474 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
475 {
476         uint32_t reg;
477         int i;
478
479         *val = 0;
480         for (i = JME_TIMEOUT; i > 0; i--) {
481                 reg = CSR_READ_4(sc, JME_SMBCSR);
482                 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
483                         break;
484                 DELAY(1);
485         }
486
487         if (i == 0) {
488                 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
489                 return (ETIMEDOUT);
490         }
491
492         reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
493         CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
494         for (i = JME_TIMEOUT; i > 0; i--) {
495                 DELAY(1);
496                 reg = CSR_READ_4(sc, JME_SMBINTF);
497                 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
498                         break;
499         }
500
501         if (i == 0) {
502                 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
503                 return (ETIMEDOUT);
504         }
505
506         reg = CSR_READ_4(sc, JME_SMBINTF);
507         *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
508
509         return (0);
510 }
511
/*
 * Extract the station address from EEPROM descriptors.
 * The EEPROM holds (descriptor, register, value) triplets; we collect
 * the values written to the JME_PAR0..JME_PAR0+5 station address
 * registers.  Returns 0 if all six bytes were found, ENOENT otherwise.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
        uint8_t fup, reg, val;
        uint32_t offset;
        int match;

        /* Verify the two-byte EEPROM signature first. */
        offset = 0;
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG0)
                return (ENOENT);
        if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
            fup != JME_EEPROM_SIG1)
                return (ENOENT);
        match = 0;
        do {
                if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
                        break;
                /* Check for the end of EEPROM descriptor. */
                if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
                        break;
                /* Only function-0/BAR1 descriptors carry PAR writes. */
                if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
                    JME_EEPROM_PAGE_BAR1) == fup) {
                        if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
                                break;
                        if (reg >= JME_PAR0 &&
                            reg < JME_PAR0 + ETHER_ADDR_LEN) {
                                if (jme_eeprom_read_byte(sc, offset + 2,
                                    &val) != 0)
                                        break;
                                eaddr[reg - JME_PAR0] = val;
                                match++;
                        }
                }
                /* Try next eeprom descriptor. */
                offset += JME_EEPROM_DESC_BYTES;
        } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

        if (match == ETHER_ADDR_LEN)
                return (0);

        return (ENOENT);
}
555
556 static void
557 jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
558 {
559         uint32_t par0, par1;
560
561         /* Read station address. */
562         par0 = CSR_READ_4(sc, JME_PAR0);
563         par1 = CSR_READ_4(sc, JME_PAR1);
564         par1 &= 0xFFFF;
565         if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
566                 device_printf(sc->jme_dev,
567                     "generating fake ethernet address.\n");
568                 par0 = karc4random();
569                 /* Set OUI to JMicron. */
570                 eaddr[0] = 0x00;
571                 eaddr[1] = 0x1B;
572                 eaddr[2] = 0x8C;
573                 eaddr[3] = (par0 >> 16) & 0xff;
574                 eaddr[4] = (par0 >> 8) & 0xff;
575                 eaddr[5] = par0 & 0xff;
576         } else {
577                 eaddr[0] = (par0 >> 0) & 0xFF;
578                 eaddr[1] = (par0 >> 8) & 0xFF;
579                 eaddr[2] = (par0 >> 16) & 0xFF;
580                 eaddr[3] = (par0 >> 24) & 0xFF;
581                 eaddr[4] = (par1 >> 0) & 0xFF;
582                 eaddr[5] = (par1 >> 8) & 0xFF;
583         }
584 }
585
/*
 * Attach: size the descriptor rings, bring the chip to D0, map BAR0,
 * allocate the IRQ, read revision/caps, obtain the MAC address, probe
 * the PHY, allocate DMA resources and register the network interface.
 * On any failure, jumps to fail: which runs jme_detach() for cleanup.
 */
static int
jme_attach(device_t dev)
{
        struct jme_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t reg;
        uint16_t did;
        uint8_t pcie_ptr, rev;
        int error = 0;
        uint8_t eaddr[ETHER_ADDR_LEN];

        /* Round tunable descriptor counts to hardware alignment and cap. */
        sc->jme_rx_desc_cnt = roundup(jme_rx_desc_count, JME_NDESC_ALIGN);
        if (sc->jme_rx_desc_cnt > JME_NDESC_MAX)
                sc->jme_rx_desc_cnt = JME_NDESC_MAX;

        sc->jme_tx_desc_cnt = roundup(jme_tx_desc_count, JME_NDESC_ALIGN);
        if (sc->jme_tx_desc_cnt > JME_NDESC_MAX)
                sc->jme_tx_desc_cnt = JME_NDESC_MAX;

        /* Clamp RX ring count to [1, min(ncpus2, JME_NRXRING_MAX)]. */
        sc->jme_rx_ring_cnt = jme_rx_ring_count;
        if (sc->jme_rx_ring_cnt <= 0)
                sc->jme_rx_ring_cnt = 1;
        if (sc->jme_rx_ring_cnt > ncpus2)
                sc->jme_rx_ring_cnt = ncpus2;
        if (sc->jme_rx_ring_cnt > JME_NRXRING_MAX)
                sc->jme_rx_ring_cnt = JME_NRXRING_MAX;

        /* Multiple RX rings imply RSS operation. */
        if (sc->jme_rx_ring_cnt > 1) {
                sc->jme_caps |= JME_CAP_RSS;
                sc->jme_flags |= JME_FLAG_RSS;
        }
        sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;

        sc->jme_dev = dev;
        sc->jme_lowaddr = BUS_SPACE_MAXADDR;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
        if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
                uint32_t irq, mem;

                /* Save config regs that may be lost across the D-state change. */
                irq = pci_read_config(dev, PCIR_INTLINE, 4);
                mem = pci_read_config(dev, JME_PCIR_BAR, 4);

                device_printf(dev, "chip is in D%d power mode "
                    "-- setting to D0\n", pci_get_powerstate(dev));

                pci_set_powerstate(dev, PCI_POWERSTATE_D0);

                pci_write_config(dev, PCIR_INTLINE, irq, 4);
                pci_write_config(dev, JME_PCIR_BAR, mem, 4);
        }
#endif  /* !BURN_BRIDGES */

        /* Enable bus mastering */
        pci_enable_busmaster(dev);

        /*
         * Allocate IO memory
         *
         * JMC250 supports both memory mapped and I/O register space
         * access.  Because I/O register access should use different
         * BARs to access registers it's waste of time to use I/O
         * register space access.  JMC250 uses 16K to map entire memory
         * space.
         */
        sc->jme_mem_rid = JME_PCIR_BAR;
        sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                                 &sc->jme_mem_rid, RF_ACTIVE);
        if (sc->jme_mem_res == NULL) {
                device_printf(dev, "can't allocate IO memory\n");
                return ENXIO;
        }
        sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
        sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

        /*
         * Allocate IRQ
         */
        sc->jme_irq_rid = 0;
        sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                                 &sc->jme_irq_rid,
                                                 RF_SHAREABLE | RF_ACTIVE);
        if (sc->jme_irq_res == NULL) {
                device_printf(dev, "can't allocate irq\n");
                error = ENXIO;
                goto fail;
        }

        /*
         * Extract revisions
         */
        reg = CSR_READ_4(sc, JME_CHIPMODE);
        if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
            CHIPMODE_NOT_FPGA) {
                sc->jme_caps |= JME_CAP_FPGA;
                if (bootverbose) {
                        device_printf(dev, "FPGA revision: 0x%04x\n",
                                      (reg & CHIPMODE_FPGA_REV_MASK) >>
                                      CHIPMODE_FPGA_REV_SHIFT);
                }
        }

        /* NOTE: FM revision is put in the upper 4 bits */
        rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
        rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
        if (bootverbose)
                device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

        /* Apply per-chip workarounds / DMA address limits. */
        did = pci_get_device(dev);
        switch (did) {
        case PCI_PRODUCT_JMICRON_JMC250:
                if (rev == JME_REV1_A2)
                        sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
                break;

        case PCI_PRODUCT_JMICRON_JMC260:
                if (rev == JME_REV2)
                        sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
                break;

        default:
                /* probe matched, so this should be unreachable */
                panic("unknown device id 0x%04x\n", did);
        }
        if (rev >= JME_REV2) {
                sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
                sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
                                      GHC_TXMAC_CLKSRC_1000;
        }

        /* Reset the ethernet controller. */
        jme_reset(sc);

        /* Get station address: prefer EEPROM, fall back to PAR registers. */
        reg = CSR_READ_4(sc, JME_SMBCSR);
        if (reg & SMBCSR_EEPROM_PRESENT)
                error = jme_eeprom_macaddr(sc, eaddr);
        if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
                if (error != 0 && (bootverbose)) {
                        device_printf(dev, "ethernet hardware address "
                                      "not found in EEPROM.\n");
                }
                jme_reg_macaddr(sc, eaddr);
        }

        /*
         * Save PHY address.
         * Integrated JR0211 has fixed PHY address whereas FPGA version
         * requires PHY probing to get correct PHY address.
         */
        if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
                sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
                    GPREG0_PHY_ADDR_MASK;
                if (bootverbose) {
                        device_printf(dev, "PHY is at address %d.\n",
                            sc->jme_phyaddr);
                }
        } else {
                sc->jme_phyaddr = 0;
        }

        /* Set max allowable DMA size. */
        pcie_ptr = pci_get_pciecap_ptr(dev);
        if (pcie_ptr != 0) {
                uint16_t ctrl;

                sc->jme_caps |= JME_CAP_PCIE;
                ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
                if (bootverbose) {
                        device_printf(dev, "Read request size : %d bytes.\n",
                            128 << ((ctrl >> 12) & 0x07));
                        device_printf(dev, "TLP payload size : %d bytes.\n",
                            128 << ((ctrl >> 5) & 0x07));
                }
                /* TX DMA burst follows the PCIe max read request size. */
                switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
                case PCIEM_DEVCTL_MAX_READRQ_128:
                        sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
                        break;
                case PCIEM_DEVCTL_MAX_READRQ_256:
                        sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
                        break;
                default:
                        sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
                        break;
                }
                sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
        } else {
                sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
                sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
        }

#ifdef notyet
        if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
                sc->jme_caps |= JME_CAP_PMCAP;
#endif

        /*
         * Create sysctl tree
         */
        jme_sysctl_node(sc);

        /* Allocate DMA stuffs */
        error = jme_dma_alloc(sc);
        if (error)
                goto fail;

        /* Populate the ifnet and hand it to the network stack. */
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_init = jme_init;
        ifp->if_ioctl = jme_ioctl;
        ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
        ifp->if_poll = jme_poll;
#endif
        ifp->if_watchdog = jme_watchdog;
        ifq_set_maxlen(&ifp->if_snd, sc->jme_tx_desc_cnt - JME_TXD_RSVD);
        ifq_set_ready(&ifp->if_snd);

        /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
        ifp->if_capabilities = IFCAP_HWCSUM |
                               IFCAP_VLAN_MTU |
                               IFCAP_VLAN_HWTAGGING;
        ifp->if_hwassist = JME_CSUM_FEATURES;
        ifp->if_capenable = ifp->if_capabilities;

        /* Set up MII bus. */
        error = mii_phy_probe(dev, &sc->jme_miibus,
                              jme_mediachange, jme_mediastatus);
        if (error) {
                device_printf(dev, "no PHY found!\n");
                goto fail;
        }

        /*
         * Save PHYADDR for FPGA mode PHY.
         */
        if (sc->jme_caps & JME_CAP_FPGA) {
                struct mii_data *mii = device_get_softc(sc->jme_miibus);

                if (mii->mii_instance != 0) {
                        struct mii_softc *miisc;

                        /* Pick the first non-zero PHY address found. */
                        LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
                                if (miisc->mii_phy != 0) {
                                        sc->jme_phyaddr = miisc->mii_phy;
                                        break;
                                }
                        }
                        if (sc->jme_phyaddr != 0) {
                                device_printf(sc->jme_dev,
                                    "FPGA PHY is at %d\n", sc->jme_phyaddr);
                                /* vendor magic. */
                                jme_miibus_writereg(dev, sc->jme_phyaddr,
                                    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

                                /* XXX should we clear JME_WA_EXTFIFO */
                        }
                }
        }

        ether_ifattach(ifp, eaddr, NULL);

        /* Tell the upper layer(s) we support long frames. */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        /* Hook the interrupt last; the ifnet must be fully set up first. */
        error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
                               &sc->jme_irq_handle, ifp->if_serializer);
        if (error) {
                device_printf(dev, "could not set up interrupt handler.\n");
                ether_ifdetach(ifp);
                goto fail;
        }

        /* Bind if_start etc. to the cpu servicing the interrupt. */
        ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
        KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
        return 0;
fail:
        jme_detach(dev);
        return (error);
}
869
/*
 * Detach hook: release everything jme_attach() set up.  This is also
 * called from the attach failure path (the "fail:" label), so every
 * resource is NULL-checked before being released.
 */
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/*
		 * Quiesce the chip and remove the interrupt handler
		 * under the per-ifnet serializer before detaching the
		 * ethernet layer.
		 */
		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
				     sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	/* detach=1: also free the kmalloc'd txdesc/rxdesc arrays. */
	jme_dma_free(sc, 1);

	return (0);
}
907
/*
 * Create the per-device sysctl tree (under hw.<nameunit>), register the
 * TX/RX coalescing tunables and the read-only descriptor counts, and
 * install the default coalescing parameters.  Failure to create the
 * tree is reported but non-fatal.
 */
static void
jme_sysctl_node(struct jme_softc *sc)
{
	int coal_max;

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(sc->jme_dev),
				CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "rx_desc_count", CTLFLAG_RD, &sc->jme_rx_desc_cnt,
		       0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		       "tx_desc_count", CTLFLAG_RD, &sc->jme_tx_desc_cnt,
		       0, "TX desc count");

	/*
	 * Set default coalesce values
	 */
	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;

	/*
	 * Adjust coalesce values, in case that the number of TX/RX
	 * descs are set to small values by users.
	 *
	 * NOTE: coal_max will not be zero, since number of descs
	 * must be aligned by JME_NDESC_ALIGN (16 currently)
	 *
	 * NOTE(review): the divisors 6 and 4 look empirical; no
	 * datasheet reference is visible here.
	 */
	coal_max = sc->jme_tx_desc_cnt / 6;
	if (coal_max < sc->jme_tx_coal_pkt)
		sc->jme_tx_coal_pkt = coal_max;

	coal_max = sc->jme_rx_desc_cnt / 4;
	if (coal_max < sc->jme_rx_coal_pkt)
		sc->jme_rx_coal_pkt = coal_max;
}
975
976 static void
977 jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
978 {
979         if (error)
980                 return;
981
982         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
983         *((bus_addr_t *)arg) = segs->ds_addr;
984 }
985
986 static void
987 jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
988                   bus_size_t mapsz __unused, int error)
989 {
990         struct jme_dmamap_ctx *ctx = xctx;
991         int i;
992
993         if (error)
994                 return;
995
996         if (nsegs > ctx->nsegs) {
997                 ctx->nsegs = 0;
998                 return;
999         }
1000
1001         ctx->nsegs = nsegs;
1002         for (i = 0; i < nsegs; ++i)
1003                 ctx->segs[i] = segs[i];
1004 }
1005
/*
 * Allocate and map all DMA resources used by the chip.
 *
 * Order of operations:
 *   1. kmalloc the software TX descriptor array and one RX descriptor
 *      array per RX ring.
 *   2. Create the parent ring tag, then the TX ring and the per-ring
 *      RX rings (jme_rxring_dma_alloc()).
 *   3. If 64-bit addressing is in use, verify no ring crosses a 4GB
 *      boundary; on violation free the rings and restart at "again"
 *      with 32-bit addressing.
 *   4. Create the parent buffer tag, the shadow status block, the TX
 *      buffer maps and the per-ring RX buffers (jme_rxbuf_dma_alloc()).
 *
 * Returns 0 on success or a bus_dma error.
 *
 * NOTE(review): several error returns leave earlier allocations in
 * place; the attach failure path calls jme_detach() -> jme_dma_free(),
 * which presumably performs the cleanup -- confirm before calling this
 * from any other context.
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_addr_t busaddr, lowaddr;
	int error, i;

	/* Software descriptor arrays; M_WAITOK, so these cannot fail. */
	sc->jme_cdata.jme_txdesc =
	kmalloc(sc->jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		sc->jme_cdata.jme_rx_data[i].jme_rxdesc =
		kmalloc(sc->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Start with the probed address limit; may be lowered below. */
	lowaddr = sc->jme_lowaddr;
again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,                       /* algnmnt, boundary */
	    lowaddr,                    /* lowaddr */
	    BUS_SPACE_MAXADDR,          /* highaddr */
	    NULL, NULL,                 /* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
	    0,                          /* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
	    0,                          /* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,       /* algnmnt, boundary */
	    lowaddr,                    /* lowaddr */
	    BUS_SPACE_MAXADDR,          /* highaddr */
	    NULL, NULL,                 /* filter, filterarg */
	    JME_TX_RING_SIZE(sc),       /* maxsize */
	    1,                          /* nsegments */
	    JME_TX_RING_SIZE(sc),       /* maxsegsize */
	    0,                          /* flags */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_cdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}

	/*  Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_cdata.jme_tx_ring,
	    JME_TX_RING_SIZE(sc), jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
				sc->jme_cdata.jme_tx_ring,
				sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}
	sc->jme_cdata.jme_tx_ring_paddr = busaddr;

	/*
	 * Create DMA stuffs for RX ring
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(sc, lowaddr, i);
		if (error)
			return error;
	}

	if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		bus_addr_t ring_end;

		/* Tx/Rx descriptor queue should reside within 4GB boundary. */
		ring_end = sc->jme_cdata.jme_tx_ring_paddr +
			   JME_TX_RING_SIZE(sc);
		if (JME_ADDR_HI(ring_end) !=
		    JME_ADDR_HI(sc->jme_cdata.jme_tx_ring_paddr)) {
			device_printf(sc->jme_dev, "TX ring 4GB boundary "
			    "crossed, switching to 32bit DMA address mode.\n");
			jme_dma_free(sc, 0);
			/* Limit DMA address space to 32bit and try again. */
			lowaddr = BUS_SPACE_MAXADDR_32BIT;
			goto again;
		}

		for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
			bus_addr_t ring_start;

			ring_start =
			    sc->jme_cdata.jme_rx_data[i].jme_rx_ring_paddr;
			ring_end = ring_start + JME_RX_RING_SIZE(sc);
			if (JME_ADDR_HI(ring_end) != JME_ADDR_HI(ring_start)) {
				device_printf(sc->jme_dev,
				"%dth RX ring 4GB boundary crossed, "
				"switching to 32bit DMA address mode.\n", i);
				jme_dma_free(sc, 0);
				/*
				 * Limit DMA address space to 32bit and
				 * try again.
				 */
				lowaddr = BUS_SPACE_MAXADDR_32BIT;
				goto again;
			}
		}
	}

	/*
	 * Create parent buffer tag.  Note: buffers keep the original
	 * sc->jme_lowaddr even if the rings fell back to 32bit above.
	 */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,                       /* algnmnt, boundary */
	    sc->jme_lowaddr,            /* lowaddr */
	    BUS_SPACE_MAXADDR,          /* highaddr */
	    NULL, NULL,                 /* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
	    0,                          /* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
	    0,                          /* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,           /* algnmnt, boundary */
	    sc->jme_lowaddr,            /* lowaddr */
	    BUS_SPACE_MAXADDR,          /* highaddr */
	    NULL, NULL,                 /* filter, filterarg */
	    JME_SSB_SIZE,               /* maxsize */
	    1,                          /* nsegments */
	    JME_SSB_SIZE,               /* maxsegsize */
	    0,                          /* flags */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for shadow status block. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_cdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_ssb_map);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shadow status block.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
		return error;
	}

	/* Load the DMA map for shadow status block */
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_cdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shadow status block.\n");
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
				sc->jme_cdata.jme_ssb_block,
				sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
		return error;
	}
	sc->jme_cdata.jme_ssb_block_paddr = busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,                       /* algnmnt, boundary */
	    sc->jme_lowaddr,            /* lowaddr */
	    BUS_SPACE_MAXADDR,          /* highaddr */
	    NULL, NULL,                 /* filter, filterarg */
	    JME_TSO_MAXSIZE,            /* maxsize */
	    JME_MAXTXSEGS,              /* nsegments */
	    JME_TSO_MAXSEGSIZE,         /* maxsegsize */
	    0,                          /* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			/* Unwind the maps created so far, then the tag. */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(sc, i);
		if (error)
			return error;
	}
	return 0;
}
1256
/*
 * Release the DMA resources created by jme_dma_alloc().  Every tag is
 * NULL-checked and NULLed afterwards, so this is safe to call on a
 * partially-allocated softc (e.g. the 4GB-boundary retry path calls it
 * with detach=0 to free only the bus_dma state and keep the kmalloc'd
 * descriptor arrays; detach=1 frees those arrays as well).
 */
static void
jme_dma_free(struct jme_softc *sc, int detach)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
					  rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
					rdata->jme_rx_ring,
					rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers (per-descriptor maps plus the spare map) */
	for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
					   rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags last, after all children are destroyed. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	if (detach) {
		if (sc->jme_cdata.jme_txdesc != NULL) {
			kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
			sc->jme_cdata.jme_txdesc = NULL;
		}
		for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
			rdata = &sc->jme_cdata.jme_rx_data[r];
			if (rdata->jme_rxdesc != NULL) {
				kfree(rdata->jme_rxdesc, M_DEVBUF);
				rdata->jme_rxdesc = NULL;
			}
		}
	}
}
1351
1352 /*
1353  *      Make sure the interface is stopped at reboot time.
1354  */
1355 static int
1356 jme_shutdown(device_t dev)
1357 {
1358         return jme_suspend(dev);
1359 }
1360
1361 #ifdef notyet
1362 /*
1363  * Unlike other ethernet controllers, JMC250 requires
1364  * explicit resetting link speed to 10/100Mbps as gigabit
1365  * link will cunsume more power than 375mA.
1366  * Note, we reset the link speed to 10/100Mbps with
1367  * auto-negotiation but we don't know whether that operation
1368  * would succeed or not as we have no control after powering
1369  * off. If the renegotiation fail WOL may not work. Running
1370  * at 1Gbps draws more power than 375mA at 3.3V which is
1371  * specified in PCI specification and that would result in
1372  * complete shutdowning power to ethernet controller.
1373  *
1374  * TODO
1375  *  Save current negotiated media speed/duplex/flow-control
1376  *  to softc and restore the same link again after resuming.
1377  *  PHY handling such as power down/resetting to 100Mbps
1378  *  may be better handled in suspend method in phy driver.
1379  */
1380 static void
1381 jme_setlinkspeed(struct jme_softc *sc)
1382 {
1383         struct mii_data *mii;
1384         int aneg, i;
1385
1386         JME_LOCK_ASSERT(sc);
1387
1388         mii = device_get_softc(sc->jme_miibus);
1389         mii_pollstat(mii);
1390         aneg = 0;
1391         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1392                 switch IFM_SUBTYPE(mii->mii_media_active) {
1393                 case IFM_10_T:
1394                 case IFM_100_TX:
1395                         return;
1396                 case IFM_1000_T:
1397                         aneg++;
1398                 default:
1399                         break;
1400                 }
1401         }
1402         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1403         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1404             ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1405         jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1406             BMCR_AUTOEN | BMCR_STARTNEG);
1407         DELAY(1000);
1408         if (aneg != 0) {
1409                 /* Poll link state until jme(4) get a 10/100 link. */
1410                 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1411                         mii_pollstat(mii);
1412                         if ((mii->mii_media_status & IFM_AVALID) != 0) {
1413                                 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1414                                 case IFM_10_T:
1415                                 case IFM_100_TX:
1416                                         jme_mac_config(sc);
1417                                         return;
1418                                 default:
1419                                         break;
1420                                 }
1421                         }
1422                         JME_UNLOCK(sc);
1423                         pause("jmelnk", hz);
1424                         JME_LOCK(sc);
1425                 }
1426                 if (i == MII_ANEGTICKS_GIGE)
1427                         device_printf(sc->jme_dev, "establishing link failed, "
1428                             "WOL may not work!");
1429         }
1430         /*
1431          * No link, force MAC to have 100Mbps, full-duplex link.
1432          * This is the last resort and may/may not work.
1433          */
1434         mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1435         mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1436         jme_mac_config(sc);
1437 }
1438
/*
 * Program Wake-On-LAN state before suspend: configure the chip's
 * magic-frame wakeup and PME message enable, request PME through the
 * PCI power-management capability, and power down the PHY when WOL is
 * not in use (or when no PME capability exists at all).
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	/* Start from current register state with WOL bits cleared. */
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1481 #endif
1482
1483 static int
1484 jme_suspend(device_t dev)
1485 {
1486         struct jme_softc *sc = device_get_softc(dev);
1487         struct ifnet *ifp = &sc->arpcom.ac_if;
1488
1489         lwkt_serialize_enter(ifp->if_serializer);
1490         jme_stop(sc);
1491 #ifdef notyet
1492         jme_setwol(sc);
1493 #endif
1494         lwkt_serialize_exit(ifp->if_serializer);
1495
1496         return (0);
1497 }
1498
/*
 * Resume hook: (once the notyet code is enabled) clear PME enable and
 * status in the PCI power-management registers, then reinitialize the
 * interface if it was up at suspend time.
 */
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	lwkt_serialize_enter(ifp->if_serializer);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
1530
/*
 * Map the mbuf chain *m_head for TX DMA and fill ring descriptors for
 * it, starting at the current producer index.
 *
 * On success the frame is handed to the hardware (txd->tx_m records the
 * mbuf for later reclamation by jme_txeof()) and 0 is returned.  On
 * failure the mbuf chain is freed, *m_head is set to NULL and an errno
 * is returned.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/*
	 * In 64bit address mode the first descriptor of a chain is a
	 * "symbol" descriptor carrying no payload (see below), so one
	 * extra ring slot is consumed per frame.
	 */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	/* Free descriptors, minus the reserved ones and the symbol desc. */
	maxsegs = (sc->jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - symbol_desc),
		("not enough segments %d\n", maxsegs));

	ctx.nsegs = maxsegs;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
				     *m_head, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	/* A zero segment count from the callback means "too many segments". */
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		/* Defragment the mbuf chain and retry the load once. */
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto fail;
		}
		*m_head = m;

		ctx.nsegs = maxsegs;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
					     txd->tx_dmamap, *m_head,
					     jme_dmamap_buf_cb, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
				  "could not load defragged TX mbuf\n");
			if (!error) {
				bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
						  txd->tx_dmamap);
				error = EFBIG;
			}
			goto fail;
		}
	} else if (error) {
		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
		goto fail;
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = 0;
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_tx_desc_cnt);

	/*
	 * tx_ndesc starts at 1 in the symbol-desc case (i == 0) and at 0
	 * otherwise; adding ctx.nsegs below yields the total number of
	 * ring slots this frame occupies.
	 */
	txd->tx_ndesc = 1 - i;
	for (; i < ctx.nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | flag64);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.  This is done last so the chip never
	 * sees a partially built chain.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += ctx.nsegs;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
1690
/*
 * Transmit start handler: drain the interface send queue into the TX
 * descriptor ring and kick the hardware.  Called with the interface
 * serializer held.
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;		/* frames queued to the ring this call */

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Without a link, queued frames would go nowhere; drop them. */
	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Try to reclaim completed descriptors when the ring is nearly full. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    sc->jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			/* jme_encap() frees the mbuf on failure. */
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes very long time under heavy load
		 * so cache TXCSR value and writes the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
1759
1760 static void
1761 jme_watchdog(struct ifnet *ifp)
1762 {
1763         struct jme_softc *sc = ifp->if_softc;
1764
1765         ASSERT_SERIALIZED(ifp->if_serializer);
1766
1767         if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1768                 if_printf(ifp, "watchdog timeout (missed link)\n");
1769                 ifp->if_oerrors++;
1770                 jme_init(sc);
1771                 return;
1772         }
1773
1774         jme_txeof(sc);
1775         if (sc->jme_cdata.jme_tx_cnt == 0) {
1776                 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1777                           "-- recovering\n");
1778                 if (!ifq_is_empty(&ifp->if_snd))
1779                         if_devstart(ifp);
1780                 return;
1781         }
1782
1783         if_printf(ifp, "watchdog timeout\n");
1784         ifp->if_oerrors++;
1785         jme_init(sc);
1786         if (!ifq_is_empty(&ifp->if_snd))
1787                 if_devstart(ifp);
1788 }
1789
/*
 * Interface ioctl handler.  Called with the interface serializer held.
 * Returns 0 on success or an errno.
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		/* Jumbo MTU is only valid on chips with JME_CAP_JUMBO. */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Only reprogram the RX filter when the
				 * PROMISC/ALLMULTI bits actually changed.
				 */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		/* Remember the flags to detect changes next time. */
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* TX csum cannot be toggled on when MTU >= TX FIFO size. */
		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if (IFCAP_TXCSUM & ifp->if_capabilities) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) &&
		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
			uint32_t reg;

			/* Toggle the RX checksum engine in the RXMAC reg. */
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1890
/*
 * Reprogram the GHC/RXMAC/TXMAC/TXPFC registers according to the
 * speed/duplex resolved by the MII layer, and apply the chip
 * workarounds (extended PHY FIFO for 100TX, half-duplex GPREG1
 * tweak) when the corresponding sc->jme_workaround bits are set.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	/* Pulse the MAC reset before reprogramming. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision detection/backoff needed. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast ethernet only chips cannot do 1000baseT. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
1985
/*
 * Interrupt handler.  Disables chip interrupts for the duration of the
 * handler, acknowledges the pending status bits (folding in the PCC
 * completion bits so the coalescing timer/counter are reset), services
 * RX and TX completions, then re-enables interrupts.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(ifp->if_serializer);

	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	/* Not our interrupt, or all-ones readback (device presumably gone). */
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	/* Same treatment for each RX ring currently in use. */
	for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
				  jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			/* Reclaim finished TX descs and restart TX if any. */
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
2048
/*
 * Reclaim TX descriptors whose frames the hardware has completed:
 * update error/collision statistics, unload DMA maps, free mbufs,
 * and clear IFF_OACTIVE once enough descriptors are free again.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	uint32_t status;
	int cons, nsegs;

	cons = sc->jme_cdata.jme_tx_cons;
	/* Ring empty: nothing to reclaim. */
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map,
			BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
			("%s: freeing NULL mbuf!\n", __func__));

		status = le32toh(txd->tx_desc->flags);
		/* Still owned by the hardware: stop here. */
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
			("%s: Active Tx desc counter was garbled\n", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* All frames done: cancel the watchdog. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		ifp->if_timer = 0;

	/* Resume transmission once a full spare's worth of descs is free. */
	if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
	    sc->jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map,
			BUS_DMASYNC_PREWRITE);
}
2123
2124 static __inline void
2125 jme_discard_rxbufs(struct jme_softc *sc, int ring, int cons, int count)
2126 {
2127         struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2128         int i;
2129
2130         for (i = 0; i < count; ++i) {
2131                 struct jme_desc *desc = &rdata->jme_rx_ring[cons];
2132
2133                 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2134                 desc->buflen = htole32(MCLBYTES);
2135                 JME_DESC_INC(cons, sc->jme_rx_desc_cnt);
2136         }
2137 }
2138
/*
 * Receive a frame.
 *
 * Assemble the (possibly multi-segment) frame at the current RX
 * consumer index of 'ring' into a single mbuf chain, fill in
 * checksum/VLAN metadata from the descriptor flags, and pass it up
 * via ether_input_chain().  On error or buffer shortage the
 * descriptors are recycled and the frame is dropped.
 */
static void
jme_rxpkt(struct jme_softc *sc, int ring, struct mbuf_chain *chain)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);

	if (status & JME_RX_ERR_STAT) {
		/* Bad frame: recycle all of its descriptors. */
		ifp->if_ierrors++;
		jme_discard_rxbufs(sc, ring, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
		return;
	}

	/* Total frame length, minus the hardware's IP alignment padding. */
	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, sc->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, ring, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(sc, ring, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(sc, ring);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			/* XXX assert PKTHDR? */
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				/* L4 csum only valid on unfragmented TCP/UDP. */
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				     (JME_RD_TCP | JME_RD_TCPCSUM) ||
				     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				     (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;
			/* Pass it on. */
			ether_input_chain(ifp, m, chain);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc, ring);
		}
	}

	/* Advance the consumer index past this frame. */
	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= sc->jme_rx_desc_cnt;
}
2271
2272 static void
2273 jme_rxeof(struct jme_softc *sc, int ring, int count)
2274 {
2275         struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2276         struct jme_desc *desc;
2277         int nsegs, prog, pktlen;
2278         struct mbuf_chain chain[MAXCPU];
2279
2280         ether_input_chain_init(chain);
2281
2282         bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2283                         BUS_DMASYNC_POSTREAD);
2284
2285         prog = 0;
2286         for (;;) {
2287 #ifdef DEVICE_POLLING
2288                 if (count >= 0 && count-- == 0)
2289                         break;
2290 #endif
2291                 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
2292                 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2293                         break;
2294                 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2295                         break;
2296
2297                 /*
2298                  * Check number of segments against received bytes.
2299                  * Non-matching value would indicate that hardware
2300                  * is still trying to update Rx descriptors. I'm not
2301                  * sure whether this check is needed.
2302                  */
2303                 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2304                 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2305                 if (nsegs != howmany(pktlen, MCLBYTES)) {
2306                         if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
2307                                   "and packet size(%d) mismach\n",
2308                                   nsegs, pktlen);
2309                         break;
2310                 }
2311
2312                 /* Received a frame. */
2313                 jme_rxpkt(sc, ring, chain);
2314                 prog++;
2315         }
2316
2317         if (prog > 0) {
2318                 bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2319                                 BUS_DMASYNC_PREWRITE);
2320                 ether_input_dispatch(chain);
2321         }
2322 }
2323
2324 static void
2325 jme_tick(void *xsc)
2326 {
2327         struct jme_softc *sc = xsc;
2328         struct ifnet *ifp = &sc->arpcom.ac_if;
2329         struct mii_data *mii = device_get_softc(sc->jme_miibus);
2330
2331         lwkt_serialize_enter(ifp->if_serializer);
2332
2333         mii_tick(mii);
2334         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2335
2336         lwkt_serialize_exit(ifp->if_serializer);
2337 }
2338
/*
 * Soft-reset the chip by pulsing the GHC reset bit.
 * (Explicitly stopping RX/TX first is currently compiled out.)
 */
static void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);
#endif
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
}
2351
2352 static void
2353 jme_init(void *xsc)
2354 {
2355         struct jme_softc *sc = xsc;
2356         struct ifnet *ifp = &sc->arpcom.ac_if;
2357         struct mii_data *mii;
2358         uint8_t eaddr[ETHER_ADDR_LEN];
2359         bus_addr_t paddr;
2360         uint32_t reg;
2361         int error, r;
2362
2363         ASSERT_SERIALIZED(ifp->if_serializer);
2364
2365         /*
2366          * Cancel any pending I/O.
2367          */
2368         jme_stop(sc);
2369
2370         /*
2371          * Reset the chip to a known state.
2372          */
2373         jme_reset(sc);
2374
2375         sc->jme_txd_spare =
2376         howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES);
2377         KKASSERT(sc->jme_txd_spare >= 1);
2378
2379         /*
2380          * If we use 64bit address mode for transmitting, each Tx request
2381          * needs one more symbol descriptor.
2382          */
2383         if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
2384                 sc->jme_txd_spare += 1;
2385
2386         if (sc->jme_flags & JME_FLAG_RSS) {
2387                 sc->jme_rx_ring_inuse = sc->jme_rx_ring_cnt;
2388                 KKASSERT(sc->jme_rx_ring_inuse > 1);
2389                 /* TODO: enable RSS */
2390         } else {
2391                 sc->jme_rx_ring_inuse = 1;
2392
2393                 /* Disable RSS. */
2394                 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2395         }
2396
2397         /* Init RX descriptors */
2398         for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2399                 error = jme_init_rx_ring(sc, r);
2400                 if (error) {
2401                         if_printf(ifp, "initialization failed: "
2402                                   "no memory for %dth RX ring.\n", r);
2403                         jme_stop(sc);
2404                         return;
2405                 }
2406         }
2407
2408         /* Init TX descriptors */
2409         jme_init_tx_ring(sc);
2410
2411         /* Initialize shadow status block. */
2412         jme_init_ssb(sc);
2413
2414         /* Reprogram the station address. */
2415         bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2416         CSR_WRITE_4(sc, JME_PAR0,
2417             eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2418         CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2419
2420         /*
2421          * Configure Tx queue.
2422          *  Tx priority queue weight value : 0
2423          *  Tx FIFO threshold for processing next packet : 16QW
2424          *  Maximum Tx DMA length : 512
2425          *  Allow Tx DMA burst.
2426          */
2427         sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2428         sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2429         sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2430         sc->jme_txcsr |= sc->jme_tx_dma_size;
2431         sc->jme_txcsr |= TXCSR_DMA_BURST;
2432         CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2433
2434         /* Set Tx descriptor counter. */
2435         CSR_WRITE_4(sc, JME_TXQDC, sc->jme_tx_desc_cnt);
2436
2437         /* Set Tx ring address to the hardware. */
2438         paddr = sc->jme_cdata.jme_tx_ring_paddr;
2439         CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2440         CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2441
2442         /* Configure TxMAC parameters. */
2443         reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2444         reg |= TXMAC_THRESH_1_PKT;
2445         reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2446         CSR_WRITE_4(sc, JME_TXMAC, reg);
2447
2448         /*
2449          * Configure Rx queue.
2450          *  FIFO full threshold for transmitting Tx pause packet : 128T
2451          *  FIFO threshold for processing next packet : 128QW
2452          *  Rx queue 0 select
2453          *  Max Rx DMA length : 128
2454          *  Rx descriptor retry : 32
2455          *  Rx descriptor retry time gap : 256ns
2456          *  Don't receive runt/bad frame.
2457          */
2458         sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2459         /*
2460          * Since Rx FIFO size is 4K bytes, receiving frames larger
2461          * than 4K bytes will suffer from Rx FIFO overruns. So
2462          * decrease FIFO threshold to reduce the FIFO overruns for
2463          * frames larger than 4000 bytes.
2464          * For best performance of standard MTU sized frames use
2465          * maximum allowable FIFO threshold, 128QW.
2466          */
2467         if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2468             JME_RX_FIFO_SIZE)
2469                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2470         else
2471                 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2472         sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2473         sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2474         sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2475         /* XXX TODO DROP_BAD */
2476
2477         for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
2478                 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2479
2480                 /* Set Rx descriptor counter. */
2481                 CSR_WRITE_4(sc, JME_RXQDC, sc->jme_rx_desc_cnt);
2482
2483                 /* Set Rx ring address to the hardware. */
2484                 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
2485                 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2486                 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2487         }
2488
2489         /* Clear receive filter. */
2490         CSR_WRITE_4(sc, JME_RXMAC, 0);
2491
2492         /* Set up the receive filter. */
2493         jme_set_filter(sc);
2494         jme_set_vlan(sc);
2495
2496         /*
2497          * Disable all WOL bits as WOL can interfere normal Rx
2498          * operation. Also clear WOL detection status bits.
2499          */
2500         reg = CSR_READ_4(sc, JME_PMCS);
2501         reg &= ~PMCS_WOL_ENB_MASK;
2502         CSR_WRITE_4(sc, JME_PMCS, reg);
2503
2504         /*
2505          * Pad 10bytes right before received frame. This will greatly
2506          * help Rx performance on strict-alignment architectures as
2507          * it does not need to copy the frame to align the payload.
2508          */
2509         reg = CSR_READ_4(sc, JME_RXMAC);
2510         reg |= RXMAC_PAD_10BYTES;
2511
2512         if (ifp->if_capenable & IFCAP_RXCSUM)
2513                 reg |= RXMAC_CSUM_ENB;
2514         CSR_WRITE_4(sc, JME_RXMAC, reg);
2515
2516         /* Configure general purpose reg0 */
2517         reg = CSR_READ_4(sc, JME_GPREG0);
2518         reg &= ~GPREG0_PCC_UNIT_MASK;
2519         /* Set PCC timer resolution to micro-seconds unit. */
2520         reg |= GPREG0_PCC_UNIT_US;
2521         /*
2522          * Disable all shadow register posting as we have to read
2523          * JME_INTR_STATUS register in jme_intr. Also it seems
2524          * that it's hard to synchronize interrupt status between
2525          * hardware and software with shadow posting due to
2526          * requirements of bus_dmamap_sync(9).
2527          */
2528         reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2529             GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2530             GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2531             GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2532         /* Disable posting of DW0. */
2533         reg &= ~GPREG0_POST_DW0_ENB;
2534         /* Clear PME message. */
2535         reg &= ~GPREG0_PME_ENB;
2536         /* Set PHY address. */
2537         reg &= ~GPREG0_PHY_ADDR_MASK;
2538         reg |= sc->jme_phyaddr;
2539         CSR_WRITE_4(sc, JME_GPREG0, reg);
2540
2541         /* Configure Tx queue 0 packet completion coalescing. */
2542         jme_set_tx_coal(sc);
2543
2544         /* Configure Rx queue 0 packet completion coalescing. */
2545         jme_set_rx_coal(sc);
2546
2547         /* Configure shadow status block but don't enable posting. */
2548         paddr = sc->jme_cdata.jme_ssb_block_paddr;
2549         CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2550         CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2551
2552         /* Disable Timer 1 and Timer 2. */
2553         CSR_WRITE_4(sc, JME_TIMER1, 0);
2554         CSR_WRITE_4(sc, JME_TIMER2, 0);
2555
2556         /* Configure retry transmit period, retry limit value. */
2557         CSR_WRITE_4(sc, JME_TXTRHD,
2558             ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2559             TXTRHD_RT_PERIOD_MASK) |
2560             ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2561             TXTRHD_RT_LIMIT_SHIFT));
2562
2563 #ifdef DEVICE_POLLING
2564         if (!(ifp->if_flags & IFF_POLLING))
2565 #endif
2566         /* Initialize the interrupt mask. */
2567         CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2568         CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2569
2570         /*
2571          * Enabling Tx/Rx DMA engines and Rx queue processing is
2572          * done after detection of valid link in jme_miibus_statchg.
2573          */
2574         sc->jme_flags &= ~JME_FLAG_LINK;
2575
2576         /* Set the current media. */
2577         mii = device_get_softc(sc->jme_miibus);
2578         mii_mediachg(mii);
2579
2580         callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2581
2582         ifp->if_flags |= IFF_RUNNING;
2583         ifp->if_flags &= ~IFF_OACTIVE;
2584 }
2585
/*
 * Bring the interface down: mark it not-running, silence interrupts
 * and shadow status posting, stop the Rx/Tx engines, and release
 * every mbuf still attached to the RX and TX rings.  Statement order
 * matters: DMA engines are halted before any buffers are unloaded.
 *
 * Called with the interface serializer held.
 */
static void
jme_stop(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct jme_txdesc *txd;
        struct jme_rxdesc *rxd;
        struct jme_rxdata *rdata;
        int i, r;

        ASSERT_SERIALIZED(ifp->if_serializer);

        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;

        callout_stop(&sc->jme_tick_ch);
        sc->jme_flags &= ~JME_FLAG_LINK;

        /*
         * Disable interrupts.
         */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
        CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

        /* Disable updating shadow status block. */
        CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
            CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

        /* Stop receiver, transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);

        /*
         * Free partial finished RX segments
         */
        for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                if (rdata->jme_rxhead != NULL)
                        m_freem(rdata->jme_rxhead);
                JME_RXCHAIN_RESET(sc, r);
        }

        /*
         * Free RX and TX mbufs still in the queues.
         */
        for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
                rdata = &sc->jme_cdata.jme_rx_data[r];
                for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
                        rxd = &rdata->jme_rxdesc[i];
                        if (rxd->rx_m != NULL) {
                                /* Unload the DMA map before freeing. */
                                bus_dmamap_unload(rdata->jme_rx_tag,
                                                  rxd->rx_dmamap);
                                m_freem(rxd->rx_m);
                                rxd->rx_m = NULL;
                        }
                }
        }
        for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
                txd = &sc->jme_cdata.jme_txdesc[i];
                if (txd->tx_m != NULL) {
                        bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
                            txd->tx_dmamap);
                        m_freem(txd->tx_m);
                        txd->tx_m = NULL;
                        txd->tx_ndesc = 0;
                }
        }
}
2656
2657 static void
2658 jme_stop_tx(struct jme_softc *sc)
2659 {
2660         uint32_t reg;
2661         int i;
2662
2663         reg = CSR_READ_4(sc, JME_TXCSR);
2664         if ((reg & TXCSR_TX_ENB) == 0)
2665                 return;
2666         reg &= ~TXCSR_TX_ENB;
2667         CSR_WRITE_4(sc, JME_TXCSR, reg);
2668         for (i = JME_TIMEOUT; i > 0; i--) {
2669                 DELAY(1);
2670                 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2671                         break;
2672         }
2673         if (i == 0)
2674                 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2675 }
2676
2677 static void
2678 jme_stop_rx(struct jme_softc *sc)
2679 {
2680         uint32_t reg;
2681         int i;
2682
2683         reg = CSR_READ_4(sc, JME_RXCSR);
2684         if ((reg & RXCSR_RX_ENB) == 0)
2685                 return;
2686         reg &= ~RXCSR_RX_ENB;
2687         CSR_WRITE_4(sc, JME_RXCSR, reg);
2688         for (i = JME_TIMEOUT; i > 0; i--) {
2689                 DELAY(1);
2690                 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2691                         break;
2692         }
2693         if (i == 0)
2694                 device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2695 }
2696
2697 static void
2698 jme_init_tx_ring(struct jme_softc *sc)
2699 {
2700         struct jme_chain_data *cd;
2701         struct jme_txdesc *txd;
2702         int i;
2703
2704         sc->jme_cdata.jme_tx_prod = 0;
2705         sc->jme_cdata.jme_tx_cons = 0;
2706         sc->jme_cdata.jme_tx_cnt = 0;
2707
2708         cd = &sc->jme_cdata;
2709         bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
2710         for (i = 0; i < sc->jme_tx_desc_cnt; i++) {
2711                 txd = &sc->jme_cdata.jme_txdesc[i];
2712                 txd->tx_m = NULL;
2713                 txd->tx_desc = &cd->jme_tx_ring[i];
2714                 txd->tx_ndesc = 0;
2715         }
2716
2717         bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2718                         sc->jme_cdata.jme_tx_ring_map,
2719                         BUS_DMASYNC_PREWRITE);
2720 }
2721
2722 static void
2723 jme_init_ssb(struct jme_softc *sc)
2724 {
2725         struct jme_chain_data *cd;
2726
2727         cd = &sc->jme_cdata;
2728         bzero(cd->jme_ssb_block, JME_SSB_SIZE);
2729         bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2730                         BUS_DMASYNC_PREWRITE);
2731 }
2732
2733 static int
2734 jme_init_rx_ring(struct jme_softc *sc, int ring)
2735 {
2736         struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
2737         struct jme_rxdesc *rxd;
2738         int i;
2739
2740         KKASSERT(rdata->jme_rxhead == NULL &&
2741                  rdata->jme_rxtail == NULL &&
2742                  rdata->jme_rxlen == 0);
2743         rdata->jme_rx_cons = 0;
2744
2745         bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(sc));
2746         for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
2747                 int error;
2748
2749                 rxd = &rdata->jme_rxdesc[i];
2750                 rxd->rx_m = NULL;
2751                 rxd->rx_desc = &rdata->jme_rx_ring[i];
2752                 error = jme_newbuf(sc, ring, rxd, 1);
2753                 if (error)
2754                         return error;
2755         }
2756
2757         bus_dmamap_sync(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
2758                         BUS_DMASYNC_PREWRITE);
2759         return 0;
2760 }
2761
/*
 * Attach a freshly allocated mbuf cluster to the RX descriptor 'rxd'
 * of the given ring.  With 'init' set the allocation may sleep and
 * load failures are logged.  On success the descriptor is handed back
 * to the hardware (OWN bit set); on any failure the descriptor's old
 * mbuf, if present, is left untouched and an errno is returned.
 */
static int
jme_newbuf(struct jme_softc *sc, int ring, struct jme_rxdesc *rxd, int init)
{
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
        struct jme_desc *desc;
        struct mbuf *m;
        struct jme_dmamap_ctx ctx;
        bus_dma_segment_t segs;
        bus_dmamap_t map;
        int error;

        m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return ENOBUFS;
        /*
         * JMC250 has 64bit boundary alignment limitation so jme(4)
         * takes advantage of 10 bytes padding feature of hardware
         * in order not to copy entire frame to align IP header on
         * 32bit boundary.
         */
        m->m_len = m->m_pkthdr.len = MCLBYTES;

        /* Load the new mbuf into the ring's spare DMA map. */
        ctx.nsegs = 1;
        ctx.segs = &segs;
        error = bus_dmamap_load_mbuf(rdata->jme_rx_tag,
                                     rdata->jme_rx_sparemap,
                                     m, jme_dmamap_buf_cb, &ctx,
                                     BUS_DMA_NOWAIT);
        if (error || ctx.nsegs == 0) {
                /* nsegs == 0 with no error means the buffer didn't fit. */
                if (!error) {
                        bus_dmamap_unload(rdata->jme_rx_tag,
                                          rdata->jme_rx_sparemap);
                        error = EFBIG;
                        if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
                }
                m_freem(m);

                if (init)
                        if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
                return error;
        }

        /* Retire the previously attached buffer, if any. */
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
                                BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
        }
        /* Swap the loaded spare map into the descriptor slot. */
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = rdata->jme_rx_sparemap;
        rdata->jme_rx_sparemap = map;
        rxd->rx_m = m;

        /* Publish the buffer to the hardware; OWN gives it away. */
        desc = rxd->rx_desc;
        desc->buflen = htole32(segs.ds_len);
        desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
        desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
        desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

        return 0;
}
2822
2823 static void
2824 jme_set_vlan(struct jme_softc *sc)
2825 {
2826         struct ifnet *ifp = &sc->arpcom.ac_if;
2827         uint32_t reg;
2828
2829         ASSERT_SERIALIZED(ifp->if_serializer);
2830
2831         reg = CSR_READ_4(sc, JME_RXMAC);
2832         reg &= ~RXMAC_VLAN_ENB;
2833         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2834                 reg |= RXMAC_VLAN_ENB;
2835         CSR_WRITE_4(sc, JME_RXMAC, reg);
2836 }
2837
2838 static void
2839 jme_set_filter(struct jme_softc *sc)
2840 {
2841         struct ifnet *ifp = &sc->arpcom.ac_if;
2842         struct ifmultiaddr *ifma;
2843         uint32_t crc;
2844         uint32_t mchash[2];
2845         uint32_t rxcfg;
2846
2847         ASSERT_SERIALIZED(ifp->if_serializer);
2848
2849         rxcfg = CSR_READ_4(sc, JME_RXMAC);
2850         rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2851             RXMAC_ALLMULTI);
2852
2853         /*
2854          * Always accept frames destined to our station address.
2855          * Always accept broadcast frames.
2856          */
2857         rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
2858
2859         if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2860                 if (ifp->if_flags & IFF_PROMISC)
2861                         rxcfg |= RXMAC_PROMISC;
2862                 if (ifp->if_flags & IFF_ALLMULTI)
2863                         rxcfg |= RXMAC_ALLMULTI;
2864                 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2865                 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2866                 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2867                 return;
2868         }
2869
2870         /*
2871          * Set up the multicast address filter by passing all multicast
2872          * addresses through a CRC generator, and then using the low-order
2873          * 6 bits as an index into the 64 bit multicast hash table.  The
2874          * high order bits select the register, while the rest of the bits
2875          * select the bit within the register.
2876          */
2877         rxcfg |= RXMAC_MULTICAST;
2878         bzero(mchash, sizeof(mchash));
2879
2880         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2881                 if (ifma->ifma_addr->sa_family != AF_LINK)
2882                         continue;
2883                 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
2884                     ifma->ifma_addr), ETHER_ADDR_LEN);
2885
2886                 /* Just want the 6 least significant bits. */
2887                 crc &= 0x3f;
2888
2889                 /* Set the corresponding bit in the hash table. */
2890                 mchash[crc >> 5] |= 1 << (crc & 0x1f);
2891         }
2892
2893         CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
2894         CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
2895         CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
2896 }
2897
2898 static int
2899 jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
2900 {
2901         struct jme_softc *sc = arg1;
2902         struct ifnet *ifp = &sc->arpcom.ac_if;
2903         int error, v;
2904
2905         lwkt_serialize_enter(ifp->if_serializer);
2906
2907         v = sc->jme_tx_coal_to;
2908         error = sysctl_handle_int(oidp, &v, 0, req);
2909         if (error || req->newptr == NULL)
2910                 goto back;
2911
2912         if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
2913                 error = EINVAL;
2914                 goto back;
2915         }
2916
2917         if (v != sc->jme_tx_coal_to) {
2918                 sc->jme_tx_coal_to = v;
2919                 if (ifp->if_flags & IFF_RUNNING)
2920                         jme_set_tx_coal(sc);
2921         }
2922 back:
2923         lwkt_serialize_exit(ifp->if_serializer);
2924         return error;
2925 }
2926
2927 static int
2928 jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
2929 {
2930         struct jme_softc *sc = arg1;
2931         struct ifnet *ifp = &sc->arpcom.ac_if;
2932         int error, v;
2933
2934         lwkt_serialize_enter(ifp->if_serializer);
2935
2936         v = sc->jme_tx_coal_pkt;
2937         error = sysctl_handle_int(oidp, &v, 0, req);
2938         if (error || req->newptr == NULL)
2939                 goto back;
2940
2941         if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
2942                 error = EINVAL;
2943                 goto back;
2944         }
2945
2946         if (v != sc->jme_tx_coal_pkt) {
2947                 sc->jme_tx_coal_pkt = v;
2948                 if (ifp->if_flags & IFF_RUNNING)
2949                         jme_set_tx_coal(sc);
2950         }
2951 back:
2952         lwkt_serialize_exit(ifp->if_serializer);
2953         return error;
2954 }
2955
2956 static int
2957 jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
2958 {
2959         struct jme_softc *sc = arg1;
2960         struct ifnet *ifp = &sc->arpcom.ac_if;
2961         int error, v;
2962
2963         lwkt_serialize_enter(ifp->if_serializer);
2964
2965         v = sc->jme_rx_coal_to;
2966         error = sysctl_handle_int(oidp, &v, 0, req);
2967         if (error || req->newptr == NULL)
2968                 goto back;
2969
2970         if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
2971                 error = EINVAL;
2972                 goto back;
2973         }
2974
2975         if (v != sc->jme_rx_coal_to) {
2976                 sc->jme_rx_coal_to = v;
2977                 if (ifp->if_flags & IFF_RUNNING)
2978                         jme_set_rx_coal(sc);
2979         }
2980 back:
2981         lwkt_serialize_exit(ifp->if_serializer);
2982         return error;
2983 }
2984
2985 static int
2986 jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
2987 {
2988         struct jme_softc *sc = arg1;
2989         struct ifnet *ifp = &sc->arpcom.ac_if;
2990         int error, v;
2991
2992         lwkt_serialize_enter(ifp->if_serializer);
2993
2994         v = sc->jme_rx_coal_pkt;
2995         error = sysctl_handle_int(oidp, &v, 0, req);
2996         if (error || req->newptr == NULL)
2997                 goto back;
2998
2999         if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3000                 error = EINVAL;
3001                 goto back;
3002         }
3003
3004         if (v != sc->jme_rx_coal_pkt) {
3005                 sc->jme_rx_coal_pkt = v;
3006                 if (ifp->if_flags & IFF_RUNNING)
3007                         jme_set_rx_coal(sc);
3008         }
3009 back:
3010         lwkt_serialize_exit(ifp->if_serializer);
3011         return error;
3012 }
3013
3014 static void
3015 jme_set_tx_coal(struct jme_softc *sc)
3016 {
3017         uint32_t reg;
3018
3019         reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3020             PCCTX_COAL_TO_MASK;
3021         reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3022             PCCTX_COAL_PKT_MASK;
3023         reg |= PCCTX_COAL_TXQ0;
3024         CSR_WRITE_4(sc, JME_PCCTX, reg);
3025 }
3026
3027 static void
3028 jme_set_rx_coal(struct jme_softc *sc)
3029 {
3030         uint32_t reg;
3031         int r;
3032
3033         reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3034             PCCRX_COAL_TO_MASK;
3035         reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3036             PCCRX_COAL_PKT_MASK;
3037         for (r = 0; r < sc->jme_rx_ring_cnt; ++r) {
3038                 if (r < sc->jme_rx_ring_inuse)
3039                         CSR_WRITE_4(sc, JME_PCCRX(r), reg);
3040                 else
3041                         CSR_WRITE_4(sc, JME_PCCRX(r), 0);
3042         }
3043 }
3044
3045 #ifdef DEVICE_POLLING
3046
3047 static void
3048 jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
3049 {
3050         struct jme_softc *sc = ifp->if_softc;
3051         uint32_t status;
3052         int r;
3053
3054         ASSERT_SERIALIZED(ifp->if_serializer);
3055
3056         switch (cmd) {
3057         case POLL_REGISTER:
3058                 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3059                 break;
3060
3061         case POLL_DEREGISTER:
3062                 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
3063                 break;
3064
3065         case POLL_AND_CHECK_STATUS:
3066         case POLL_ONLY:
3067                 status = CSR_READ_4(sc, JME_INTR_STATUS);
3068                 for (r = 0; r < sc->jme_rx_ring_inuse; ++r)
3069                         jme_rxeof(sc, r, count);
3070
3071                 if (status & INTR_RXQ_DESC_EMPTY) {
3072                         CSR_WRITE_4(sc, JME_INTR_STATUS, status);
3073                         CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
3074                             RXCSR_RX_ENB | RXCSR_RXQ_START);
3075                 }
3076
3077                 jme_txeof(sc);
3078                 if (!ifq_is_empty(&ifp->if_snd))
3079                         if_devstart(ifp);
3080                 break;
3081         }
3082 }
3083
3084 #endif  /* DEVICE_POLLING */
3085
/*
 * Allocate, map and load the descriptor memory for one RX ring.
 * On success the ring's bus address is recorded in the per-ring data
 * and 0 is returned; on failure everything allocated so far is torn
 * down and an errno is returned.
 */
static int
jme_rxring_dma_alloc(struct jme_softc *sc, bus_addr_t lowaddr, int ring)
{
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
        bus_addr_t busaddr;
        int error;

        /* Create tag for Rx ring. */
        error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
            JME_RX_RING_ALIGN, 0,       /* algnmnt, boundary */
            lowaddr,                    /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            JME_RX_RING_SIZE(sc),       /* maxsize */
            1,                          /* nsegments */
            JME_RX_RING_SIZE(sc),       /* maxsegsize */
            0,                          /* flags */
            &rdata->jme_rx_ring_tag);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not allocate %dth Rx ring DMA tag.\n", ring);
                return error;
        }

        /* Allocate DMA'able memory for RX ring */
        error = bus_dmamem_alloc(rdata->jme_rx_ring_tag,
                                 (void **)&rdata->jme_rx_ring,
                                 BUS_DMA_WAITOK | BUS_DMA_ZERO,
                                 &rdata->jme_rx_ring_map);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not allocate DMA'able memory for "
                    "%dth Rx ring.\n", ring);
                /* Undo the tag creation above. */
                bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
                rdata->jme_rx_ring_tag = NULL;
                return error;
        }

        /* Load the DMA map for Rx ring. */
        error = bus_dmamap_load(rdata->jme_rx_ring_tag, rdata->jme_rx_ring_map,
                                rdata->jme_rx_ring, JME_RX_RING_SIZE(sc),
                                jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not load DMA'able memory for %dth Rx ring.\n", ring);
                /* Undo both the memory allocation and tag creation. */
                bus_dmamem_free(rdata->jme_rx_ring_tag, rdata->jme_rx_ring,
                                rdata->jme_rx_ring_map);
                bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
                rdata->jme_rx_ring_tag = NULL;
                return error;
        }
        /* Bus address filled in by jme_dmamap_ring_cb. */
        rdata->jme_rx_ring_paddr = busaddr;

        return 0;
}
3141
/*
 * Create the DMA tag, the spare map and one map per descriptor for
 * the receive buffers of one RX ring.  On failure, every map created
 * so far is destroyed along with the tag and an errno is returned.
 */
static int
jme_rxbuf_dma_alloc(struct jme_softc *sc, int ring)
{
        struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[ring];
        int i, error;

        /* Create tag for Rx buffers. */
        error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
            JME_RX_BUF_ALIGN, 0,        /* algnmnt, boundary */
            sc->jme_lowaddr,            /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filter, filterarg */
            MCLBYTES,                   /* maxsize */
            1,                          /* nsegments */
            MCLBYTES,                   /* maxsegsize */
            0,                          /* flags */
            &rdata->jme_rx_tag);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not create %dth Rx DMA tag.\n", ring);
                return error;
        }

        /* Create DMA maps for Rx buffers. */
        error = bus_dmamap_create(rdata->jme_rx_tag, 0,
                                  &rdata->jme_rx_sparemap);
        if (error) {
                device_printf(sc->jme_dev,
                    "could not create %dth spare Rx dmamap.\n", ring);
                bus_dma_tag_destroy(rdata->jme_rx_tag);
                rdata->jme_rx_tag = NULL;
                return error;
        }
        for (i = 0; i < sc->jme_rx_desc_cnt; i++) {
                struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

                error = bus_dmamap_create(rdata->jme_rx_tag, 0,
                                          &rxd->rx_dmamap);
                if (error) {
                        int j;

                        device_printf(sc->jme_dev,
                            "could not create %dth Rx dmamap "
                            "for %dth RX ring.\n", i, ring);

                        /* Destroy the maps created before the failure. */
                        for (j = 0; j < i; ++j) {
                                rxd = &rdata->jme_rxdesc[j];
                                bus_dmamap_destroy(rdata->jme_rx_tag,
                                                   rxd->rx_dmamap);
                        }
                        bus_dmamap_destroy(rdata->jme_rx_tag,
                                           rdata->jme_rx_sparemap);
                        bus_dma_tag_destroy(rdata->jme_rx_tag);
                        rdata->jme_rx_tag = NULL;
                        return error;
                }
        }
        return 0;
}
3201
3202 static void
3203 jme_rx_intr(struct jme_softc *sc, uint32_t status)
3204 {
3205         int r;
3206
3207         for (r = 0; r < sc->jme_rx_ring_inuse; ++r) {
3208                 if (status & jme_rx_status[r].jme_coal)
3209                         jme_rxeof(sc, r, -1);
3210         }
3211 }