jme: Add TSO support
[dragonfly.git] / sys / dev / netif / jme / if_jme.c
CommitLineData
76fbb0b9
SZ
1/*-
2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
76fbb0b9
SZ
28 */
29
9de40864 30#include "opt_polling.h"
93bfe1b8 31#include "opt_jme.h"
9de40864 32
76fbb0b9
SZ
33#include <sys/param.h>
34#include <sys/endian.h>
35#include <sys/kernel.h>
36#include <sys/bus.h>
37#include <sys/interrupt.h>
38#include <sys/malloc.h>
39#include <sys/proc.h>
40#include <sys/rman.h>
41#include <sys/serialize.h>
31f0d5a2 42#include <sys/serialize2.h>
76fbb0b9
SZ
43#include <sys/socket.h>
44#include <sys/sockio.h>
45#include <sys/sysctl.h>
46
47#include <net/ethernet.h>
48#include <net/if.h>
49#include <net/bpf.h>
50#include <net/if_arp.h>
51#include <net/if_dl.h>
52#include <net/if_media.h>
53#include <net/ifq_var.h>
24dd1705 54#include <net/toeplitz.h>
a6acc6e2 55#include <net/toeplitz2.h>
76fbb0b9
SZ
56#include <net/vlan/if_vlan_var.h>
57#include <net/vlan/if_vlan_ether.h>
58
1bedd927
SZ
59#include <netinet/ip.h>
60#include <netinet/tcp.h>
a6acc6e2 61
76fbb0b9 62#include <dev/netif/mii_layer/miivar.h>
dbe37f03 63#include <dev/netif/mii_layer/jmphyreg.h>
76fbb0b9
SZ
64
65#include <bus/pci/pcireg.h>
66#include <bus/pci/pcivar.h>
67#include <bus/pci/pcidevs.h>
68
08c76ecf
SZ
69#include <dev/netif/jme/if_jmereg.h>
70#include <dev/netif/jme/if_jmevar.h>
76fbb0b9
SZ
71
72#include "miibus_if.h"
73
29890f78
SZ
74#define JME_TX_SERIALIZE 1
75#define JME_RX_SERIALIZE 2
76
76fbb0b9
SZ
77#define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
78
760c056c
SZ
79#ifdef JME_RSS_DEBUG
80#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
81do { \
66f75939 82 if ((sc)->jme_rss_debug >= (lvl)) \
760c056c
SZ
83 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
84} while (0)
85#else /* !JME_RSS_DEBUG */
86#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
87#endif /* JME_RSS_DEBUG */
88
76fbb0b9
SZ
89static int jme_probe(device_t);
90static int jme_attach(device_t);
91static int jme_detach(device_t);
92static int jme_shutdown(device_t);
93static int jme_suspend(device_t);
94static int jme_resume(device_t);
95
96static int jme_miibus_readreg(device_t, int, int);
97static int jme_miibus_writereg(device_t, int, int, int);
98static void jme_miibus_statchg(device_t);
99
100static void jme_init(void *);
101static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
102static void jme_start(struct ifnet *);
103static void jme_watchdog(struct ifnet *);
104static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
105static int jme_mediachange(struct ifnet *);
9de40864
SZ
106#ifdef DEVICE_POLLING
107static void jme_poll(struct ifnet *, enum poll_cmd, int);
108#endif
31f0d5a2
SZ
109static void jme_serialize(struct ifnet *, enum ifnet_serialize);
110static void jme_deserialize(struct ifnet *, enum ifnet_serialize);
111static int jme_tryserialize(struct ifnet *, enum ifnet_serialize);
112#ifdef INVARIANTS
113static void jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
114 boolean_t);
115#endif
76fbb0b9
SZ
116
117static void jme_intr(void *);
58880b0d
SZ
118static void jme_msix_tx(void *);
119static void jme_msix_rx(void *);
76fbb0b9 120static void jme_txeof(struct jme_softc *);
dea2452a 121static void jme_rxeof(struct jme_rxdata *, int);
4447c752 122static void jme_rx_intr(struct jme_softc *, uint32_t);
76fbb0b9 123
58880b0d
SZ
124static int jme_msix_setup(device_t);
125static void jme_msix_teardown(device_t, int);
126static int jme_intr_setup(device_t);
127static void jme_intr_teardown(device_t);
128static void jme_msix_try_alloc(device_t);
129static void jme_msix_free(device_t);
130static int jme_intr_alloc(device_t);
131static void jme_intr_free(device_t);
76fbb0b9 132static int jme_dma_alloc(struct jme_softc *);
0b3414d9 133static void jme_dma_free(struct jme_softc *);
dea2452a 134static int jme_init_rx_ring(struct jme_rxdata *);
76fbb0b9
SZ
135static void jme_init_tx_ring(struct jme_softc *);
136static void jme_init_ssb(struct jme_softc *);
dea2452a 137static int jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
76fbb0b9 138static int jme_encap(struct jme_softc *, struct mbuf **);
dea2452a
SZ
139static void jme_rxpkt(struct jme_rxdata *);
140static int jme_rxring_dma_alloc(struct jme_rxdata *);
141static int jme_rxbuf_dma_alloc(struct jme_rxdata *);
064b75ed 142static int jme_rxbuf_dma_filter(void *, bus_addr_t);
76fbb0b9
SZ
143
144static void jme_tick(void *);
145static void jme_stop(struct jme_softc *);
146static void jme_reset(struct jme_softc *);
58880b0d 147static void jme_set_msinum(struct jme_softc *);
76fbb0b9
SZ
148static void jme_set_vlan(struct jme_softc *);
149static void jme_set_filter(struct jme_softc *);
150static void jme_stop_tx(struct jme_softc *);
151static void jme_stop_rx(struct jme_softc *);
152static void jme_mac_config(struct jme_softc *);
153static void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
154static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
155static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
156#ifdef notyet
157static void jme_setwol(struct jme_softc *);
158static void jme_setlinkspeed(struct jme_softc *);
159#endif
2870abc4
SZ
160static void jme_set_tx_coal(struct jme_softc *);
161static void jme_set_rx_coal(struct jme_softc *);
760c056c
SZ
162static void jme_enable_rss(struct jme_softc *);
163static void jme_disable_rss(struct jme_softc *);
cccc3955
SZ
164static void jme_serialize_skipmain(struct jme_softc *);
165static void jme_deserialize_skipmain(struct jme_softc *);
76fbb0b9
SZ
166
167static void jme_sysctl_node(struct jme_softc *);
2870abc4
SZ
168static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
169static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
170static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
171static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
76fbb0b9
SZ
172
173/*
174 * Devices supported by this driver.
175 */
176static const struct jme_dev {
177 uint16_t jme_vendorid;
178 uint16_t jme_deviceid;
3a5f3f36 179 uint32_t jme_caps;
76fbb0b9
SZ
180 const char *jme_name;
181} jme_devs[] = {
44e8c66c 182 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
3a5f3f36 183 JME_CAP_JUMBO,
76fbb0b9 184 "JMicron Inc, JMC250 Gigabit Ethernet" },
44e8c66c 185 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
3a5f3f36 186 JME_CAP_FASTETH,
76fbb0b9 187 "JMicron Inc, JMC260 Fast Ethernet" },
3a5f3f36 188 { 0, 0, 0, NULL }
76fbb0b9
SZ
189};
190
191static device_method_t jme_methods[] = {
192 /* Device interface. */
193 DEVMETHOD(device_probe, jme_probe),
194 DEVMETHOD(device_attach, jme_attach),
195 DEVMETHOD(device_detach, jme_detach),
196 DEVMETHOD(device_shutdown, jme_shutdown),
197 DEVMETHOD(device_suspend, jme_suspend),
198 DEVMETHOD(device_resume, jme_resume),
199
200 /* Bus interface. */
201 DEVMETHOD(bus_print_child, bus_generic_print_child),
202 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
203
204 /* MII interface. */
205 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
206 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
207 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
208
209 { NULL, NULL }
210};
211
212static driver_t jme_driver = {
213 "jme",
214 jme_methods,
215 sizeof(struct jme_softc)
216};
217
218static devclass_t jme_devclass;
219
220DECLARE_DUMMY_MODULE(if_jme);
221MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
aa2b9d05
SW
222DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
223DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
76fbb0b9 224
4447c752
SZ
225static const struct {
226 uint32_t jme_coal;
227 uint32_t jme_comp;
58880b0d 228 uint32_t jme_empty;
4447c752 229} jme_rx_status[JME_NRXRING_MAX] = {
58880b0d
SZ
230 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
231 INTR_RXQ0_DESC_EMPTY },
232 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
233 INTR_RXQ1_DESC_EMPTY },
234 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
235 INTR_RXQ2_DESC_EMPTY },
236 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
237 INTR_RXQ3_DESC_EMPTY }
4447c752
SZ
238};
239
69325526
SZ
240static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
241static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
6afef6ab 242static int jme_rx_ring_count = 0;
3eba890a 243static int jme_msi_enable = 1;
58880b0d 244static int jme_msix_enable = 1;
83b03786
SZ
245
246TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
247TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
413d06bb 248TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
3eba890a 249TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
58880b0d 250TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
83b03786 251
fd2a6d2c
SZ
252static __inline void
253jme_setup_rxdesc(struct jme_rxdesc *rxd)
254{
255 struct jme_desc *desc;
256
257 desc = rxd->rx_desc;
258 desc->buflen = htole32(MCLBYTES);
259 desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
260 desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
261 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
262}
263
76fbb0b9
SZ
264/*
265 * Read a PHY register on the MII of the JMC250.
266 */
267static int
268jme_miibus_readreg(device_t dev, int phy, int reg)
269{
270 struct jme_softc *sc = device_get_softc(dev);
271 uint32_t val;
272 int i;
273
274 /* For FPGA version, PHY address 0 should be ignored. */
ec7e787b 275 if (sc->jme_caps & JME_CAP_FPGA) {
76fbb0b9
SZ
276 if (phy == 0)
277 return (0);
278 } else {
279 if (sc->jme_phyaddr != phy)
280 return (0);
281 }
282
283 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
284 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
285
286 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
287 DELAY(1);
288 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
289 break;
290 }
291 if (i == 0) {
292 device_printf(sc->jme_dev, "phy read timeout: "
293 "phy %d, reg %d\n", phy, reg);
294 return (0);
295 }
296
297 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
298}
299
300/*
301 * Write a PHY register on the MII of the JMC250.
302 */
303static int
304jme_miibus_writereg(device_t dev, int phy, int reg, int val)
305{
306 struct jme_softc *sc = device_get_softc(dev);
307 int i;
308
309 /* For FPGA version, PHY address 0 should be ignored. */
ec7e787b 310 if (sc->jme_caps & JME_CAP_FPGA) {
76fbb0b9
SZ
311 if (phy == 0)
312 return (0);
313 } else {
314 if (sc->jme_phyaddr != phy)
315 return (0);
316 }
317
318 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
319 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
320 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
321
322 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
323 DELAY(1);
324 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
325 break;
326 }
327 if (i == 0) {
328 device_printf(sc->jme_dev, "phy write timeout: "
329 "phy %d, reg %d\n", phy, reg);
330 }
331
332 return (0);
333}
334
/*
 * Callback from MII layer when media changes.
 *
 * On any link transition the driver performs a full stop/reprogram/restart
 * of the MAC: stopping the Rx/Tx MACs resets the hardware's descriptor
 * pointers (see the block comment below), so the rings and software
 * producer/consumer indices must be rebuilt from scratch before the MAC
 * can be re-enabled.  The statement order here is hardware-mandated; do
 * not reorder.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i, r;

	/*
	 * When called from jme_tick() the main serializer is already
	 * held; grab only the remaining (Tx/Rx) serializers then.
	 */
	if (sc->jme_in_tick)
		jme_serialize_skipmain(sc);
	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		goto done;

	mii = device_get_softc(sc->jme_miibus);

	/* Derive link state from the resolved media word. */
	sc->jme_has_link = FALSE;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_has_link = TRUE;
			break;
		case IFM_1000_T:
			/* Fast-ethernet-only parts cannot do 1000baseT. */
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_has_link = TRUE;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources. Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/* Drain every Rx ring and reset its consumer index. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		jme_rxeof(rdata, -1);
		/* Discard any partially-assembled jumbo chain. */
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);

		/*
		 * Reuse configured Rx descriptors and reset
		 * producer/consumer index.
		 */
		rdata->jme_rx_cons = 0;
	}
	if (JME_ENABLE_HWRSS(sc))
		jme_enable_rss(sc);
	else
		jme_disable_rss(sc);

	/* Reclaim completed Tx descriptors, then free anything pending. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}
	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_has_link) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = sc->jme_cdata.jme_tx_ring_paddr;
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			/* Select ring r before writing its base address. */
			CSR_WRITE_4(sc, JME_RXCSR,
			    sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));

			/* Set Rx ring address to the hardware. */
			paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
			CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
			CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		}

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

#ifdef DEVICE_POLLING
	if (!(ifp->if_flags & IFF_POLLING))
#endif
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);

done:
	if (sc->jme_in_tick)
		jme_deserialize_skipmain(sc);
}
481
482/*
483 * Get the current interface media status.
484 */
485static void
486jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
487{
488 struct jme_softc *sc = ifp->if_softc;
489 struct mii_data *mii = device_get_softc(sc->jme_miibus);
490
31f0d5a2 491 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
492
493 mii_pollstat(mii);
494 ifmr->ifm_status = mii->mii_media_status;
495 ifmr->ifm_active = mii->mii_media_active;
496}
497
498/*
499 * Set hardware to newly-selected media.
500 */
501static int
502jme_mediachange(struct ifnet *ifp)
503{
504 struct jme_softc *sc = ifp->if_softc;
505 struct mii_data *mii = device_get_softc(sc->jme_miibus);
506 int error;
507
31f0d5a2 508 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
509
510 if (mii->mii_instance != 0) {
511 struct mii_softc *miisc;
512
513 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
514 mii_phy_reset(miisc);
515 }
516 error = mii_mediachg(mii);
517
518 return (error);
519}
520
521static int
522jme_probe(device_t dev)
523{
524 const struct jme_dev *sp;
525 uint16_t vid, did;
526
527 vid = pci_get_vendor(dev);
528 did = pci_get_device(dev);
529 for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
530 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
3a5f3f36
SZ
531 struct jme_softc *sc = device_get_softc(dev);
532
533 sc->jme_caps = sp->jme_caps;
76fbb0b9 534 device_set_desc(dev, sp->jme_name);
76fbb0b9
SZ
535 return (0);
536 }
537 }
538 return (ENXIO);
539}
540
541static int
542jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
543{
544 uint32_t reg;
545 int i;
546
547 *val = 0;
548 for (i = JME_TIMEOUT; i > 0; i--) {
549 reg = CSR_READ_4(sc, JME_SMBCSR);
550 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
551 break;
552 DELAY(1);
553 }
554
555 if (i == 0) {
556 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
557 return (ETIMEDOUT);
558 }
559
560 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
561 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
562 for (i = JME_TIMEOUT; i > 0; i--) {
563 DELAY(1);
564 reg = CSR_READ_4(sc, JME_SMBINTF);
565 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
566 break;
567 }
568
569 if (i == 0) {
570 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
571 return (ETIMEDOUT);
572 }
573
574 reg = CSR_READ_4(sc, JME_SMBINTF);
575 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
576
577 return (0);
578}
579
/*
 * Try to extract the station address from the EEPROM.
 *
 * The EEPROM holds a signature followed by a list of 3-byte descriptors
 * (function/page selector, register offset, value).  We walk the list and
 * collect the bytes that target the PAR0..PAR0+5 station address registers.
 *
 * @param eaddr	Out buffer of ETHER_ADDR_LEN bytes.
 * @return	0 when all six address bytes were found, ENOENT when the
 *		signature is missing or the list ends before the address
 *		is complete.
 */
static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	/* Verify the two-byte EEPROM signature first. */
	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		/* Any read failure aborts the walk (falls through to ENOENT). */
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Only descriptors for function 0 / BAR1 page are relevant. */
		if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
		    (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}
623
624static void
625jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
626{
627 uint32_t par0, par1;
628
629 /* Read station address. */
630 par0 = CSR_READ_4(sc, JME_PAR0);
631 par1 = CSR_READ_4(sc, JME_PAR1);
632 par1 &= 0xFFFF;
633 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
634 device_printf(sc->jme_dev,
635 "generating fake ethernet address.\n");
636 par0 = karc4random();
637 /* Set OUI to JMicron. */
638 eaddr[0] = 0x00;
639 eaddr[1] = 0x1B;
640 eaddr[2] = 0x8C;
641 eaddr[3] = (par0 >> 16) & 0xff;
642 eaddr[4] = (par0 >> 8) & 0xff;
643 eaddr[5] = par0 & 0xff;
644 } else {
645 eaddr[0] = (par0 >> 0) & 0xFF;
646 eaddr[1] = (par0 >> 8) & 0xFF;
647 eaddr[2] = (par0 >> 16) & 0xFF;
648 eaddr[3] = (par0 >> 24) & 0xFF;
649 eaddr[4] = (par1 >> 0) & 0xFF;
650 eaddr[5] = (par1 >> 8) & 0xFF;
651 }
652}
653
/*
 * Device attach: bring the controller from cold PCI device to a fully
 * registered ethernet interface.
 *
 * Ordering matters throughout: serializers and ring counts must exist
 * before the serializer array is built; BAR/IRQ resources before any
 * CSR access; chip reset before MSI vector mapping and MAC address
 * extraction; DMA allocation before ifnet setup; and interrupt setup
 * last, after ether_ifattach().  On any failure after resource
 * allocation we bail to `fail`, which runs jme_detach() for cleanup.
 */
static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint16_t did;
	uint8_t pcie_ptr, rev;
	int error = 0, i, j, rx_desc_cnt;
	uint8_t eaddr[ETHER_ADDR_LEN];

	device_printf(dev, "rxdata %zu, chain_data %zu\n",
	    sizeof(struct jme_rxdata), sizeof(struct jme_chain_data));

	/* Init all serializers up front; only the first rx_ring_cnt are used. */
	lwkt_serialize_init(&sc->jme_serialize);
	lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
	for (i = 0; i < JME_NRXRING_MAX; ++i) {
		lwkt_serialize_init(
		    &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
	}

	/* Rx/Tx descriptor counts: tunable, aligned, clamped to the max. */
	rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
	    jme_rx_desc_count);
	rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
	if (rx_desc_cnt > JME_NDESC_MAX)
		rx_desc_cnt = JME_NDESC_MAX;

	sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
	    jme_tx_desc_count);
	sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
	    JME_NDESC_ALIGN);
	if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
		sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;

	/*
	 * Calculate rx rings
	 */
	sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
	    jme_rx_ring_count);
	sc->jme_cdata.jme_rx_ring_cnt =
	    if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);

	/*
	 * Build the serializer array in the fixed order asserted below:
	 * [0] main, [JME_TX_SERIALIZE] tx, [JME_RX_SERIALIZE..] rx rings.
	 */
	i = 0;
	sc->jme_serialize_arr[i++] = &sc->jme_serialize;

	KKASSERT(i == JME_TX_SERIALIZE);
	sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;

	KKASSERT(i == JME_RX_SERIALIZE);
	for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
		sc->jme_serialize_arr[i++] =
		    &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
	}
	KKASSERT(i <= JME_NSERIALIZE);
	sc->jme_serialize_cnt = i;

	/* Back-pointers and per-ring interrupt status bits/desc counts. */
	sc->jme_cdata.jme_sc = sc;
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_sc = sc;
		rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
		rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
		rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
		rdata->jme_rx_idx = i;
		rdata->jme_rx_desc_cnt = rx_desc_cnt;
	}

	sc->jme_dev = dev;
	sc->jme_lowaddr = BUS_SPACE_MAXADDR;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	/* Power the chip up to D0, preserving BAR/INTLINE across the switch. */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access. Because I/O register access should use different
	 * BARs to access registers it's waste of time to use I/O
	 * register spce access. JMC250 uses 16K to map entire memory
	 * space.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	error = jme_intr_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Extract revisions
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision: 0x%04x\n",
			    (reg & CHIPMODE_FPGA_REV_MASK) >>
			    CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* NOTE: FM revision is put in the upper 4 bits */
	rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
	rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
	if (bootverbose)
		device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);

	/* Apply per-model, per-revision quirks. */
	did = pci_get_device(dev);
	switch (did) {
	case PCI_PRODUCT_JMICRON_JMC250:
		if (rev == JME_REV1_A2)
			sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
		break;

	case PCI_PRODUCT_JMICRON_JMC260:
		/* JMC260 rev 2 is limited to 32-bit DMA addressing. */
		if (rev == JME_REV2)
			sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
		break;

	default:
		panic("unknown device id 0x%04x", did);
	}
	if (rev >= JME_REV2) {
		sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
		sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
		    GHC_TXMAC_CLKSRC_1000;
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Map MSI/MSI-X vectors */
	jme_set_msinum(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
			    "not found in EEPROM.\n");
		}
		/* Fall back to the PAR registers (or a fabricated address). */
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * Integrated JR0211 has fixed PHY address whereas FPGA version
	 * requires PHY probing to get correct PHY address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		/* Match Tx DMA burst size to the PCIe max read request size. */
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuffs */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	/* Hook up the ifnet methods and queue. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = jme_poll;
#endif
	ifp->if_watchdog = jme_watchdog;
	ifp->if_serialize = jme_serialize;
	ifp->if_deserialize = jme_deserialize;
	ifp->if_tryserialize = jme_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = jme_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd,
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_TSO |
	    IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING;
	if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Disable TXCSUM by default to improve bulk data
	 * transmit performance (+20Mbps improvement).
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= JME_CSUM_FEATURES;
	ifp->if_hwassist |= CSUM_TSO;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
	    jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			/* Use the first non-zero probed PHY address. */
			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Interrupts last: the handlers expect a fully attached interface. */
	error = jme_intr_setup(dev);
	if (error) {
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	jme_detach(dev);
	return (error);
}
984
/*
 * Device detach.  Also used as the cleanup path for a failed attach
 * (jme_attach() jumps here via its `fail` label), so every teardown
 * step is guarded against resources that were never allocated.
 */
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/* Quiesce the hardware and tear down interrupt handlers
		 * under the full serializer set before detaching the ifnet. */
		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	/* Detach the miibus child before generic bus teardown. */
	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	/* Release IRQ resources (legacy/MSI/MSI-X). */
	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	/* Free descriptor rings, maps and DMA tags. */
	jme_dma_free(sc);

	return (0);
}
1019
1020static void
1021jme_sysctl_node(struct jme_softc *sc)
1022{
83b03786 1023 int coal_max;
760c056c 1024#ifdef JME_RSS_DEBUG
760c056c
SZ
1025 int r;
1026#endif
83b03786 1027
76fbb0b9
SZ
1028 sysctl_ctx_init(&sc->jme_sysctl_ctx);
1029 sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
1030 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
1031 device_get_nameunit(sc->jme_dev),
1032 CTLFLAG_RD, 0, "");
1033 if (sc->jme_sysctl_tree == NULL) {
1034 device_printf(sc->jme_dev, "can't add sysctl node\n");
1035 return;
1036 }
1037
1038 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1039 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
2870abc4
SZ
1040 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1041 sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");
76fbb0b9
SZ
1042
1043 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1044 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
2870abc4
SZ
1045 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1046 sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");
76fbb0b9
SZ
1047
1048 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1049 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
2870abc4
SZ
1050 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
1051 sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");
76fbb0b9
SZ
1052
1053 SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
1054 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
2870abc4
SZ
1055 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
1056 sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");
76fbb0b9 1057
83b03786
SZ
1058 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1059 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
7b040092
SZ
1060 "rx_desc_count", CTLFLAG_RD,
1061 &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
83b03786
SZ
1062 0, "RX desc count");
1063 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1064 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
b020bb10
SZ
1065 "tx_desc_count", CTLFLAG_RD,
1066 &sc->jme_cdata.jme_tx_desc_cnt,
83b03786 1067 0, "TX desc count");
760c056c
SZ
1068 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1069 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
7b040092
SZ
1070 "rx_ring_count", CTLFLAG_RD,
1071 &sc->jme_cdata.jme_rx_ring_cnt,
760c056c 1072 0, "RX ring count");
760c056c
SZ
1073#ifdef JME_RSS_DEBUG
1074 SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
1075 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
24dd1705 1076 "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
760c056c 1077 0, "RSS debug level");
7b040092
SZ
1078 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
1079 char rx_ring_pkt[32];
1080
760c056c 1081 ksnprintf(rx_ring_pkt, sizeof(rx_ring_pkt), "rx_ring%d_pkt", r);
7b040092
SZ
1082 SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
1083 SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
1084 rx_ring_pkt, CTLFLAG_RW,
1085 &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");
760c056c
SZ
1086 }
1087#endif
83b03786
SZ
1088
1089 /*
1090 * Set default coalesce valves
1091 */
76fbb0b9 1092 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
76fbb0b9 1093 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
76fbb0b9 1094 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
76fbb0b9 1095 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
83b03786
SZ
1096
1097 /*
1098 * Adjust coalesce valves, in case that the number of TX/RX
1099 * descs are set to small values by users.
1100 *
1101 * NOTE: coal_max will not be zero, since number of descs
1102 * must aligned by JME_NDESC_ALIGN (16 currently)
1103 */
b020bb10 1104 coal_max = sc->jme_cdata.jme_tx_desc_cnt / 6;
83b03786
SZ
1105 if (coal_max < sc->jme_tx_coal_pkt)
1106 sc->jme_tx_coal_pkt = coal_max;
1107
7b040092 1108 coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 4;
83b03786
SZ
1109 if (coal_max < sc->jme_rx_coal_pkt)
1110 sc->jme_rx_coal_pkt = coal_max;
76fbb0b9
SZ
1111}
1112
76fbb0b9
SZ
1113static int
1114jme_dma_alloc(struct jme_softc *sc)
1115{
1116 struct jme_txdesc *txd;
1128a202 1117 bus_dmamem_t dmem;
ff7f3632 1118 int error, i, asize;
76fbb0b9 1119
83b03786 1120 sc->jme_cdata.jme_txdesc =
b020bb10 1121 kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
83b03786 1122 M_DEVBUF, M_WAITOK | M_ZERO);
7b040092
SZ
1123 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
1124 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
1125
1126 rdata->jme_rxdesc =
1127 kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
4447c752
SZ
1128 M_DEVBUF, M_WAITOK | M_ZERO);
1129 }
83b03786 1130
76fbb0b9
SZ
1131 /* Create parent ring tag. */
1132 error = bus_dma_tag_create(NULL,/* parent */
a7547dad
SZ
1133 1, JME_RING_BOUNDARY, /* algnmnt, boundary */
1134 sc->jme_lowaddr, /* lowaddr */
76fbb0b9
SZ
1135 BUS_SPACE_MAXADDR, /* highaddr */
1136 NULL, NULL, /* filter, filterarg */
1137 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1138 0, /* nsegments */
1139 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1140 0, /* flags */
1141 &sc->jme_cdata.jme_ring_tag);
1142 if (error) {
1143 device_printf(sc->jme_dev,
1144 "could not create parent ring DMA tag.\n");
1145 return error;
1146 }
1147
1148 /*
1149 * Create DMA stuffs for TX ring
1150 */
ff7f3632 1151 asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
1128a202
SZ
1152 error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
1153 JME_TX_RING_ALIGN, 0,
0eb220ec 1154 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
ff7f3632 1155 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
76fbb0b9 1156 if (error) {
1128a202 1157 device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
76fbb0b9
SZ
1158 return error;
1159 }
1128a202
SZ
1160 sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
1161 sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
1162 sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
1163 sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;
76fbb0b9
SZ
1164
1165 /*
1128a202 1166 * Create DMA stuffs for RX rings
76fbb0b9 1167 */
7b040092 1168 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
dea2452a 1169 error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
4447c752
SZ
1170 if (error)
1171 return error;
76fbb0b9 1172 }
76fbb0b9 1173
76fbb0b9
SZ
1174 /* Create parent buffer tag. */
1175 error = bus_dma_tag_create(NULL,/* parent */
1176 1, 0, /* algnmnt, boundary */
b249905b 1177 sc->jme_lowaddr, /* lowaddr */
76fbb0b9
SZ
1178 BUS_SPACE_MAXADDR, /* highaddr */
1179 NULL, NULL, /* filter, filterarg */
1180 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1181 0, /* nsegments */
1182 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1183 0, /* flags */
1184 &sc->jme_cdata.jme_buffer_tag);
1185 if (error) {
1186 device_printf(sc->jme_dev,
1187 "could not create parent buffer DMA tag.\n");
1188 return error;
1189 }
1190
1191 /*
1192 * Create DMA stuffs for shadow status block
1193 */
ff7f3632 1194 asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
1128a202 1195 error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
0eb220ec 1196 JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
ff7f3632 1197 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
76fbb0b9
SZ
1198 if (error) {
1199 device_printf(sc->jme_dev,
1128a202 1200 "could not create shadow status block.\n");
76fbb0b9
SZ
1201 return error;
1202 }
1128a202
SZ
1203 sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
1204 sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
1205 sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
1206 sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;
76fbb0b9
SZ
1207
1208 /*
1209 * Create DMA stuffs for TX buffers
1210 */
1211
1212 /* Create tag for Tx buffers. */
1213 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1214 1, 0, /* algnmnt, boundary */
0eb220ec 1215 BUS_SPACE_MAXADDR, /* lowaddr */
76fbb0b9
SZ
1216 BUS_SPACE_MAXADDR, /* highaddr */
1217 NULL, NULL, /* filter, filterarg */
1bedd927 1218 JME_TSO_MAXSIZE, /* maxsize */
76fbb0b9 1219 JME_MAXTXSEGS, /* nsegments */
9d424cee
SZ
1220 JME_MAXSEGSIZE, /* maxsegsize */
1221 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
76fbb0b9
SZ
1222 &sc->jme_cdata.jme_tx_tag);
1223 if (error != 0) {
1224 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1225 return error;
1226 }
1227
1228 /* Create DMA maps for Tx buffers. */
b020bb10 1229 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
76fbb0b9 1230 txd = &sc->jme_cdata.jme_txdesc[i];
9d424cee
SZ
1231 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
1232 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1233 &txd->tx_dmamap);
76fbb0b9
SZ
1234 if (error) {
1235 int j;
1236
1237 device_printf(sc->jme_dev,
1238 "could not create %dth Tx dmamap.\n", i);
1239
1240 for (j = 0; j < i; ++j) {
1241 txd = &sc->jme_cdata.jme_txdesc[j];
1242 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1243 txd->tx_dmamap);
1244 }
1245 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1246 sc->jme_cdata.jme_tx_tag = NULL;
1247 return error;
1248 }
1249 }
1250
1251 /*
1252 * Create DMA stuffs for RX buffers
1253 */
7b040092 1254 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
dea2452a 1255 error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
4447c752 1256 if (error)
76fbb0b9 1257 return error;
76fbb0b9
SZ
1258 }
1259 return 0;
1260}
1261
1262static void
0b3414d9 1263jme_dma_free(struct jme_softc *sc)
76fbb0b9
SZ
1264{
1265 struct jme_txdesc *txd;
1266 struct jme_rxdesc *rxd;
4447c752
SZ
1267 struct jme_rxdata *rdata;
1268 int i, r;
76fbb0b9
SZ
1269
1270 /* Tx ring */
1271 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1272 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1273 sc->jme_cdata.jme_tx_ring_map);
1274 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
560616bf 1275 sc->jme_cdata.jme_tx_ring,
76fbb0b9
SZ
1276 sc->jme_cdata.jme_tx_ring_map);
1277 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1278 sc->jme_cdata.jme_tx_ring_tag = NULL;
1279 }
1280
1281 /* Rx ring */
7b040092 1282 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
4447c752
SZ
1283 rdata = &sc->jme_cdata.jme_rx_data[r];
1284 if (rdata->jme_rx_ring_tag != NULL) {
1285 bus_dmamap_unload(rdata->jme_rx_ring_tag,
1286 rdata->jme_rx_ring_map);
1287 bus_dmamem_free(rdata->jme_rx_ring_tag,
1288 rdata->jme_rx_ring,
1289 rdata->jme_rx_ring_map);
1290 bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
1291 rdata->jme_rx_ring_tag = NULL;
1292 }
76fbb0b9
SZ
1293 }
1294
1295 /* Tx buffers */
1296 if (sc->jme_cdata.jme_tx_tag != NULL) {
b020bb10 1297 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
76fbb0b9
SZ
1298 txd = &sc->jme_cdata.jme_txdesc[i];
1299 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1300 txd->tx_dmamap);
1301 }
1302 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1303 sc->jme_cdata.jme_tx_tag = NULL;
1304 }
1305
1306 /* Rx buffers */
7b040092 1307 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
4447c752
SZ
1308 rdata = &sc->jme_cdata.jme_rx_data[r];
1309 if (rdata->jme_rx_tag != NULL) {
7b040092 1310 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
4447c752
SZ
1311 rxd = &rdata->jme_rxdesc[i];
1312 bus_dmamap_destroy(rdata->jme_rx_tag,
1313 rxd->rx_dmamap);
1314 }
1315 bus_dmamap_destroy(rdata->jme_rx_tag,
1316 rdata->jme_rx_sparemap);
1317 bus_dma_tag_destroy(rdata->jme_rx_tag);
1318 rdata->jme_rx_tag = NULL;
76fbb0b9 1319 }
76fbb0b9
SZ
1320 }
1321
1322 /* Shadow status block. */
1323 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1324 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1325 sc->jme_cdata.jme_ssb_map);
1326 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
560616bf 1327 sc->jme_cdata.jme_ssb_block,
76fbb0b9
SZ
1328 sc->jme_cdata.jme_ssb_map);
1329 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1330 sc->jme_cdata.jme_ssb_tag = NULL;
1331 }
1332
1333 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1334 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1335 sc->jme_cdata.jme_buffer_tag = NULL;
1336 }
1337 if (sc->jme_cdata.jme_ring_tag != NULL) {
1338 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1339 sc->jme_cdata.jme_ring_tag = NULL;
1340 }
83b03786 1341
0b3414d9
SZ
1342 if (sc->jme_cdata.jme_txdesc != NULL) {
1343 kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
1344 sc->jme_cdata.jme_txdesc = NULL;
1345 }
7b040092 1346 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
0b3414d9
SZ
1347 rdata = &sc->jme_cdata.jme_rx_data[r];
1348 if (rdata->jme_rxdesc != NULL) {
1349 kfree(rdata->jme_rxdesc, M_DEVBUF);
1350 rdata->jme_rxdesc = NULL;
83b03786
SZ
1351 }
1352 }
76fbb0b9
SZ
1353}
1354
1355/*
1356 * Make sure the interface is stopped at reboot time.
1357 */
1358static int
1359jme_shutdown(device_t dev)
1360{
1361 return jme_suspend(dev);
1362}
1363
1364#ifdef notyet
1365/*
1366 * Unlike other ethernet controllers, JMC250 requires
1367 * explicit resetting link speed to 10/100Mbps as gigabit
1368 * link will cunsume more power than 375mA.
1369 * Note, we reset the link speed to 10/100Mbps with
1370 * auto-negotiation but we don't know whether that operation
1371 * would succeed or not as we have no control after powering
1372 * off. If the renegotiation fail WOL may not work. Running
1373 * at 1Gbps draws more power than 375mA at 3.3V which is
1374 * specified in PCI specification and that would result in
1375 * complete shutdowning power to ethernet controller.
1376 *
1377 * TODO
1378 * Save current negotiated media speed/duplex/flow-control
1379 * to softc and restore the same link again after resuming.
1380 * PHY handling such as power down/resetting to 100Mbps
1381 * may be better handled in suspend method in phy driver.
1382 */
1383static void
1384jme_setlinkspeed(struct jme_softc *sc)
1385{
1386 struct mii_data *mii;
1387 int aneg, i;
1388
1389 JME_LOCK_ASSERT(sc);
1390
1391 mii = device_get_softc(sc->jme_miibus);
1392 mii_pollstat(mii);
1393 aneg = 0;
1394 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1395 switch IFM_SUBTYPE(mii->mii_media_active) {
1396 case IFM_10_T:
1397 case IFM_100_TX:
1398 return;
1399 case IFM_1000_T:
1400 aneg++;
1401 default:
1402 break;
1403 }
1404 }
1405 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1406 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1407 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1408 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1409 BMCR_AUTOEN | BMCR_STARTNEG);
1410 DELAY(1000);
1411 if (aneg != 0) {
1412 /* Poll link state until jme(4) get a 10/100 link. */
1413 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1414 mii_pollstat(mii);
1415 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1416 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1417 case IFM_10_T:
1418 case IFM_100_TX:
1419 jme_mac_config(sc);
1420 return;
1421 default:
1422 break;
1423 }
1424 }
1425 JME_UNLOCK(sc);
1426 pause("jmelnk", hz);
1427 JME_LOCK(sc);
1428 }
1429 if (i == MII_ANEGTICKS_GIGE)
1430 device_printf(sc->jme_dev, "establishing link failed, "
1431 "WOL may not work!");
1432 }
1433 /*
1434 * No link, force MAC to have 100Mbps, full-duplex link.
1435 * This is the last resort and may/may not work.
1436 */
1437 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1438 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1439 jme_mac_config(sc);
1440}
1441
1442static void
1443jme_setwol(struct jme_softc *sc)
1444{
1445 struct ifnet *ifp = &sc->arpcom.ac_if;
1446 uint32_t gpr, pmcs;
1447 uint16_t pmstat;
1448 int pmc;
1449
1450 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1451 /* No PME capability, PHY power down. */
1452 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1453 MII_BMCR, BMCR_PDOWN);
1454 return;
1455 }
1456
1457 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1458 pmcs = CSR_READ_4(sc, JME_PMCS);
1459 pmcs &= ~PMCS_WOL_ENB_MASK;
1460 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1461 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1462 /* Enable PME message. */
1463 gpr |= GPREG0_PME_ENB;
1464 /* For gigabit controllers, reset link speed to 10/100. */
ec7e787b 1465 if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
76fbb0b9
SZ
1466 jme_setlinkspeed(sc);
1467 }
1468
1469 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1470 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1471
1472 /* Request PME. */
1473 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1474 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1475 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1476 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1477 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1478 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1479 /* No WOL, PHY power down. */
1480 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1481 MII_BMCR, BMCR_PDOWN);
1482 }
1483}
1484#endif
1485
1486static int
1487jme_suspend(device_t dev)
1488{
1489 struct jme_softc *sc = device_get_softc(dev);
1490 struct ifnet *ifp = &sc->arpcom.ac_if;
1491
31f0d5a2 1492 ifnet_serialize_all(ifp);
76fbb0b9
SZ
1493 jme_stop(sc);
1494#ifdef notyet
1495 jme_setwol(sc);
1496#endif
31f0d5a2 1497 ifnet_deserialize_all(ifp);
76fbb0b9
SZ
1498
1499 return (0);
1500}
1501
1502static int
1503jme_resume(device_t dev)
1504{
1505 struct jme_softc *sc = device_get_softc(dev);
1506 struct ifnet *ifp = &sc->arpcom.ac_if;
1507#ifdef notyet
1508 int pmc;
1509#endif
1510
31f0d5a2 1511 ifnet_serialize_all(ifp);
76fbb0b9
SZ
1512
1513#ifdef notyet
1514 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1515 uint16_t pmstat;
1516
1517 pmstat = pci_read_config(sc->jme_dev,
1518 pmc + PCIR_POWER_STATUS, 2);
1519 /* Disable PME clear PME status. */
1520 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1521 pci_write_config(sc->jme_dev,
1522 pmc + PCIR_POWER_STATUS, pmstat, 2);
1523 }
1524#endif
1525
1526 if (ifp->if_flags & IFF_UP)
1527 jme_init(sc);
1528
31f0d5a2 1529 ifnet_deserialize_all(ifp);
76fbb0b9
SZ
1530
1531 return (0);
1532}
1533
1bedd927
SZ
1534static __inline int
1535jme_tso_pullup(struct mbuf **mp)
1536{
1537 int hoff, iphlen, thoff;
1538 struct mbuf *m;
1539
1540 m = *mp;
1541 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
1542
1543 iphlen = m->m_pkthdr.csum_iphlen;
1544 thoff = m->m_pkthdr.csum_thlen;
1545 hoff = m->m_pkthdr.csum_lhlen;
1546
1547 KASSERT(iphlen > 0, ("invalid ip hlen"));
1548 KASSERT(thoff > 0, ("invalid tcp hlen"));
1549 KASSERT(hoff > 0, ("invalid ether hlen"));
1550
1551 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
1552 m = m_pullup(m, hoff + iphlen + thoff);
1553 if (m == NULL) {
1554 *mp = NULL;
1555 return ENOBUFS;
1556 }
1557 *mp = m;
1558 }
1559 return 0;
1560}
1561
76fbb0b9
SZ
1562static int
1563jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1564{
1565 struct jme_txdesc *txd;
1566 struct jme_desc *desc;
1567 struct mbuf *m;
76fbb0b9 1568 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
b0ba1747 1569 int maxsegs, nsegs;
9b3ee148 1570 int error, i, prod, symbol_desc;
1bedd927 1571 uint32_t cflags, flag64, mss;
76fbb0b9
SZ
1572
1573 M_ASSERTPKTHDR((*m_head));
1574
1bedd927
SZ
1575 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
1576 /* XXX Is this necessary? */
1577 error = jme_tso_pullup(m_head);
1578 if (error)
1579 return error;
1580 }
1581
76fbb0b9
SZ
1582 prod = sc->jme_cdata.jme_tx_prod;
1583 txd = &sc->jme_cdata.jme_txdesc[prod];
1584
9b3ee148
SZ
1585 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
1586 symbol_desc = 1;
1587 else
1588 symbol_desc = 0;
1589
b020bb10 1590 maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
9b3ee148 1591 (JME_TXD_RSVD + symbol_desc);
76fbb0b9
SZ
1592 if (maxsegs > JME_MAXTXSEGS)
1593 maxsegs = JME_MAXTXSEGS;
1bedd927 1594 KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
ed20d0e3 1595 ("not enough segments %d", maxsegs));
76fbb0b9 1596
b0ba1747
SZ
1597 error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
1598 txd->tx_dmamap, m_head,
1599 txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1600 if (error)
ecc6de9e 1601 goto fail;
76fbb0b9 1602
4458ee95
SZ
1603 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1604 BUS_DMASYNC_PREWRITE);
1605
76fbb0b9
SZ
1606 m = *m_head;
1607 cflags = 0;
1bedd927 1608 mss = 0;
76fbb0b9
SZ
1609
1610 /* Configure checksum offload. */
1bedd927
SZ
1611 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1612 mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
1613 cflags |= JME_TD_TSO;
1614 } else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
1615 if (m->m_pkthdr.csum_flags & CSUM_IP)
1616 cflags |= JME_TD_IPCSUM;
1617 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1618 cflags |= JME_TD_TCPCSUM;
1619 if (m->m_pkthdr.csum_flags & CSUM_UDP)
1620 cflags |= JME_TD_UDPCSUM;
1621 }
76fbb0b9
SZ
1622
1623 /* Configure VLAN. */
1624 if (m->m_flags & M_VLANTAG) {
1625 cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
1626 cflags |= JME_TD_VLAN_TAG;
1627 }
1628
560616bf 1629 desc = &sc->jme_cdata.jme_tx_ring[prod];
76fbb0b9 1630 desc->flags = htole32(cflags);
76fbb0b9 1631 desc->addr_hi = htole32(m->m_pkthdr.len);
7228f061
SZ
1632 if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1633 /*
1634 * Use 64bits TX desc chain format.
1635 *
1636 * The first TX desc of the chain, which is setup here,
1637 * is just a symbol TX desc carrying no payload.
1638 */
1639 flag64 = JME_TD_64BIT;
1bedd927 1640 desc->buflen = htole32(mss);
7228f061
SZ
1641 desc->addr_lo = 0;
1642
1643 /* No effective TX desc is consumed */
1644 i = 0;
1645 } else {
1646 /*
1647 * Use 32bits TX desc chain format.
1648 *
1649 * The first TX desc of the chain, which is setup here,
1650 * is an effective TX desc carrying the first segment of
1651 * the mbuf chain.
1652 */
1653 flag64 = 0;
1bedd927 1654 desc->buflen = htole32(mss | txsegs[0].ds_len);
7228f061
SZ
1655 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));
1656
1657 /* One effective TX desc is consumed */
1658 i = 1;
1659 }
76fbb0b9 1660 sc->jme_cdata.jme_tx_cnt++;
9de40864 1661 KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
022f915e 1662 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
b020bb10 1663 JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
7228f061
SZ
1664
1665 txd->tx_ndesc = 1 - i;
b0ba1747 1666 for (; i < nsegs; i++) {
560616bf 1667 desc = &sc->jme_cdata.jme_tx_ring[prod];
76fbb0b9
SZ
1668 desc->buflen = htole32(txsegs[i].ds_len);
1669 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1670 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
a54bd021 1671 desc->flags = htole32(JME_TD_OWN | flag64);
76fbb0b9
SZ
1672
1673 sc->jme_cdata.jme_tx_cnt++;
1674 KKASSERT(sc->jme_cdata.jme_tx_cnt <=
022f915e 1675 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
b020bb10 1676 JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
76fbb0b9
SZ
1677 }
1678
1679 /* Update producer index. */
1680 sc->jme_cdata.jme_tx_prod = prod;
1681 /*
1682 * Finally request interrupt and give the first descriptor
1683 * owenership to hardware.
1684 */
1685 desc = txd->tx_desc;
1686 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1687
1688 txd->tx_m = m;
b0ba1747 1689 txd->tx_ndesc += nsegs;
76fbb0b9 1690
ecc6de9e
SZ
1691 return 0;
1692fail:
1693 m_freem(*m_head);
1694 *m_head = NULL;
1695 return error;
76fbb0b9
SZ
1696}
1697
1698static void
1699jme_start(struct ifnet *ifp)
1700{
1701 struct jme_softc *sc = ifp->if_softc;
1702 struct mbuf *m_head;
1703 int enq = 0;
1704
31f0d5a2 1705 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
76fbb0b9 1706
cccc3955 1707 if (!sc->jme_has_link) {
76fbb0b9
SZ
1708 ifq_purge(&ifp->if_snd);
1709 return;
1710 }
1711
1712 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1713 return;
1714
83b03786 1715 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
76fbb0b9
SZ
1716 jme_txeof(sc);
1717
1718 while (!ifq_is_empty(&ifp->if_snd)) {
1719 /*
1720 * Check number of available TX descs, always
1721 * leave JME_TXD_RSVD free TX descs.
1722 */
1bedd927 1723 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE >
b020bb10 1724 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
76fbb0b9
SZ
1725 ifp->if_flags |= IFF_OACTIVE;
1726 break;
1727 }
1728
1729 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1730 if (m_head == NULL)
1731 break;
1732
1733 /*
1734 * Pack the data into the transmit ring. If we
1735 * don't have room, set the OACTIVE flag and wait
1736 * for the NIC to drain the ring.
1737 */
1738 if (jme_encap(sc, &m_head)) {
ecc6de9e
SZ
1739 KKASSERT(m_head == NULL);
1740 ifp->if_oerrors++;
76fbb0b9
SZ
1741 ifp->if_flags |= IFF_OACTIVE;
1742 break;
1743 }
1744 enq++;
1745
1746 /*
1747 * If there's a BPF listener, bounce a copy of this frame
1748 * to him.
1749 */
1750 ETHER_BPF_MTAP(ifp, m_head);
1751 }
1752
1753 if (enq > 0) {
1754 /*
1755 * Reading TXCSR takes very long time under heavy load
1756 * so cache TXCSR value and writes the ORed value with
1757 * the kick command to the TXCSR. This saves one register
1758 * access cycle.
1759 */
1760 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1761 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1762 /* Set a timeout in case the chip goes out to lunch. */
1763 ifp->if_timer = JME_TX_TIMEOUT;
1764 }
1765}
1766
1767static void
1768jme_watchdog(struct ifnet *ifp)
1769{
1770 struct jme_softc *sc = ifp->if_softc;
1771
31f0d5a2 1772 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9 1773
cccc3955 1774 if (!sc->jme_has_link) {
76fbb0b9
SZ
1775 if_printf(ifp, "watchdog timeout (missed link)\n");
1776 ifp->if_oerrors++;
1777 jme_init(sc);
1778 return;
1779 }
1780
1781 jme_txeof(sc);
1782 if (sc->jme_cdata.jme_tx_cnt == 0) {
1783 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
1784 "-- recovering\n");
1785 if (!ifq_is_empty(&ifp->if_snd))
1786 if_devstart(ifp);
1787 return;
1788 }
1789
1790 if_printf(ifp, "watchdog timeout\n");
1791 ifp->if_oerrors++;
1792 jme_init(sc);
1793 if (!ifq_is_empty(&ifp->if_snd))
1794 if_devstart(ifp);
1795}
1796
1797static int
1798jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1799{
1800 struct jme_softc *sc = ifp->if_softc;
1801 struct mii_data *mii = device_get_softc(sc->jme_miibus);
1802 struct ifreq *ifr = (struct ifreq *)data;
1803 int error = 0, mask;
1804
31f0d5a2 1805 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
1806
1807 switch (cmd) {
1808 case SIOCSIFMTU:
1809 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
3a5f3f36 1810 (!(sc->jme_caps & JME_CAP_JUMBO) &&
76fbb0b9
SZ
1811 ifr->ifr_mtu > JME_MAX_MTU)) {
1812 error = EINVAL;
1813 break;
1814 }
1815
1816 if (ifp->if_mtu != ifr->ifr_mtu) {
1817 /*
1818 * No special configuration is required when interface
1819 * MTU is changed but availability of Tx checksum
1820 * offload should be chcked against new MTU size as
1821 * FIFO size is just 2K.
1822 */
1823 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1bedd927
SZ
1824 ifp->if_capenable &=
1825 ~(IFCAP_TXCSUM | IFCAP_TSO);
1826 ifp->if_hwassist &=
1827 ~(JME_CSUM_FEATURES | CSUM_TSO);
76fbb0b9
SZ
1828 }
1829 ifp->if_mtu = ifr->ifr_mtu;
1830 if (ifp->if_flags & IFF_RUNNING)
1831 jme_init(sc);
1832 }
1833 break;
1834
1835 case SIOCSIFFLAGS:
1836 if (ifp->if_flags & IFF_UP) {
1837 if (ifp->if_flags & IFF_RUNNING) {
1838 if ((ifp->if_flags ^ sc->jme_if_flags) &
1839 (IFF_PROMISC | IFF_ALLMULTI))
1840 jme_set_filter(sc);
1841 } else {
1842 jme_init(sc);
1843 }
1844 } else {
1845 if (ifp->if_flags & IFF_RUNNING)
1846 jme_stop(sc);
1847 }
1848 sc->jme_if_flags = ifp->if_flags;
1849 break;
1850
1851 case SIOCADDMULTI:
1852 case SIOCDELMULTI:
1853 if (ifp->if_flags & IFF_RUNNING)
1854 jme_set_filter(sc);
1855 break;
1856
1857 case SIOCSIFMEDIA:
1858 case SIOCGIFMEDIA:
1859 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1860 break;
1861
1862 case SIOCSIFCAP:
1863 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1864
1865 if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
e4616e94 1866 ifp->if_capenable ^= IFCAP_TXCSUM;
1bedd927 1867 if (ifp->if_capenable & IFCAP_TXCSUM)
e4616e94
SZ
1868 ifp->if_hwassist |= JME_CSUM_FEATURES;
1869 else
1870 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
76fbb0b9 1871 }
e4616e94 1872 if (mask & IFCAP_RXCSUM) {
76fbb0b9
SZ
1873 uint32_t reg;
1874
1875 ifp->if_capenable ^= IFCAP_RXCSUM;
1876 reg = CSR_READ_4(sc, JME_RXMAC);
1877 reg &= ~RXMAC_CSUM_ENB;
1878 if (ifp->if_capenable & IFCAP_RXCSUM)
1879 reg |= RXMAC_CSUM_ENB;
1880 CSR_WRITE_4(sc, JME_RXMAC, reg);
1881 }
1882
e4616e94 1883 if (mask & IFCAP_VLAN_HWTAGGING) {
76fbb0b9
SZ
1884 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1885 jme_set_vlan(sc);
1886 }
e4616e94 1887
1bedd927
SZ
1888 if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
1889 ifp->if_capenable ^= IFCAP_TSO;
1890 if (ifp->if_capenable & IFCAP_TSO)
1891 ifp->if_hwassist |= CSUM_TSO;
1892 else
1893 ifp->if_hwassist &= ~CSUM_TSO;
1894 }
1895
9f20b7b3 1896 if (mask & IFCAP_RSS)
d585233c 1897 ifp->if_capenable ^= IFCAP_RSS;
76fbb0b9
SZ
1898 break;
1899
1900 default:
1901 error = ether_ioctl(ifp, cmd, data);
1902 break;
1903 }
1904 return (error);
1905}
1906
1907static void
1908jme_mac_config(struct jme_softc *sc)
1909{
1910 struct mii_data *mii;
3b3da110
SZ
1911 uint32_t ghc, rxmac, txmac, txpause, gp1;
1912 int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;
76fbb0b9
SZ
1913
1914 mii = device_get_softc(sc->jme_miibus);
1915
1916 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
1917 DELAY(10);
1918 CSR_WRITE_4(sc, JME_GHC, 0);
1919 ghc = 0;
1920 rxmac = CSR_READ_4(sc, JME_RXMAC);
1921 rxmac &= ~RXMAC_FC_ENB;
1922 txmac = CSR_READ_4(sc, JME_TXMAC);
1923 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
1924 txpause = CSR_READ_4(sc, JME_TXPFC);
1925 txpause &= ~TXPFC_PAUSE_ENB;
1926 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1927 ghc |= GHC_FULL_DUPLEX;
1928 rxmac &= ~RXMAC_COLL_DET_ENB;
1929 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
1930 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
1931 TXMAC_FRAME_BURST);
1932#ifdef notyet
1933 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1934 txpause |= TXPFC_PAUSE_ENB;
1935 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1936 rxmac |= RXMAC_FC_ENB;
1937#endif
1938 /* Disable retry transmit timer/retry limit. */
1939 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
1940 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
1941 } else {
1942 rxmac |= RXMAC_COLL_DET_ENB;
1943 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
1944 /* Enable retry transmit timer/retry limit. */
1945 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
1946 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
1947 }
1948
3b3da110
SZ
1949 /*
1950 * Reprogram Tx/Rx MACs with resolved speed/duplex.
1951 */
1952 gp1 = CSR_READ_4(sc, JME_GPREG1);
1953 gp1 &= ~GPREG1_WA_HDX;
1954
1955 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
1956 hdx = 1;
1957
76fbb0b9
SZ
1958 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1959 case IFM_10_T:
b249905b 1960 ghc |= GHC_SPEED_10 | sc->jme_clksrc;
3b3da110
SZ
1961 if (hdx)
1962 gp1 |= GPREG1_WA_HDX;
76fbb0b9 1963 break;
dbe37f03 1964
76fbb0b9 1965 case IFM_100_TX:
b249905b 1966 ghc |= GHC_SPEED_100 | sc->jme_clksrc;
3b3da110
SZ
1967 if (hdx)
1968 gp1 |= GPREG1_WA_HDX;
dbe37f03
SZ
1969
1970 /*
1971 * Use extended FIFO depth to workaround CRC errors
1972 * emitted by chips before JMC250B
1973 */
1974 phyconf = JMPHY_CONF_EXTFIFO;
76fbb0b9 1975 break;
dbe37f03 1976
76fbb0b9 1977 case IFM_1000_T:
ec7e787b 1978 if (sc->jme_caps & JME_CAP_FASTETH)
76fbb0b9 1979 break;
dbe37f03 1980
b249905b 1981 ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
3b3da110 1982 if (hdx)
76fbb0b9
SZ
1983 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
1984 break;
dbe37f03 1985
76fbb0b9
SZ
1986 default:
1987 break;
1988 }
1989 CSR_WRITE_4(sc, JME_GHC, ghc);
1990 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
1991 CSR_WRITE_4(sc, JME_TXMAC, txmac);
1992 CSR_WRITE_4(sc, JME_TXPFC, txpause);
dbe37f03 1993
ad22907f 1994 if (sc->jme_workaround & JME_WA_EXTFIFO) {
dbe37f03
SZ
1995 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1996 JMPHY_CONF, phyconf);
1997 }
3b3da110
SZ
1998 if (sc->jme_workaround & JME_WA_HDX)
1999 CSR_WRITE_4(sc, JME_GPREG1, gp1);
76fbb0b9
SZ
2000}
2001
/*
 * Legacy/MSI interrupt handler.
 *
 * Runs with sc->jme_serialize held (asserted below).  Masks all chip
 * interrupts for the duration of the handler, dispatches RX and TX
 * completion work, then unmasks on the way out.
 */
static void
jme_intr(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	/* 0xFFFFFFFF reads back when the device is gone (e.g. detached). */
	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return;

	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	status = CSR_READ_4(sc, JME_INTR_STATUS);
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
		goto back;

	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);

	/*
	 * Acking a coalescing interrupt also requires acking the
	 * corresponding completion bit, so fold them back in per queue.
	 */
	if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;

	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		if (status & jme_rx_status[r].jme_coal) {
			status |= jme_rx_status[r].jme_coal |
			    jme_rx_status[r].jme_comp;
		}
	}

	CSR_WRITE_4(sc, JME_INTR_STATUS, status);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
			jme_rx_intr(sc, status);

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * Notify hardware availability of new Rx buffers.
			 * Reading RXCSR takes very long time under heavy
			 * load so cache RXCSR value and writes the ORed
			 * value with the kick command to the RXCSR. This
			 * saves one register access cycle.
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
			/* TX completion runs under its own serializer. */
			lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
			jme_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		}
	}
back:
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}
2066
/*
 * Reclaim completed TX descriptors and their mbufs.
 *
 * Called with the TX serializer held.  Walks the ring from the consumer
 * index up to the producer index, freeing each transmitted frame and
 * clearing the OACTIVE/watchdog state when enough descriptors are free.
 */
static void
jme_txeof(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int cons;

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)
		return;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while (cons != sc->jme_cdata.jme_tx_prod) {
		struct jme_txdesc *txd, *next_txd;
		uint32_t status, next_status;
		int next_cons, nsegs;

		txd = &sc->jme_cdata.jme_txdesc[cons];
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!", __func__));

		status = le32toh(txd->tx_desc->flags);
		if ((status & JME_TD_OWN) == JME_TD_OWN)
			break;

		/*
		 * NOTE:
		 * This chip will always update the TX descriptor's
		 * buflen field and this updating always happens
		 * after clearing the OWN bit, so even if the OWN
		 * bit is cleared by the chip, we still don't sure
		 * about whether the buflen field has been updated
		 * by the chip or not. To avoid this race, we wait
		 * for the next TX descriptor's OWN bit to be cleared
		 * by the chip before reusing this TX descriptor.
		 */
		next_cons = cons;
		JME_DESC_ADD(next_cons, txd->tx_ndesc,
		    sc->jme_cdata.jme_tx_desc_cnt);
		next_txd = &sc->jme_cdata.jme_txdesc[next_cons];
		if (next_txd->tx_m == NULL)
			break;
		next_status = le32toh(next_txd->tx_desc->flags);
		if ((next_status & JME_TD_OWN) == JME_TD_OWN)
			break;

		if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
			ifp->if_oerrors++;
		} else {
			ifp->if_opackets++;
			if (status & JME_TD_COLLISION) {
				/* buflen doubles as the collision count here */
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
			}
		}

		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmiited frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_cdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
		}

		/* Reclaim transferred mbufs. */
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled", __func__));
		txd->tx_ndesc = 0;
	}
	sc->jme_cdata.jme_tx_cons = cons;

	/* 1 for symbol TX descriptor */
	if (sc->jme_cdata.jme_tx_cnt <= JME_MAXTXSEGS + 1)
		ifp->if_timer = 0;

	/* Clear OACTIVE once a spare frame's worth of descriptors is free. */
	if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE <=
	    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;
}
2157
/*
 * Hand 'count' RX descriptors starting at 'cons' back to the hardware
 * without replacing their mbufs (the existing buffers are reused).
 */
static __inline void
jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
	}
}
2168
a6acc6e2
SZ
2169static __inline struct pktinfo *
2170jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2171{
2172 if (flags & JME_RD_IPV4)
2173 pi->pi_netisr = NETISR_IP;
2174 else if (flags & JME_RD_IPV6)
2175 pi->pi_netisr = NETISR_IPV6;
2176 else
2177 return NULL;
2178
2179 pi->pi_flags = 0;
2180 pi->pi_l3proto = IPPROTO_UNKNOWN;
2181
2182 if (flags & JME_RD_MORE_FRAG)
2183 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2184 else if (flags & JME_RD_TCP)
2185 pi->pi_l3proto = IPPROTO_TCP;
2186 else if (flags & JME_RD_UDP)
2187 pi->pi_l3proto = IPPROTO_UDP;
7345eb80
SZ
2188 else
2189 pi = NULL;
a6acc6e2
SZ
2190 return pi;
2191}
2192
76fbb0b9
SZ
/*
 * Receive a frame.
 *
 * Processes one completed frame (possibly spanning several RX
 * descriptors) at rdata->jme_rx_cons: replenishes each descriptor with
 * a fresh mbuf, chains the received mbufs, fixes up lengths/checksum/
 * VLAN/RSS metadata on the final segment and passes the frame up the
 * stack.  Advances jme_rx_cons past the consumed descriptors.
 */
static void
jme_rxpkt(struct jme_rxdata *rdata)
{
	struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status, hash, hashinfo;
	int cons, count, nsegs;

	cons = rdata->jme_rx_cons;
	desc = &rdata->jme_rx_ring[cons];

	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	/* RSS hash and hash-type info are delivered in the address words. */
	hash = le32toh(desc->addr_hi);
	hashinfo = le32toh(desc->addr_lo);
	nsegs = JME_RX_NSEGS(status);

	if (nsegs > 1) {
		/* Skip the first descriptor. */
		JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);

		/*
		 * Clear the OWN bit of the following RX descriptors;
		 * hardware will not clear the OWN bit except the first
		 * RX descriptor.
		 *
		 * Since the first RX descriptor is setup, i.e. OWN bit
		 * on, before its followins RX descriptors, leaving the
		 * OWN bit on the following RX descriptors will trick
		 * the hardware into thinking that the following RX
		 * descriptors are ready to be used too.
		 */
		for (count = 1; count < nsegs; count++,
		     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
			rdata->jme_rx_ring[cons].flags = 0;

		cons = rdata->jme_rx_cons;
	}

	JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
	    "hash 0x%08x, hash info 0x%08x\n",
	    rdata->jme_rx_idx, flags, hash, hashinfo);

	if (status & JME_RX_ERR_STAT) {
		ifp->if_ierrors++;
		jme_discard_rxbufs(rdata, cons, nsegs);
#ifdef JME_SHOW_ERRORS
		if_printf(ifp, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
		rdata->jme_rx_cons += nsegs;
		rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
		return;
	}

	rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	for (count = 0; count < nsegs; count++,
	     JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
		rxd = &rdata->jme_rxdesc[cons];
		mp = rxd->rx_m;

		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(rdata, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse buffer. */
			jme_discard_rxbufs(rdata, cons, nsegs - count);
			if (rdata->jme_rxhead != NULL) {
				m_freem(rdata->jme_rxhead);
				JME_RXCHAIN_RESET(rdata);
			}
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (rdata->jme_rxhead == NULL) {
			rdata->jme_rxhead = mp;
			rdata->jme_rxtail = mp;
		} else {
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			rdata->jme_rxtail->m_next = mp;
			rdata->jme_rxtail = mp;
		}

		if (count == nsegs - 1) {
			struct pktinfo pi0, *pi;

			/* Last desc. for this frame. */
			m = rdata->jme_rxhead;
			m->m_pkthdr.len = rdata->jme_rxlen;
			if (nsegs > 1) {
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = rdata->jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
			} else {
				m->m_len = rdata->jme_rxlen;
			}
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 * hardware.
			 */
			m->m_data += JME_RX_PAD_BYTES;

			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) &&
			    (flags & JME_RD_IPV4)) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if (flags & JME_RD_IPCSUM)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & JME_RD_MORE_FRAG) == 0 &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
			    (flags & JME_RD_VLAN_TAG)) {
				m->m_pkthdr.ether_vlantag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			}

			ifp->if_ipackets++;

			if (ifp->if_capenable & IFCAP_RSS)
				pi = jme_pktinfo(&pi0, flags);
			else
				pi = NULL;

			/* Only trust the hash if it was Toeplitz-computed. */
			if (pi != NULL &&
			    (hashinfo & JME_RD_HASH_FN_MASK) ==
			    JME_RD_HASH_FN_TOEPLITZ) {
				m->m_flags |= (M_HASH | M_CKHASH);
				m->m_pkthdr.hash = toeplitz_hash(hash);
			}

#ifdef JME_RSS_DEBUG
			if (pi != NULL) {
				JME_RSS_DPRINTF(rdata->jme_sc, 10,
				    "isr %d flags %08x, l3 %d %s\n",
				    pi->pi_netisr, pi->pi_flags,
				    pi->pi_l3proto,
				    (m->m_flags & M_HASH) ? "hash" : "");
			}
#endif

			/* Pass it on. */
			ether_input_pkt(ifp, m, pi);

			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(rdata);
#ifdef JME_RSS_DEBUG
			rdata->jme_rx_pkt++;
#endif
		}
	}

	rdata->jme_rx_cons += nsegs;
	rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
}
2378
/*
 * Drain completed frames from one RX ring.
 *
 * 'count' bounds the number of frames processed when built with
 * DEVICE_POLLING (a negative count means "no limit"); otherwise it is
 * ignored.  Stops at the first descriptor still owned by hardware or
 * whose status/RSS write-back is not complete yet.
 */
static void
jme_rxeof(struct jme_rxdata *rdata, int count)
{
	struct jme_desc *desc;
	int nsegs, pktlen;

	for (;;) {
#ifdef DEVICE_POLLING
		if (count >= 0 && count-- == 0)
			break;
#endif
		desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
			break;
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
			break;

		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors. I'm not
		 * sure whether this check is needed.
		 */
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != howmany(pktlen, MCLBYTES)) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "RX fragment count(%d) and "
			    "packet size(%d) mismach\n", nsegs, pktlen);
			break;
		}

		/*
		 * NOTE:
		 * RSS hash and hash information may _not_ be set by the
		 * hardware even if the OWN bit is cleared and VALID bit
		 * is set.
		 *
		 * If the RSS information is not delivered by the hardware
		 * yet, we MUST NOT accept this packet, let alone reusing
		 * its RX descriptor. If this packet was accepted and its
		 * RX descriptor was reused before hardware delivering the
		 * RSS information, the RX buffer's address would be trashed
		 * by the RSS information delivered by the hardware.
		 */
		if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
			struct jme_rxdesc *rxd;
			uint32_t hashinfo;

			hashinfo = le32toh(desc->addr_lo);
			rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];

			/*
			 * This test should be enough to detect the pending
			 * RSS information delivery, given:
			 * - If RSS hash is not calculated, the hashinfo
			 *   will be 0. Howvever, the lower 32bits of RX
			 *   buffers' physical address will never be 0.
			 *   (see jme_rxbuf_dma_filter)
			 * - If RSS hash is calculated, the lowest 4 bits
			 *   of hashinfo will be set, while the RX buffers
			 *   are at least 2K aligned.
			 */
			if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
#ifdef JME_SHOW_RSSWB
				if_printf(&rdata->jme_sc->arpcom.ac_if,
				    "RSS is not written back yet\n");
#endif
				break;
			}
		}

		/* Received a frame. */
		jme_rxpkt(rdata);
	}
}
2455
/*
 * Periodic (1 Hz) callout: drive the MII state machine and reschedule.
 *
 * Runs under sc->jme_serialize; jme_in_tick flags that mii_tick() is
 * executing so code reached from it (e.g. the statchg callback) can
 * tell it was invoked from tick context.
 */
static void
jme_tick(void *xsc)
{
	struct jme_softc *sc = xsc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	lwkt_serialize_enter(&sc->jme_serialize);

	sc->jme_in_tick = TRUE;
	mii_tick(mii);
	sc->jme_in_tick = FALSE;

	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	lwkt_serialize_exit(&sc->jme_serialize);
}
2472
/*
 * Reset the chip to a known state.
 *
 * The sequence stops both DMA engines, then pulses the global reset
 * bit while cycling the TXMAC/TXOFL/RXMAC clock sources around it
 * (the reset bit is asserted with the clocks disabled and released
 * with them re-enabled), briefly enables TX/RX so the engines latch
 * the post-reset state, and finally stops them again.  The exact
 * register order follows the vendor-required procedure — do not
 * reorder these writes.
 */
static void
jme_reset(struct jme_softc *sc)
{
	uint32_t val;

	/* Make sure that TX and RX are stopped */
	jme_stop_tx(sc);
	jme_stop_rx(sc);

	/* Start reset */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(20);

	/*
	 * Hold reset bit before stop reset
	 */

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop reset */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/*
	 * Clear reset bit after stop reset
	 */

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Disable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, 0);
	/* Disable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Enable TX and RX */
	val = CSR_READ_4(sc, JME_TXCSR);
	CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
	val = CSR_READ_4(sc, JME_RXCSR);
	CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
	/* Flush */
	CSR_READ_4(sc, JME_TXCSR);
	CSR_READ_4(sc, JME_RXCSR);

	/* Enable TXMAC and TXOFL clock sources */
	CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
	/* Enable RXMAC clock source */
	val = CSR_READ_4(sc, JME_GPREG1);
	CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
	/* Flush */
	CSR_READ_4(sc, JME_GHC);

	/* Stop TX and RX */
	jme_stop_tx(sc);
	jme_stop_rx(sc);
}
2544
2545static void
2546jme_init(void *xsc)
2547{
2548 struct jme_softc *sc = xsc;
2549 struct ifnet *ifp = &sc->arpcom.ac_if;
2550 struct mii_data *mii;
2551 uint8_t eaddr[ETHER_ADDR_LEN];
2552 bus_addr_t paddr;
2553 uint32_t reg;
4447c752 2554 int error, r;
76fbb0b9 2555
31f0d5a2 2556 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
2557
2558 /*
2559 * Cancel any pending I/O.
2560 */
2561 jme_stop(sc);
2562
2563 /*
2564 * Reset the chip to a known state.
2565 */
2566 jme_reset(sc);
2567
58880b0d
SZ
2568 /*
2569 * Setup MSI/MSI-X vectors to interrupts mapping
2570 */
2571 jme_set_msinum(sc);
2572
6afef6ab 2573 if (JME_ENABLE_HWRSS(sc))
760c056c
SZ
2574 jme_enable_rss(sc);
2575 else
2576 jme_disable_rss(sc);
4447c752
SZ
2577
2578 /* Init RX descriptors */
7b040092 2579 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
dea2452a 2580 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
4447c752
SZ
2581 if (error) {
2582 if_printf(ifp, "initialization failed: "
2583 "no memory for %dth RX ring.\n", r);
2584 jme_stop(sc);
2585 return;
2586 }
2587 }
2588
2589 /* Init TX descriptors */
76fbb0b9
SZ
2590 jme_init_tx_ring(sc);
2591
2592 /* Initialize shadow status block. */
2593 jme_init_ssb(sc);
2594
2595 /* Reprogram the station address. */
2596 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2597 CSR_WRITE_4(sc, JME_PAR0,
2598 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2599 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2600
2601 /*
2602 * Configure Tx queue.
2603 * Tx priority queue weight value : 0
2604 * Tx FIFO threshold for processing next packet : 16QW
2605 * Maximum Tx DMA length : 512
2606 * Allow Tx DMA burst.
2607 */
2608 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2609 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2610 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2611 sc->jme_txcsr |= sc->jme_tx_dma_size;
2612 sc->jme_txcsr |= TXCSR_DMA_BURST;
2613 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2614
2615 /* Set Tx descriptor counter. */
b020bb10 2616 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
76fbb0b9
SZ
2617
2618 /* Set Tx ring address to the hardware. */
7405bec3 2619 paddr = sc->jme_cdata.jme_tx_ring_paddr;
76fbb0b9
SZ
2620 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2621 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2622
2623 /* Configure TxMAC parameters. */
2624 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2625 reg |= TXMAC_THRESH_1_PKT;
2626 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2627 CSR_WRITE_4(sc, JME_TXMAC, reg);
2628
2629 /*
2630 * Configure Rx queue.
2631 * FIFO full threshold for transmitting Tx pause packet : 128T
2632 * FIFO threshold for processing next packet : 128QW
2633 * Rx queue 0 select
2634 * Max Rx DMA length : 128
2635 * Rx descriptor retry : 32
2636 * Rx descriptor retry time gap : 256ns
2637 * Don't receive runt/bad frame.
2638 */
2639 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
223cfc2f 2640#if 0
76fbb0b9
SZ
2641 /*
2642 * Since Rx FIFO size is 4K bytes, receiving frames larger
2643 * than 4K bytes will suffer from Rx FIFO overruns. So
2644 * decrease FIFO threshold to reduce the FIFO overruns for
2645 * frames larger than 4000 bytes.
2646 * For best performance of standard MTU sized frames use
2647 * maximum allowable FIFO threshold, 128QW.
2648 */
2649 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2650 JME_RX_FIFO_SIZE)
2651 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2652 else
2653 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
223cfc2f
SZ
2654#else
2655 /* Improve PCI Express compatibility */
2656 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2657#endif
2658 sc->jme_rxcsr |= sc->jme_rx_dma_size;
76fbb0b9
SZ
2659 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2660 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2661 /* XXX TODO DROP_BAD */
76fbb0b9 2662
7b040092
SZ
2663 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2664 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2665
4447c752
SZ
2666 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2667
2668 /* Set Rx descriptor counter. */
7b040092 2669 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
76fbb0b9 2670
4447c752 2671 /* Set Rx ring address to the hardware. */
7b040092 2672 paddr = rdata->jme_rx_ring_paddr;
4447c752
SZ
2673 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2674 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2675 }
76fbb0b9
SZ
2676
2677 /* Clear receive filter. */
2678 CSR_WRITE_4(sc, JME_RXMAC, 0);
2679
2680 /* Set up the receive filter. */
2681 jme_set_filter(sc);
2682 jme_set_vlan(sc);
2683
2684 /*
2685 * Disable all WOL bits as WOL can interfere normal Rx
2686 * operation. Also clear WOL detection status bits.
2687 */
2688 reg = CSR_READ_4(sc, JME_PMCS);
2689 reg &= ~PMCS_WOL_ENB_MASK;
2690 CSR_WRITE_4(sc, JME_PMCS, reg);
2691
2692 /*
2693 * Pad 10bytes right before received frame. This will greatly
2694 * help Rx performance on strict-alignment architectures as
2695 * it does not need to copy the frame to align the payload.
2696 */
2697 reg = CSR_READ_4(sc, JME_RXMAC);
2698 reg |= RXMAC_PAD_10BYTES;
2699
2700 if (ifp->if_capenable & IFCAP_RXCSUM)
2701 reg |= RXMAC_CSUM_ENB;
2702 CSR_WRITE_4(sc, JME_RXMAC, reg);
2703
2704 /* Configure general purpose reg0 */
2705 reg = CSR_READ_4(sc, JME_GPREG0);
2706 reg &= ~GPREG0_PCC_UNIT_MASK;
2707 /* Set PCC timer resolution to micro-seconds unit. */
2708 reg |= GPREG0_PCC_UNIT_US;
2709 /*
2710 * Disable all shadow register posting as we have to read
2711 * JME_INTR_STATUS register in jme_intr. Also it seems
2712 * that it's hard to synchronize interrupt status between
2713 * hardware and software with shadow posting due to
2714 * requirements of bus_dmamap_sync(9).
2715 */
2716 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2717 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2718 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2719 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2720 /* Disable posting of DW0. */
2721 reg &= ~GPREG0_POST_DW0_ENB;
2722 /* Clear PME message. */
2723 reg &= ~GPREG0_PME_ENB;
2724 /* Set PHY address. */
2725 reg &= ~GPREG0_PHY_ADDR_MASK;
2726 reg |= sc->jme_phyaddr;
2727 CSR_WRITE_4(sc, JME_GPREG0, reg);
2728
2729 /* Configure Tx queue 0 packet completion coalescing. */
2870abc4 2730 jme_set_tx_coal(sc);
76fbb0b9 2731
dea2452a 2732 /* Configure Rx queues packet completion coalescing. */
2870abc4 2733 jme_set_rx_coal(sc);
76fbb0b9
SZ
2734
2735 /* Configure shadow status block but don't enable posting. */
560616bf 2736 paddr = sc->jme_cdata.jme_ssb_block_paddr;
76fbb0b9
SZ
2737 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2738 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2739
2740 /* Disable Timer 1 and Timer 2. */
2741 CSR_WRITE_4(sc, JME_TIMER1, 0);
2742 CSR_WRITE_4(sc, JME_TIMER2, 0);
2743
2744 /* Configure retry transmit period, retry limit value. */
2745 CSR_WRITE_4(sc, JME_TXTRHD,
2746 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2747 TXTRHD_RT_PERIOD_MASK) |
2748 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2749 TXTRHD_RT_LIMIT_SHIFT));
2750
9de40864
SZ
2751#ifdef DEVICE_POLLING
2752 if (!(ifp->if_flags & IFF_POLLING))
2753#endif
76fbb0b9
SZ
2754 /* Initialize the interrupt mask. */
2755 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2756 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2757
2758 /*
2759 * Enabling Tx/Rx DMA engines and Rx queue processing is
2760 * done after detection of valid link in jme_miibus_statchg.
2761 */
cccc3955 2762 sc->jme_has_link = FALSE;
76fbb0b9
SZ
2763
2764 /* Set the current media. */
2765 mii = device_get_softc(sc->jme_miibus);
2766 mii_mediachg(mii);
2767
2768 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2769
2770 ifp->if_flags |= IFF_RUNNING;
2771 ifp->if_flags &= ~IFF_OACTIVE;
2772}
2773
/*
 * Bring the interface down: stop the tick callout and both DMA engines,
 * mask interrupts, and release every mbuf still held by the TX ring,
 * the RX rings, and any partially-assembled RX chains.
 * Called with all ifnet serializers held.
 */
static void
jme_stop(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	callout_stop(&sc->jme_tick_ch);
	sc->jme_has_link = FALSE;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

	/* Disable updating shadow status block. */
	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

	/* Stop receiver, transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	/*
	 * Free partial finished RX segments
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxhead != NULL)
			m_freem(rdata->jme_rxhead);
		JME_RXCHAIN_RESET(rdata);
	}

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
			rxd = &rdata->jme_rxdesc[i];
			if (rxd->rx_m != NULL) {
				bus_dmamap_unload(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
				m_freem(rxd->rx_m);
				rxd->rx_m = NULL;
			}
		}
	}
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_ndesc = 0;
		}
	}
}
2844
2845static void
2846jme_stop_tx(struct jme_softc *sc)
2847{
2848 uint32_t reg;
2849 int i;
2850
2851 reg = CSR_READ_4(sc, JME_TXCSR);
2852 if ((reg & TXCSR_TX_ENB) == 0)
2853 return;
2854 reg &= ~TXCSR_TX_ENB;
2855 CSR_WRITE_4(sc, JME_TXCSR, reg);
2856 for (i = JME_TIMEOUT; i > 0; i--) {
2857 DELAY(1);
2858 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2859 break;
2860 }
2861 if (i == 0)
2862 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2863}
2864
2865static void
2866jme_stop_rx(struct jme_softc *sc)
2867{
2868 uint32_t reg;
2869 int i;
2870
2871 reg = CSR_READ_4(sc, JME_RXCSR);
2872 if ((reg & RXCSR_RX_ENB) == 0)
2873 return;
2874 reg &= ~RXCSR_RX_ENB;
2875 CSR_WRITE_4(sc, JME_RXCSR, reg);
2876 for (i = JME_TIMEOUT; i > 0; i--) {
2877 DELAY(1);
2878 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2879 break;
2880 }
2881 if (i == 0)
2882 device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2883}
2884
2885static void
2886jme_init_tx_ring(struct jme_softc *sc)
2887{
560616bf 2888 struct jme_chain_data *cd;
76fbb0b9
SZ
2889 struct jme_txdesc *txd;
2890 int i;
2891
2892 sc->jme_cdata.jme_tx_prod = 0;
2893 sc->jme_cdata.jme_tx_cons = 0;
2894 sc->jme_cdata.jme_tx_cnt = 0;
2895
560616bf
SZ
2896 cd = &sc->jme_cdata;
2897 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
b020bb10 2898 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
76fbb0b9
SZ
2899 txd = &sc->jme_cdata.jme_txdesc[i];
2900 txd->tx_m = NULL;
560616bf 2901 txd->tx_desc = &cd->jme_tx_ring[i];
76fbb0b9
SZ
2902 txd->tx_ndesc = 0;
2903 }
76fbb0b9
SZ
2904}
2905
2906static void
2907jme_init_ssb(struct jme_softc *sc)
2908{
560616bf 2909 struct jme_chain_data *cd;
76fbb0b9 2910
560616bf
SZ
2911 cd = &sc->jme_cdata;
2912 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
76fbb0b9
SZ
2913}
2914
2915static int
dea2452a 2916jme_init_rx_ring(struct jme_rxdata *rdata)
76fbb0b9 2917{
76fbb0b9
SZ
2918 struct jme_rxdesc *rxd;
2919 int i;
2920
4447c752
SZ
2921 KKASSERT(rdata->jme_rxhead == NULL &&
2922 rdata->jme_rxtail == NULL &&
2923 rdata->jme_rxlen == 0);
2924 rdata->jme_rx_cons = 0;
76fbb0b9 2925
7b040092
SZ
2926 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2927 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
76fbb0b9
SZ
2928 int error;
2929
4447c752 2930 rxd = &rdata->jme_rxdesc[i];
76fbb0b9 2931 rxd->rx_m = NULL;
4447c752 2932 rxd->rx_desc = &rdata->jme_rx_ring[i];
dea2452a 2933 error = jme_newbuf(rdata, rxd, 1);
76fbb0b9 2934 if (error)
4447c752 2935 return error;
76fbb0b9 2936 }
4447c752 2937 return 0;
76fbb0b9
SZ
2938}
2939
/*
 * Attach a fresh mbuf cluster to an RX descriptor.
 *
 * 'init' selects blocking allocation (ring setup) vs. non-blocking
 * (interrupt-time replenish).  On success the descriptor's old DMA map
 * is swapped with the ring's spare map so the previous buffer stays
 * mapped until the caller is done with it.  Returns 0 or an errno.
 */
static int
jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	/*
	 * JMC250 has 64bit boundary alignment limitation so jme(4)
	 * takes advantage of 10 bytes padding feature of hardware
	 * in order not to copy entire frame to align IP header on
	 * 32bit boundary.
	 */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
	    rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&rdata->jme_sc->arpcom.ac_if,
			    "can't load RX mbuf\n");
		}
		return error;
	}

	/* Tear down the mapping of the buffer being replaced, if any. */
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the loaded spare map into the descriptor. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = rdata->jme_rx_sparemap;
	rdata->jme_rx_sparemap = map;
	rxd->rx_m = m;
	rxd->rx_paddr = segs.ds_addr;

	jme_setup_rxdesc(rxd);
	return 0;
}
2985
2986static void
2987jme_set_vlan(struct jme_softc *sc)
2988{
2989 struct ifnet *ifp = &sc->arpcom.ac_if;
2990 uint32_t reg;
2991
31f0d5a2 2992 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
2993
2994 reg = CSR_READ_4(sc, JME_RXMAC);
2995 reg &= ~RXMAC_VLAN_ENB;
2996 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
2997 reg |= RXMAC_VLAN_ENB;
2998 CSR_WRITE_4(sc, JME_RXMAC, reg);
2999}
3000
/*
 * Program the hardware receive filter (unicast/broadcast policy,
 * promiscuous/allmulti modes, and the 64-bit multicast hash table).
 * Called with all ifnet serializers held.
 */
static void
jme_set_filter(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	rxcfg = CSR_READ_4(sc, JME_RXMAC);
	rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
	    RXMAC_ALLMULTI);

	/*
	 * Always accept frames destined to our station address.
	 * Always accept broadcast frames.
	 */
	rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= RXMAC_PROMISC;
		if (ifp->if_flags & IFF_ALLMULTI)
			rxcfg |= RXMAC_ALLMULTI;
		/* Open the multicast hash completely. */
		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
		return;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table. The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	rxcfg |= RXMAC_MULTICAST;
	bzero(mchash, sizeof(mchash));

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
	}

	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}
3060
3061static int
2870abc4 3062jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
76fbb0b9 3063{
2870abc4
SZ
3064 struct jme_softc *sc = arg1;
3065 struct ifnet *ifp = &sc->arpcom.ac_if;
3066 int error, v;
3067
31f0d5a2 3068 ifnet_serialize_all(ifp);
2870abc4
SZ
3069
3070 v = sc->jme_tx_coal_to;
3071 error = sysctl_handle_int(oidp, &v, 0, req);
3072 if (error || req->newptr == NULL)
3073 goto back;
3074
3075 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3076 error = EINVAL;
3077 goto back;
3078 }
3079
3080 if (v != sc->jme_tx_coal_to) {
3081 sc->jme_tx_coal_to = v;
3082 if (ifp->if_flags & IFF_RUNNING)
3083 jme_set_tx_coal(sc);
3084 }
3085back:
31f0d5a2 3086 ifnet_deserialize_all(ifp);
2870abc4 3087 return error;
76fbb0b9
SZ
3088}
3089
3090static int
2870abc4 3091jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
76fbb0b9 3092{
2870abc4
SZ
3093 struct jme_softc *sc = arg1;
3094 struct ifnet *ifp = &sc->arpcom.ac_if;
3095 int error, v;
3096
31f0d5a2 3097 ifnet_serialize_all(ifp);
2870abc4
SZ
3098
3099 v = sc->jme_tx_coal_pkt;
3100 error = sysctl_handle_int(oidp, &v, 0, req);
3101 if (error || req->newptr == NULL)
3102 goto back;
3103
3104 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3105 error = EINVAL;
3106 goto back;
3107 }
3108
3109 if (v != sc->jme_tx_coal_pkt) {
3110 sc->jme_tx_coal_pkt = v;
3111 if (ifp->if_flags & IFF_RUNNING)
3112 jme_set_tx_coal(sc);
3113 }
3114back:
31f0d5a2 3115 ifnet_deserialize_all(ifp);
2870abc4 3116 return error;
76fbb0b9
SZ
3117}
3118
3119static int
2870abc4 3120jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
76fbb0b9 3121{
2870abc4
SZ
3122 struct jme_softc *sc = arg1;
3123 struct ifnet *ifp = &sc->arpcom.ac_if;
3124 int error, v;
3125
31f0d5a2 3126 ifnet_serialize_all(ifp);
2870abc4
SZ
3127
3128 v = sc->jme_rx_coal_to;
3129 error = sysctl_handle_int(oidp, &v, 0, req);
3130 if (error || req->newptr == NULL)
3131 goto back;
3132
3133 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3134 error = EINVAL;
3135 goto back;
3136 }
3137
3138 if (v != sc->jme_rx_coal_to) {
3139 sc->jme_rx_coal_to = v;
3140 if (ifp->if_flags & IFF_RUNNING)
3141 jme_set_rx_coal(sc);
3142 }
3143back:
31f0d5a2 3144 ifnet_deserialize_all(ifp);
2870abc4 3145 return error;
76fbb0b9
SZ
3146}
3147
3148static int
2870abc4
SZ
3149jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3150{
3151 struct jme_softc *sc = arg1;
3152 struct ifnet *ifp = &sc->arpcom.ac_if;
3153 int error, v;
3154
31f0d5a2 3155 ifnet_serialize_all(ifp);
2870abc4
SZ
3156
3157 v = sc->jme_rx_coal_pkt;
3158 error = sysctl_handle_int(oidp, &v, 0, req);
3159 if (error || req->newptr == NULL)
3160 goto back;
3161
3162 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3163 error = EINVAL;
3164 goto back;
3165 }
3166
3167 if (v != sc->jme_rx_coal_pkt) {
3168 sc->jme_rx_coal_pkt = v;
3169 if (ifp->if_flags & IFF_RUNNING)
3170 jme_set_rx_coal(sc);
3171 }
3172back:
31f0d5a2 3173 ifnet_deserialize_all(ifp);
2870abc4
SZ
3174 return error;
3175}
3176
3177static void
3178jme_set_tx_coal(struct jme_softc *sc)
3179{
3180 uint32_t reg;
3181
3182 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3183 PCCTX_COAL_TO_MASK;
3184 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3185 PCCTX_COAL_PKT_MASK;
3186 reg |= PCCTX_COAL_TXQ0;
3187 CSR_WRITE_4(sc, JME_PCCTX, reg);
3188}
3189
3190static void
3191jme_set_rx_coal(struct jme_softc *sc)
76fbb0b9 3192{
2870abc4 3193 uint32_t reg;
4447c752 3194 int r;
2870abc4
SZ
3195
3196 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3197 PCCRX_COAL_TO_MASK;
3198 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3199 PCCRX_COAL_PKT_MASK;
7b040092 3200 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
9f20b7b3 3201 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
76fbb0b9 3202}
9de40864
SZ
3203
#ifdef DEVICE_POLLING

/*
 * if_poll entry point (DEVICE_POLLING).
 *
 * POLL_REGISTER/POLL_DEREGISTER mask/unmask the device interrupts so
 * polling and interrupt delivery do not overlap.  POLL_AND_CHECK_STATUS
 * and POLL_ONLY drain up to 'count' RX frames from every RX ring, kick
 * the RX engine if a queue ran out of descriptors, then reap completed
 * TX descriptors and restart transmission if packets are queued.
 *
 * Caller holds sc->jme_serialize (asserted below); the per-ring RX
 * serializer and the TX serializer are taken individually around the
 * respective processing.
 */
static void
jme_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct jme_softc *sc = ifp->if_softc;
	uint32_t status;
	int r;

	ASSERT_SERIALIZED(&sc->jme_serialize);

	switch (cmd) {
	case POLL_REGISTER:
		/* Entering polling mode: mask device interrupts. */
		CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
		break;

	case POLL_DEREGISTER:
		/* Leaving polling mode: re-enable device interrupts. */
		CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
		break;

	case POLL_AND_CHECK_STATUS:
	case POLL_ONLY:
		status = CSR_READ_4(sc, JME_INTR_STATUS);

		/* Drain each RX ring under its own serializer. */
		for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
			struct jme_rxdata *rdata =
			    &sc->jme_cdata.jme_rx_data[r];

			lwkt_serialize_enter(&rdata->jme_rx_serialize);
			jme_rxeof(rdata, count);
			lwkt_serialize_exit(&rdata->jme_rx_serialize);
		}

		if (status & INTR_RXQ_DESC_EMPTY) {
			/*
			 * An RX queue ran out of descriptors; ack the
			 * status and restart the RX engine.
			 */
			CSR_WRITE_4(sc, JME_INTR_STATUS, status);
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		}

		/* Reap TX completions and restart output if needed. */
		lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
		jme_txeof(sc);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
		break;
	}
}

#endif	/* DEVICE_POLLING */
4447c752
SZ
3253
3254static int
dea2452a 3255jme_rxring_dma_alloc(struct jme_rxdata *rdata)
4447c752 3256{
1128a202 3257 bus_dmamem_t dmem;
ff7f3632 3258 int error, asize;
4447c752 3259
ff7f3632 3260 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
dea2452a 3261 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
1128a202 3262 JME_RX_RING_ALIGN, 0,
0eb220ec 3263 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
ff7f3632 3264 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
4447c752 3265 if (error) {
dea2452a
SZ
3266 device_printf(rdata->jme_sc->jme_dev,
3267 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
4447c752
SZ
3268 return error;
3269 }
1128a202
SZ
3270 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3271 rdata->jme_rx_ring_map = dmem.dmem_map;
3272 rdata->jme_rx_ring = dmem.dmem_addr;
3273 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
4447c752
SZ
3274
3275 return 0;
3276}
3277
3278static int
064b75ed
SZ
3279jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3280{
3281 if ((paddr & 0xffffffff) == 0) {
3282 /*
3283 * Don't allow lower 32bits of the RX buffer's
3284 * physical address to be 0, else it will break
3285 * hardware pending RSS information delivery
3286 * detection on RX path.
3287 */
3288 return 1;
3289 }
3290 return 0;
3291}
3292
/*
 * Create the DMA tag and per-descriptor DMA maps for one RX ring's
 * mbuf buffers, plus a spare map used by the buffer-swap scheme in
 * jme_newbuf().
 *
 * When hardware RSS is enabled the tag is restricted to 32-bit
 * addresses and jme_rxbuf_dma_filter() additionally rejects addresses
 * whose low 32 bits are zero (see that function's comment).
 *
 * On any failure everything created so far (maps, spare map, tag) is
 * torn down and rdata->jme_rx_tag is reset to NULL, so the caller can
 * treat a non-zero return as "nothing allocated".
 */
static int
jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
{
	bus_addr_t lowaddr;
	int i, error;

	lowaddr = BUS_SPACE_MAXADDR;
	if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
		/* jme_rxbuf_dma_filter will be called */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    jme_rxbuf_dma_filter, NULL,	/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
	    &rdata->jme_rx_tag);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
	    &rdata->jme_rx_sparemap);
	if (error) {
		device_printf(rdata->jme_sc->jme_dev,
		    "could not create %dth spare Rx dmamap.\n",
		    rdata->jme_rx_idx);
		/* Unwind: destroy the tag created above. */
		bus_dma_tag_destroy(rdata->jme_rx_tag);
		rdata->jme_rx_tag = NULL;
		return error;
	}
	for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
		struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];

		error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
		    &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(rdata->jme_sc->jme_dev,
			    "could not create %dth Rx dmamap "
			    "for %dth RX ring.\n", i, rdata->jme_rx_idx);

			/* Unwind: maps 0..i-1, the spare map, the tag. */
			for (j = 0; j < i; ++j) {
				rxd = &rdata->jme_rxdesc[j];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}
3360
3361static void
3362jme_rx_intr(struct jme_softc *sc, uint32_t status)
3363{
eda7db08 3364 int r;
4447c752 3365
7b040092 3366 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
31810fb8 3367 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
31f0d5a2 3368
31810fb8 3369 if (status & rdata->jme_rx_coal) {
31f0d5a2 3370 lwkt_serialize_enter(&rdata->jme_rx_serialize);
dea2452a 3371 jme_rxeof(rdata, -1);
31f0d5a2
SZ
3372 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3373 }
4447c752
SZ
3374 }
3375}
760c056c
SZ
3376
3377static void
3378jme_enable_rss(struct jme_softc *sc)
3379{
24dd1705
SZ
3380 uint32_t rssc, ind;
3381 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
760c056c
SZ
3382 int i;
3383
022f915e
SZ
3384 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3385 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
ed20d0e3 3386 ("%s: invalid # of RX rings (%d)",
022f915e 3387 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
66f75939 3388
760c056c
SZ
3389 rssc = RSSC_HASH_64_ENTRY;
3390 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
7b040092 3391 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
760c056c
SZ
3392 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3393 CSR_WRITE_4(sc, JME_RSSC, rssc);
3394
24dd1705
SZ
3395 toeplitz_get_key(key, sizeof(key));
3396 for (i = 0; i < RSSKEY_NREGS; ++i) {
3397 uint32_t keyreg;
3398
3399 keyreg = RSSKEY_REGVAL(key, i);
3400 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3401
3402 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3403 }
760c056c 3404
66f75939
SZ
3405 /*
3406 * Create redirect table in following fashion:
3407 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3408 */
760c056c 3409 ind = 0;
66f75939
SZ
3410 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3411 int q;
3412
7b040092 3413 q = i % sc->jme_cdata.jme_rx_ring_cnt;
66f75939 3414 ind |= q << (i * 8);
760c056c
SZ
3415 }
3416 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
66f75939 3417
760c056c
SZ
3418 for (i = 0; i < RSSTBL_NREGS; ++i)
3419 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3420}
3421
3422static void
3423jme_disable_rss(struct jme_softc *sc)
3424{
760c056c
SZ
3425 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3426}
31f0d5a2
SZ
3427
3428static void
3429jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3430{
3431 struct jme_softc *sc = ifp->if_softc;
3432
29890f78
SZ
3433 ifnet_serialize_array_enter(sc->jme_serialize_arr,
3434 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
31f0d5a2
SZ
3435}
3436
3437static void
3438jme_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3439{
3440 struct jme_softc *sc = ifp->if_softc;
3441
29890f78
SZ
3442 ifnet_serialize_array_exit(sc->jme_serialize_arr,
3443 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
31f0d5a2
SZ
3444}
3445
3446static int
3447jme_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3448{
3449 struct jme_softc *sc = ifp->if_softc;
3450
29890f78
SZ
3451 return ifnet_serialize_array_try(sc->jme_serialize_arr,
3452 sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE, slz);
31f0d5a2
SZ
3453}
3454
#ifdef INVARIANTS

/*
 * ifnet.if_serialize_assert hook (INVARIANTS builds only): assert that
 * the serializer(s) selected by 'slz' are held (or not held, when
 * 'serialized' is FALSE) by delegating to the serializer-array helper.
 */
static void
jme_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct jme_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->jme_serialize_arr,
	    sc->jme_serialize_cnt, JME_TX_SERIALIZE, JME_RX_SERIALIZE,
	    slz, serialized);
}

#endif	/* INVARIANTS */
58880b0d
SZ
3469
/*
 * Attempt to switch the device to MSI-X: one vector for TX plus one per
 * RX ring.  On any failure the partially-allocated state is released via
 * jme_msix_free() and the device stays on its previous interrupt type.
 *
 * Vector 0 is deliberately left unused (see comment below), so the
 * device must expose msix_count + 1 vectors.  After allocating vectors
 * and IRQ resources, the jme_msinum[] routing registers are built so
 * each hardware interrupt source in JME_INTRS is steered to the vector
 * that declared it in jme_msix_intrs.
 */
static void
jme_msix_try_alloc(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct jme_msix_data *msix;
	int error, i, r, msix_enable, msix_count;

	msix_count = 1 + sc->jme_cdata.jme_rx_ring_cnt;
	KKASSERT(msix_count <= JME_NMSIX);

	/* Tunable/env override for enabling MSI-X. */
	msix_enable = device_getenv_int(dev, "msix.enable", jme_msix_enable);

	/*
	 * We leave the 1st MSI-X vector unused, so we
	 * actually need msix_count + 1 MSI-X vectors.
	 */
	if (!msix_enable || pci_msix_count(dev) < (msix_count + 1))
		return;

	/* Mark all rids invalid so a partial failure can be detected. */
	for (i = 0; i < msix_count; ++i)
		sc->jme_msix[i].jme_msix_rid = -1;

	i = 0;

	/* Slot 0: TX interrupt, pinned to cpu0. */
	msix = &sc->jme_msix[i++];
	msix->jme_msix_cpuid = 0;		/* XXX Put TX to cpu0 */
	msix->jme_msix_arg = &sc->jme_cdata;
	msix->jme_msix_func = jme_msix_tx;
	msix->jme_msix_intrs = INTR_TXQ_COAL | INTR_TXQ_COAL_TO;
	msix->jme_msix_serialize = &sc->jme_cdata.jme_tx_serialize;
	ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc), "%s tx",
	    device_get_nameunit(dev));

	/* Slots 1..n: one RX interrupt per ring, ring r on cpu r. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];

		msix = &sc->jme_msix[i++];
		msix->jme_msix_cpuid = r;	/* XXX Put RX to cpuX */
		msix->jme_msix_arg = rdata;
		msix->jme_msix_func = jme_msix_rx;
		msix->jme_msix_intrs = rdata->jme_rx_coal | rdata->jme_rx_empty;
		msix->jme_msix_serialize = &rdata->jme_rx_serialize;
		ksnprintf(msix->jme_msix_desc, sizeof(msix->jme_msix_desc),
		    "%s rx%d", device_get_nameunit(dev), r);
	}

	KKASSERT(i == msix_count);

	error = pci_setup_msix(dev);
	if (error)
		return;

	/* Setup jme_msix_cnt early, so we could cleanup */
	sc->jme_msix_cnt = msix_count;

	/* Allocate a vector and its IRQ resource for each slot. */
	for (i = 0; i < msix_count; ++i) {
		msix = &sc->jme_msix[i];

		msix->jme_msix_vector = i + 1;	/* vector 0 left unused */
		error = pci_alloc_msix_vector(dev, msix->jme_msix_vector,
		    &msix->jme_msix_rid, msix->jme_msix_cpuid);
		if (error)
			goto back;

		msix->jme_msix_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &msix->jme_msix_rid, RF_ACTIVE);
		if (msix->jme_msix_res == NULL) {
			error = ENOMEM;
			goto back;
		}
	}

	/*
	 * Build the MSINUM routing registers: each interrupt source i
	 * occupies a 4-bit nibble selecting the vector that handles it.
	 */
	for (i = 0; i < JME_INTR_CNT; ++i) {
		uint32_t intr_mask = (1 << i);
		int x;

		if ((JME_INTRS & intr_mask) == 0)
			continue;

		/* Find the first slot that claimed this interrupt source. */
		for (x = 0; x < msix_count; ++x) {
			msix = &sc->jme_msix[x];
			if (msix->jme_msix_intrs & intr_mask) {
				int reg, shift;

				reg = i / JME_MSINUM_FACTOR;
				KKASSERT(reg < JME_MSINUM_CNT);

				shift = (i % JME_MSINUM_FACTOR) * 4;

				sc->jme_msinum[reg] |=
				    (msix->jme_msix_vector << shift);

				break;
			}
		}
	}

	if (bootverbose) {
		for (i = 0; i < JME_MSINUM_CNT; ++i) {
			device_printf(dev, "MSINUM%d: %#x\n", i,
			    sc->jme_msinum[i]);
		}
	}

	pci_enable_msix(dev);
	sc->jme_irq_type = PCI_INTR_TYPE_MSIX;

back:
	/* error == 0 on the success path; nothing is freed then. */
	if (error)
		jme_msix_free(dev);
}
3581
3582static int
3583jme_intr_alloc(device_t dev)
3584{