jme: Factor out jme_rx_restart
[dragonfly.git] / sys / dev / netif / jme / if_jme.c
CommitLineData
76fbb0b9
SZ
1/*-
2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
76fbb0b9
SZ
28 */
29
8a0620e4 30#include "opt_ifpoll.h"
93bfe1b8 31#include "opt_jme.h"
9de40864 32
76fbb0b9
SZ
33#include <sys/param.h>
34#include <sys/endian.h>
35#include <sys/kernel.h>
36#include <sys/bus.h>
37#include <sys/interrupt.h>
38#include <sys/malloc.h>
39#include <sys/proc.h>
40#include <sys/rman.h>
41#include <sys/serialize.h>
31f0d5a2 42#include <sys/serialize2.h>
76fbb0b9
SZ
43#include <sys/socket.h>
44#include <sys/sockio.h>
45#include <sys/sysctl.h>
46
47#include <net/ethernet.h>
48#include <net/if.h>
49#include <net/bpf.h>
50#include <net/if_arp.h>
51#include <net/if_dl.h>
52#include <net/if_media.h>
8a0620e4 53#include <net/if_poll.h>
76fbb0b9 54#include <net/ifq_var.h>
24dd1705 55#include <net/toeplitz.h>
a6acc6e2 56#include <net/toeplitz2.h>
76fbb0b9
SZ
57#include <net/vlan/if_vlan_var.h>
58#include <net/vlan/if_vlan_ether.h>
59
1bedd927
SZ
60#include <netinet/ip.h>
61#include <netinet/tcp.h>
a6acc6e2 62
76fbb0b9 63#include <dev/netif/mii_layer/miivar.h>
dbe37f03 64#include <dev/netif/mii_layer/jmphyreg.h>
76fbb0b9
SZ
65
66#include <bus/pci/pcireg.h>
67#include <bus/pci/pcivar.h>
68#include <bus/pci/pcidevs.h>
69
08c76ecf
SZ
70#include <dev/netif/jme/if_jmereg.h>
71#include <dev/netif/jme/if_jmevar.h>
76fbb0b9
SZ
72
73#include "miibus_if.h"
74
29890f78
SZ
75#define JME_TX_SERIALIZE 1
76#define JME_RX_SERIALIZE 2
77
76fbb0b9
SZ
78#define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
79
760c056c
SZ
80#ifdef JME_RSS_DEBUG
81#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) \
82do { \
66f75939 83 if ((sc)->jme_rss_debug >= (lvl)) \
760c056c
SZ
84 if_printf(&(sc)->arpcom.ac_if, fmt, __VA_ARGS__); \
85} while (0)
86#else /* !JME_RSS_DEBUG */
87#define JME_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
88#endif /* JME_RSS_DEBUG */
89
76fbb0b9
SZ
90static int jme_probe(device_t);
91static int jme_attach(device_t);
92static int jme_detach(device_t);
93static int jme_shutdown(device_t);
94static int jme_suspend(device_t);
95static int jme_resume(device_t);
96
97static int jme_miibus_readreg(device_t, int, int);
98static int jme_miibus_writereg(device_t, int, int, int);
99static void jme_miibus_statchg(device_t);
100
101static void jme_init(void *);
102static int jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
103static void jme_start(struct ifnet *);
104static void jme_watchdog(struct ifnet *);
105static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
106static int jme_mediachange(struct ifnet *);
8a0620e4
SZ
107#ifdef IFPOLL_ENABLE
108static void jme_npoll(struct ifnet *, struct ifpoll_info *);
9de40864 109#endif
31f0d5a2
SZ
110static void jme_serialize(struct ifnet *, enum ifnet_serialize);
111static void jme_deserialize(struct ifnet *, enum ifnet_serialize);
112static int jme_tryserialize(struct ifnet *, enum ifnet_serialize);
113#ifdef INVARIANTS
114static void jme_serialize_assert(struct ifnet *, enum ifnet_serialize,
115 boolean_t);
116#endif
76fbb0b9
SZ
117
118static void jme_intr(void *);
58880b0d
SZ
119static void jme_msix_tx(void *);
120static void jme_msix_rx(void *);
e0009afb 121static void jme_msix_status(void *);
76fbb0b9 122static void jme_txeof(struct jme_softc *);
dea2452a 123static void jme_rxeof(struct jme_rxdata *, int);
4447c752 124static void jme_rx_intr(struct jme_softc *, uint32_t);
8a0620e4
SZ
125static void jme_enable_intr(struct jme_softc *);
126static void jme_disable_intr(struct jme_softc *);
de437f82 127static void jme_rx_restart(struct jme_softc *, uint32_t);
76fbb0b9 128
58880b0d
SZ
129static int jme_msix_setup(device_t);
130static void jme_msix_teardown(device_t, int);
131static int jme_intr_setup(device_t);
132static void jme_intr_teardown(device_t);
133static void jme_msix_try_alloc(device_t);
134static void jme_msix_free(device_t);
135static int jme_intr_alloc(device_t);
136static void jme_intr_free(device_t);
76fbb0b9 137static int jme_dma_alloc(struct jme_softc *);
0b3414d9 138static void jme_dma_free(struct jme_softc *);
dea2452a 139static int jme_init_rx_ring(struct jme_rxdata *);
76fbb0b9
SZ
140static void jme_init_tx_ring(struct jme_softc *);
141static void jme_init_ssb(struct jme_softc *);
dea2452a 142static int jme_newbuf(struct jme_rxdata *, struct jme_rxdesc *, int);
76fbb0b9 143static int jme_encap(struct jme_softc *, struct mbuf **);
dea2452a
SZ
144static void jme_rxpkt(struct jme_rxdata *);
145static int jme_rxring_dma_alloc(struct jme_rxdata *);
146static int jme_rxbuf_dma_alloc(struct jme_rxdata *);
064b75ed 147static int jme_rxbuf_dma_filter(void *, bus_addr_t);
76fbb0b9
SZ
148
149static void jme_tick(void *);
150static void jme_stop(struct jme_softc *);
151static void jme_reset(struct jme_softc *);
58880b0d 152static void jme_set_msinum(struct jme_softc *);
76fbb0b9
SZ
153static void jme_set_vlan(struct jme_softc *);
154static void jme_set_filter(struct jme_softc *);
155static void jme_stop_tx(struct jme_softc *);
156static void jme_stop_rx(struct jme_softc *);
157static void jme_mac_config(struct jme_softc *);
158static void jme_reg_macaddr(struct jme_softc *, uint8_t[]);
159static int jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
160static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
161#ifdef notyet
162static void jme_setwol(struct jme_softc *);
163static void jme_setlinkspeed(struct jme_softc *);
164#endif
2870abc4
SZ
165static void jme_set_tx_coal(struct jme_softc *);
166static void jme_set_rx_coal(struct jme_softc *);
760c056c
SZ
167static void jme_enable_rss(struct jme_softc *);
168static void jme_disable_rss(struct jme_softc *);
cccc3955
SZ
169static void jme_serialize_skipmain(struct jme_softc *);
170static void jme_deserialize_skipmain(struct jme_softc *);
76fbb0b9
SZ
171
172static void jme_sysctl_node(struct jme_softc *);
2870abc4
SZ
173static int jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
174static int jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
175static int jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
176static int jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
8a0620e4
SZ
177#ifdef IFPOLL_ENABLE
178static int jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
179static int jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
180#endif
76fbb0b9
SZ
181
182/*
183 * Devices supported by this driver.
184 */
185static const struct jme_dev {
186 uint16_t jme_vendorid;
187 uint16_t jme_deviceid;
3a5f3f36 188 uint32_t jme_caps;
76fbb0b9
SZ
189 const char *jme_name;
190} jme_devs[] = {
44e8c66c 191 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
3a5f3f36 192 JME_CAP_JUMBO,
76fbb0b9 193 "JMicron Inc, JMC250 Gigabit Ethernet" },
44e8c66c 194 { PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
3a5f3f36 195 JME_CAP_FASTETH,
76fbb0b9 196 "JMicron Inc, JMC260 Fast Ethernet" },
3a5f3f36 197 { 0, 0, 0, NULL }
76fbb0b9
SZ
198};
199
200static device_method_t jme_methods[] = {
201 /* Device interface. */
202 DEVMETHOD(device_probe, jme_probe),
203 DEVMETHOD(device_attach, jme_attach),
204 DEVMETHOD(device_detach, jme_detach),
205 DEVMETHOD(device_shutdown, jme_shutdown),
206 DEVMETHOD(device_suspend, jme_suspend),
207 DEVMETHOD(device_resume, jme_resume),
208
209 /* Bus interface. */
210 DEVMETHOD(bus_print_child, bus_generic_print_child),
211 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
212
213 /* MII interface. */
214 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
215 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
216 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
217
218 { NULL, NULL }
219};
220
221static driver_t jme_driver = {
222 "jme",
223 jme_methods,
224 sizeof(struct jme_softc)
225};
226
227static devclass_t jme_devclass;
228
229DECLARE_DUMMY_MODULE(if_jme);
230MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
aa2b9d05
SW
231DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, NULL, NULL);
232DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, NULL, NULL);
76fbb0b9 233
4447c752
SZ
234static const struct {
235 uint32_t jme_coal;
236 uint32_t jme_comp;
58880b0d 237 uint32_t jme_empty;
4447c752 238} jme_rx_status[JME_NRXRING_MAX] = {
58880b0d
SZ
239 { INTR_RXQ0_COAL | INTR_RXQ0_COAL_TO, INTR_RXQ0_COMP,
240 INTR_RXQ0_DESC_EMPTY },
241 { INTR_RXQ1_COAL | INTR_RXQ1_COAL_TO, INTR_RXQ1_COMP,
242 INTR_RXQ1_DESC_EMPTY },
243 { INTR_RXQ2_COAL | INTR_RXQ2_COAL_TO, INTR_RXQ2_COMP,
244 INTR_RXQ2_DESC_EMPTY },
245 { INTR_RXQ3_COAL | INTR_RXQ3_COAL_TO, INTR_RXQ3_COMP,
246 INTR_RXQ3_DESC_EMPTY }
4447c752
SZ
247};
248
69325526
SZ
249static int jme_rx_desc_count = JME_RX_DESC_CNT_DEF;
250static int jme_tx_desc_count = JME_TX_DESC_CNT_DEF;
6afef6ab 251static int jme_rx_ring_count = 0;
3eba890a 252static int jme_msi_enable = 1;
58880b0d 253static int jme_msix_enable = 1;
83b03786
SZ
254
255TUNABLE_INT("hw.jme.rx_desc_count", &jme_rx_desc_count);
256TUNABLE_INT("hw.jme.tx_desc_count", &jme_tx_desc_count);
413d06bb 257TUNABLE_INT("hw.jme.rx_ring_count", &jme_rx_ring_count);
3eba890a 258TUNABLE_INT("hw.jme.msi.enable", &jme_msi_enable);
58880b0d 259TUNABLE_INT("hw.jme.msix.enable", &jme_msix_enable);
83b03786 260
fd2a6d2c
SZ
261static __inline void
262jme_setup_rxdesc(struct jme_rxdesc *rxd)
263{
264 struct jme_desc *desc;
265
266 desc = rxd->rx_desc;
267 desc->buflen = htole32(MCLBYTES);
268 desc->addr_lo = htole32(JME_ADDR_LO(rxd->rx_paddr));
269 desc->addr_hi = htole32(JME_ADDR_HI(rxd->rx_paddr));
270 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
271}
272
76fbb0b9
SZ
273/*
274 * Read a PHY register on the MII of the JMC250.
275 */
276static int
277jme_miibus_readreg(device_t dev, int phy, int reg)
278{
279 struct jme_softc *sc = device_get_softc(dev);
280 uint32_t val;
281 int i;
282
283 /* For FPGA version, PHY address 0 should be ignored. */
ec7e787b 284 if (sc->jme_caps & JME_CAP_FPGA) {
76fbb0b9
SZ
285 if (phy == 0)
286 return (0);
287 } else {
288 if (sc->jme_phyaddr != phy)
289 return (0);
290 }
291
292 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
293 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
294
295 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
296 DELAY(1);
297 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
298 break;
299 }
300 if (i == 0) {
301 device_printf(sc->jme_dev, "phy read timeout: "
302 "phy %d, reg %d\n", phy, reg);
303 return (0);
304 }
305
306 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
307}
308
309/*
310 * Write a PHY register on the MII of the JMC250.
311 */
312static int
313jme_miibus_writereg(device_t dev, int phy, int reg, int val)
314{
315 struct jme_softc *sc = device_get_softc(dev);
316 int i;
317
318 /* For FPGA version, PHY address 0 should be ignored. */
ec7e787b 319 if (sc->jme_caps & JME_CAP_FPGA) {
76fbb0b9
SZ
320 if (phy == 0)
321 return (0);
322 } else {
323 if (sc->jme_phyaddr != phy)
324 return (0);
325 }
326
327 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
328 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
329 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
330
331 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
332 DELAY(1);
333 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
334 break;
335 }
336 if (i == 0) {
337 device_printf(sc->jme_dev, "phy write timeout: "
338 "phy %d, reg %d\n", phy, reg);
339 }
340
341 return (0);
342}
343
344/*
345 * Callback from MII layer when media changes.
346 */
347static void
348jme_miibus_statchg(device_t dev)
349{
350 struct jme_softc *sc = device_get_softc(dev);
351 struct ifnet *ifp = &sc->arpcom.ac_if;
352 struct mii_data *mii;
353 struct jme_txdesc *txd;
354 bus_addr_t paddr;
4447c752 355 int i, r;
76fbb0b9 356
cccc3955
SZ
357 if (sc->jme_in_tick)
358 jme_serialize_skipmain(sc);
31f0d5a2 359 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
360
361 if ((ifp->if_flags & IFF_RUNNING) == 0)
cccc3955 362 goto done;
76fbb0b9
SZ
363
364 mii = device_get_softc(sc->jme_miibus);
365
cccc3955 366 sc->jme_has_link = FALSE;
76fbb0b9
SZ
367 if ((mii->mii_media_status & IFM_AVALID) != 0) {
368 switch (IFM_SUBTYPE(mii->mii_media_active)) {
369 case IFM_10_T:
370 case IFM_100_TX:
cccc3955 371 sc->jme_has_link = TRUE;
76fbb0b9
SZ
372 break;
373 case IFM_1000_T:
ec7e787b 374 if (sc->jme_caps & JME_CAP_FASTETH)
76fbb0b9 375 break;
cccc3955 376 sc->jme_has_link = TRUE;
76fbb0b9
SZ
377 break;
378 default:
379 break;
380 }
381 }
382
383 /*
384 * Disabling Rx/Tx MACs have a side-effect of resetting
385 * JME_TXNDA/JME_RXNDA register to the first address of
386 * Tx/Rx descriptor address. So driver should reset its
387 * internal procucer/consumer pointer and reclaim any
388 * allocated resources. Note, just saving the value of
389 * JME_TXNDA and JME_RXNDA registers before stopping MAC
390 * and restoring JME_TXNDA/JME_RXNDA register is not
391 * sufficient to make sure correct MAC state because
392 * stopping MAC operation can take a while and hardware
393 * might have updated JME_TXNDA/JME_RXNDA registers
394 * during the stop operation.
395 */
396
397 /* Disable interrupts */
398 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
399
400 /* Stop driver */
401 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
402 ifp->if_timer = 0;
403 callout_stop(&sc->jme_tick_ch);
404
405 /* Stop receiver/transmitter. */
406 jme_stop_rx(sc);
407 jme_stop_tx(sc);
408
7b040092 409 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
4447c752
SZ
410 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
411
dea2452a 412 jme_rxeof(rdata, -1);
4447c752
SZ
413 if (rdata->jme_rxhead != NULL)
414 m_freem(rdata->jme_rxhead);
dea2452a 415 JME_RXCHAIN_RESET(rdata);
4447c752
SZ
416
417 /*
418 * Reuse configured Rx descriptors and reset
419 * procuder/consumer index.
420 */
421 rdata->jme_rx_cons = 0;
422 }
6afef6ab 423 if (JME_ENABLE_HWRSS(sc))
deaeb5a5
SZ
424 jme_enable_rss(sc);
425 else
426 jme_disable_rss(sc);
76fbb0b9
SZ
427
428 jme_txeof(sc);
429 if (sc->jme_cdata.jme_tx_cnt != 0) {
430 /* Remove queued packets for transmit. */
b020bb10 431 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
76fbb0b9
SZ
432 txd = &sc->jme_cdata.jme_txdesc[i];
433 if (txd->tx_m != NULL) {
434 bus_dmamap_unload(
435 sc->jme_cdata.jme_tx_tag,
436 txd->tx_dmamap);
437 m_freem(txd->tx_m);
438 txd->tx_m = NULL;
439 txd->tx_ndesc = 0;
440 ifp->if_oerrors++;
441 }
442 }
443 }
76fbb0b9
SZ
444 jme_init_tx_ring(sc);
445
446 /* Initialize shadow status block. */
447 jme_init_ssb(sc);
448
449 /* Program MAC with resolved speed/duplex/flow-control. */
cccc3955 450 if (sc->jme_has_link) {
76fbb0b9
SZ
451 jme_mac_config(sc);
452
76fbb0b9
SZ
453 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
454
455 /* Set Tx ring address to the hardware. */
7405bec3 456 paddr = sc->jme_cdata.jme_tx_ring_paddr;
76fbb0b9
SZ
457 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
458 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
459
7b040092 460 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
4447c752
SZ
461 CSR_WRITE_4(sc, JME_RXCSR,
462 sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
463
464 /* Set Rx ring address to the hardware. */
465 paddr = sc->jme_cdata.jme_rx_data[r].jme_rx_ring_paddr;
466 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
467 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
468 }
76fbb0b9
SZ
469
470 /* Restart receiver/transmitter. */
471 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
472 RXCSR_RXQ_START);
473 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
474 }
475
476 ifp->if_flags |= IFF_RUNNING;
477 ifp->if_flags &= ~IFF_OACTIVE;
478 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
479
8a0620e4
SZ
480#ifdef IFPOLL_ENABLE
481 if (!(ifp->if_flags & IFF_NPOLLING))
9de40864 482#endif
76fbb0b9
SZ
483 /* Reenable interrupts. */
484 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
cccc3955
SZ
485
486done:
487 if (sc->jme_in_tick)
488 jme_deserialize_skipmain(sc);
76fbb0b9
SZ
489}
490
491/*
492 * Get the current interface media status.
493 */
494static void
495jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
496{
497 struct jme_softc *sc = ifp->if_softc;
498 struct mii_data *mii = device_get_softc(sc->jme_miibus);
499
31f0d5a2 500 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
501
502 mii_pollstat(mii);
503 ifmr->ifm_status = mii->mii_media_status;
504 ifmr->ifm_active = mii->mii_media_active;
505}
506
507/*
508 * Set hardware to newly-selected media.
509 */
510static int
511jme_mediachange(struct ifnet *ifp)
512{
513 struct jme_softc *sc = ifp->if_softc;
514 struct mii_data *mii = device_get_softc(sc->jme_miibus);
515 int error;
516
31f0d5a2 517 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
518
519 if (mii->mii_instance != 0) {
520 struct mii_softc *miisc;
521
522 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
523 mii_phy_reset(miisc);
524 }
525 error = mii_mediachg(mii);
526
527 return (error);
528}
529
530static int
531jme_probe(device_t dev)
532{
533 const struct jme_dev *sp;
534 uint16_t vid, did;
535
536 vid = pci_get_vendor(dev);
537 did = pci_get_device(dev);
538 for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
539 if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
3a5f3f36
SZ
540 struct jme_softc *sc = device_get_softc(dev);
541
542 sc->jme_caps = sp->jme_caps;
76fbb0b9 543 device_set_desc(dev, sp->jme_name);
76fbb0b9
SZ
544 return (0);
545 }
546 }
547 return (ENXIO);
548}
549
550static int
551jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
552{
553 uint32_t reg;
554 int i;
555
556 *val = 0;
557 for (i = JME_TIMEOUT; i > 0; i--) {
558 reg = CSR_READ_4(sc, JME_SMBCSR);
559 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
560 break;
561 DELAY(1);
562 }
563
564 if (i == 0) {
565 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
566 return (ETIMEDOUT);
567 }
568
569 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
570 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
571 for (i = JME_TIMEOUT; i > 0; i--) {
572 DELAY(1);
573 reg = CSR_READ_4(sc, JME_SMBINTF);
574 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
575 break;
576 }
577
578 if (i == 0) {
579 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
580 return (ETIMEDOUT);
581 }
582
583 reg = CSR_READ_4(sc, JME_SMBINTF);
584 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
585
586 return (0);
587}
588
589static int
590jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
591{
592 uint8_t fup, reg, val;
593 uint32_t offset;
594 int match;
595
596 offset = 0;
597 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
598 fup != JME_EEPROM_SIG0)
599 return (ENOENT);
600 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
601 fup != JME_EEPROM_SIG1)
602 return (ENOENT);
603 match = 0;
604 do {
605 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
606 break;
09927fe6
SZ
607 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
608 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
76fbb0b9
SZ
609 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
610 break;
611 if (reg >= JME_PAR0 &&
612 reg < JME_PAR0 + ETHER_ADDR_LEN) {
613 if (jme_eeprom_read_byte(sc, offset + 2,
614 &val) != 0)
615 break;
616 eaddr[reg - JME_PAR0] = val;
617 match++;
618 }
619 }
09927fe6
SZ
620 /* Check for the end of EEPROM descriptor. */
621 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
622 break;
76fbb0b9
SZ
623 /* Try next eeprom descriptor. */
624 offset += JME_EEPROM_DESC_BYTES;
625 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
626
627 if (match == ETHER_ADDR_LEN)
628 return (0);
629
630 return (ENOENT);
631}
632
633static void
634jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
635{
636 uint32_t par0, par1;
637
638 /* Read station address. */
639 par0 = CSR_READ_4(sc, JME_PAR0);
640 par1 = CSR_READ_4(sc, JME_PAR1);
641 par1 &= 0xFFFF;
642 if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
643 device_printf(sc->jme_dev,
644 "generating fake ethernet address.\n");
645 par0 = karc4random();
646 /* Set OUI to JMicron. */
647 eaddr[0] = 0x00;
648 eaddr[1] = 0x1B;
649 eaddr[2] = 0x8C;
650 eaddr[3] = (par0 >> 16) & 0xff;
651 eaddr[4] = (par0 >> 8) & 0xff;
652 eaddr[5] = par0 & 0xff;
653 } else {
654 eaddr[0] = (par0 >> 0) & 0xFF;
655 eaddr[1] = (par0 >> 8) & 0xFF;
656 eaddr[2] = (par0 >> 16) & 0xFF;
657 eaddr[3] = (par0 >> 24) & 0xFF;
658 eaddr[4] = (par1 >> 0) & 0xFF;
659 eaddr[5] = (par1 >> 8) & 0xFF;
660 }
661}
662
663static int
664jme_attach(device_t dev)
665{
666 struct jme_softc *sc = device_get_softc(dev);
667 struct ifnet *ifp = &sc->arpcom.ac_if;
668 uint32_t reg;
b249905b
SZ
669 uint16_t did;
670 uint8_t pcie_ptr, rev;
a3fede62 671 int error = 0, i, j, rx_desc_cnt, coal_max;
76fbb0b9 672 uint8_t eaddr[ETHER_ADDR_LEN];
8a0620e4
SZ
673#ifdef IFPOLL_ENABLE
674 int offset, offset_def;
675#endif
76fbb0b9 676
31f0d5a2
SZ
677 lwkt_serialize_init(&sc->jme_serialize);
678 lwkt_serialize_init(&sc->jme_cdata.jme_tx_serialize);
679 for (i = 0; i < JME_NRXRING_MAX; ++i) {
680 lwkt_serialize_init(
681 &sc->jme_cdata.jme_rx_data[i].jme_rx_serialize);
682 }
683
7b040092 684 rx_desc_cnt = device_getenv_int(dev, "rx_desc_count",
1cc217a9 685 jme_rx_desc_count);
7b040092
SZ
686 rx_desc_cnt = roundup(rx_desc_cnt, JME_NDESC_ALIGN);
687 if (rx_desc_cnt > JME_NDESC_MAX)
688 rx_desc_cnt = JME_NDESC_MAX;
69325526 689
b020bb10 690 sc->jme_cdata.jme_tx_desc_cnt = device_getenv_int(dev, "tx_desc_count",
1cc217a9 691 jme_tx_desc_count);
b020bb10
SZ
692 sc->jme_cdata.jme_tx_desc_cnt = roundup(sc->jme_cdata.jme_tx_desc_cnt,
693 JME_NDESC_ALIGN);
694 if (sc->jme_cdata.jme_tx_desc_cnt > JME_NDESC_MAX)
695 sc->jme_cdata.jme_tx_desc_cnt = JME_NDESC_MAX;
83b03786 696
9389fe19 697 /*
a317449e 698 * Calculate rx rings
9389fe19 699 */
7b040092 700 sc->jme_cdata.jme_rx_ring_cnt = device_getenv_int(dev, "rx_ring_count",
1cc217a9 701 jme_rx_ring_count);
7b040092
SZ
702 sc->jme_cdata.jme_rx_ring_cnt =
703 if_ring_count2(sc->jme_cdata.jme_rx_ring_cnt, JME_NRXRING_MAX);
4447c752 704
31f0d5a2
SZ
705 i = 0;
706 sc->jme_serialize_arr[i++] = &sc->jme_serialize;
29890f78
SZ
707
708 KKASSERT(i == JME_TX_SERIALIZE);
31f0d5a2 709 sc->jme_serialize_arr[i++] = &sc->jme_cdata.jme_tx_serialize;
29890f78
SZ
710
711 KKASSERT(i == JME_RX_SERIALIZE);
7b040092 712 for (j = 0; j < sc->jme_cdata.jme_rx_ring_cnt; ++j) {
31f0d5a2
SZ
713 sc->jme_serialize_arr[i++] =
714 &sc->jme_cdata.jme_rx_data[j].jme_rx_serialize;
715 }
716 KKASSERT(i <= JME_NSERIALIZE);
717 sc->jme_serialize_cnt = i;
718
58880b0d 719 sc->jme_cdata.jme_sc = sc;
7b040092 720 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
58880b0d
SZ
721 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];
722
723 rdata->jme_sc = sc;
724 rdata->jme_rx_coal = jme_rx_status[i].jme_coal;
725 rdata->jme_rx_comp = jme_rx_status[i].jme_comp;
726 rdata->jme_rx_empty = jme_rx_status[i].jme_empty;
727 rdata->jme_rx_idx = i;
7b040092 728 rdata->jme_rx_desc_cnt = rx_desc_cnt;
58880b0d
SZ
729 }
730
76fbb0b9 731 sc->jme_dev = dev;
b249905b
SZ
732 sc->jme_lowaddr = BUS_SPACE_MAXADDR;
733
76fbb0b9
SZ
734 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
735
736 callout_init(&sc->jme_tick_ch);
737
738#ifndef BURN_BRIDGES
739 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
740 uint32_t irq, mem;
741
742 irq = pci_read_config(dev, PCIR_INTLINE, 4);
743 mem = pci_read_config(dev, JME_PCIR_BAR, 4);
744
745 device_printf(dev, "chip is in D%d power mode "
746 "-- setting to D0\n", pci_get_powerstate(dev));
747
748 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
749
750 pci_write_config(dev, PCIR_INTLINE, irq, 4);
751 pci_write_config(dev, JME_PCIR_BAR, mem, 4);
752 }
753#endif /* !BURN_BRIDGE */
754
755 /* Enable bus mastering */
756 pci_enable_busmaster(dev);
757
758 /*
759 * Allocate IO memory
760 *
761 * JMC250 supports both memory mapped and I/O register space
762 * access. Because I/O register access should use different
763 * BARs to access registers it's waste of time to use I/O
764 * register spce access. JMC250 uses 16K to map entire memory
765 * space.
766 */
767 sc->jme_mem_rid = JME_PCIR_BAR;
768 sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
769 &sc->jme_mem_rid, RF_ACTIVE);
770 if (sc->jme_mem_res == NULL) {
771 device_printf(dev, "can't allocate IO memory\n");
772 return ENXIO;
773 }
774 sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
775 sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);
776
777 /*
778 * Allocate IRQ
779 */
58880b0d
SZ
780 error = jme_intr_alloc(dev);
781 if (error)
76fbb0b9 782 goto fail;
76fbb0b9
SZ
783
784 /*
b249905b 785 * Extract revisions
76fbb0b9
SZ
786 */
787 reg = CSR_READ_4(sc, JME_CHIPMODE);
788 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
789 CHIPMODE_NOT_FPGA) {
ec7e787b 790 sc->jme_caps |= JME_CAP_FPGA;
76fbb0b9 791 if (bootverbose) {
b249905b 792 device_printf(dev, "FPGA revision: 0x%04x\n",
76fbb0b9
SZ
793 (reg & CHIPMODE_FPGA_REV_MASK) >>
794 CHIPMODE_FPGA_REV_SHIFT);
795 }
796 }
797
b249905b
SZ
798 /* NOTE: FM revision is put in the upper 4 bits */
799 rev = ((reg & CHIPMODE_REVFM_MASK) >> CHIPMODE_REVFM_SHIFT) << 4;
800 rev |= (reg & CHIPMODE_REVECO_MASK) >> CHIPMODE_REVECO_SHIFT;
801 if (bootverbose)
802 device_printf(dev, "Revision (FM/ECO): 0x%02x\n", rev);
803
804 did = pci_get_device(dev);
805 switch (did) {
806 case PCI_PRODUCT_JMICRON_JMC250:
807 if (rev == JME_REV1_A2)
808 sc->jme_workaround |= JME_WA_EXTFIFO | JME_WA_HDX;
809 break;
810
811 case PCI_PRODUCT_JMICRON_JMC260:
812 if (rev == JME_REV2)
813 sc->jme_lowaddr = BUS_SPACE_MAXADDR_32BIT;
814 break;
815
816 default:
ed20d0e3 817 panic("unknown device id 0x%04x", did);
b249905b
SZ
818 }
819 if (rev >= JME_REV2) {
820 sc->jme_clksrc = GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC;
821 sc->jme_clksrc_1000 = GHC_TXOFL_CLKSRC_1000 |
822 GHC_TXMAC_CLKSRC_1000;
823 }
824
76fbb0b9
SZ
825 /* Reset the ethernet controller. */
826 jme_reset(sc);
827
58880b0d
SZ
828 /* Map MSI/MSI-X vectors */
829 jme_set_msinum(sc);
830
76fbb0b9
SZ
831 /* Get station address. */
832 reg = CSR_READ_4(sc, JME_SMBCSR);
833 if (reg & SMBCSR_EEPROM_PRESENT)
834 error = jme_eeprom_macaddr(sc, eaddr);
835 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
836 if (error != 0 && (bootverbose)) {
837 device_printf(dev, "ethernet hardware address "
838 "not found in EEPROM.\n");
839 }
840 jme_reg_macaddr(sc, eaddr);
841 }
842
843 /*
844 * Save PHY address.
845 * Integrated JR0211 has fixed PHY address whereas FPGA version
846 * requires PHY probing to get correct PHY address.
847 */
ec7e787b 848 if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
76fbb0b9
SZ
849 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
850 GPREG0_PHY_ADDR_MASK;
851 if (bootverbose) {
852 device_printf(dev, "PHY is at address %d.\n",
853 sc->jme_phyaddr);
854 }
855 } else {
856 sc->jme_phyaddr = 0;
857 }
858
859 /* Set max allowable DMA size. */
860 pcie_ptr = pci_get_pciecap_ptr(dev);
861 if (pcie_ptr != 0) {
862 uint16_t ctrl;
863
ec7e787b 864 sc->jme_caps |= JME_CAP_PCIE;
76fbb0b9
SZ
865 ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
866 if (bootverbose) {
867 device_printf(dev, "Read request size : %d bytes.\n",
868 128 << ((ctrl >> 12) & 0x07));
869 device_printf(dev, "TLP payload size : %d bytes.\n",
870 128 << ((ctrl >> 5) & 0x07));
871 }
872 switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
873 case PCIEM_DEVCTL_MAX_READRQ_128:
874 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
875 break;
876 case PCIEM_DEVCTL_MAX_READRQ_256:
877 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
878 break;
879 default:
880 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
881 break;
882 }
883 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
884 } else {
885 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
886 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
887 }
888
889#ifdef notyet
890 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
ec7e787b 891 sc->jme_caps |= JME_CAP_PMCAP;
76fbb0b9
SZ
892#endif
893
8a0620e4
SZ
894#ifdef IFPOLL_ENABLE
895 /*
896 * NPOLLING RX CPU offset
897 */
898 if (sc->jme_cdata.jme_rx_ring_cnt == ncpus2) {
899 offset = 0;
900 } else {
901 offset_def = (sc->jme_cdata.jme_rx_ring_cnt *
902 device_get_unit(dev)) % ncpus2;
903 offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
904 if (offset >= ncpus2 ||
905 offset % sc->jme_cdata.jme_rx_ring_cnt != 0) {
906 device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
907 offset, offset_def);
908 offset = offset_def;
909 }
910 }
911 sc->jme_npoll_rxoff = offset;
912
913 /*
914 * NPOLLING TX CPU offset
915 */
916 offset_def = sc->jme_npoll_rxoff;
917 offset = device_getenv_int(dev, "npoll.txoff", offset_def);
918 if (offset >= ncpus2) {
919 device_printf(dev, "invalid npoll.txoff %d, use %d\n",
920 offset, offset_def);
921 offset = offset_def;
922 }
923 sc->jme_npoll_txoff = offset;
924#endif
925
76fbb0b9 926 /*
a3fede62
SZ
927 * Set default coalesce valves
928 */
929 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
930 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
931 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
932 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
933
934 /*
935 * Adjust coalesce valves, in case that the number of TX/RX
936 * descs are set to small values by users.
937 *
938 * NOTE: coal_max will not be zero, since number of descs
939 * must aligned by JME_NDESC_ALIGN (16 currently)
940 */
941 coal_max = sc->jme_cdata.jme_tx_desc_cnt / 2;
942 if (coal_max < sc->jme_tx_coal_pkt)
943 sc->jme_tx_coal_pkt = coal_max;
944
945 coal_max = sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt / 2;
946 if (coal_max < sc->jme_rx_coal_pkt)
947 sc->jme_rx_coal_pkt = coal_max;
948
949 /*
76fbb0b9
SZ
950 * Create sysctl tree
951 */
952 jme_sysctl_node(sc);
953
954 /* Allocate DMA stuffs */
955 error = jme_dma_alloc(sc);
956 if (error)
957 goto fail;
958
959 ifp->if_softc = sc;
960 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
961 ifp->if_init = jme_init;
962 ifp->if_ioctl = jme_ioctl;
963 ifp->if_start = jme_start;
8a0620e4
SZ
964#ifdef IFPOLL_ENABLE
965 ifp->if_npoll = jme_npoll;
9de40864 966#endif
76fbb0b9 967 ifp->if_watchdog = jme_watchdog;
31f0d5a2
SZ
968 ifp->if_serialize = jme_serialize;
969 ifp->if_deserialize = jme_deserialize;
970 ifp->if_tryserialize = jme_tryserialize;
971#ifdef INVARIANTS
972 ifp->if_serialize_assert = jme_serialize_assert;
973#endif
b020bb10
SZ
974 ifq_set_maxlen(&ifp->if_snd,
975 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
76fbb0b9
SZ
976 ifq_set_ready(&ifp->if_snd);
977
978 /* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
979 ifp->if_capabilities = IFCAP_HWCSUM |
1bedd927 980 IFCAP_TSO |
76fbb0b9
SZ
981 IFCAP_VLAN_MTU |
982 IFCAP_VLAN_HWTAGGING;
7b040092 983 if (sc->jme_cdata.jme_rx_ring_cnt > JME_NRXRING_MIN)
d585233c 984 ifp->if_capabilities |= IFCAP_RSS;
76fbb0b9
SZ
985 ifp->if_capenable = ifp->if_capabilities;
986
3d2aeb15
SZ
987 /*
988 * Disable TXCSUM by default to improve bulk data
989 * transmit performance (+20Mbps improvement).
990 */
991 ifp->if_capenable &= ~IFCAP_TXCSUM;
992
993 if (ifp->if_capenable & IFCAP_TXCSUM)
1bedd927
SZ
994 ifp->if_hwassist |= JME_CSUM_FEATURES;
995 ifp->if_hwassist |= CSUM_TSO;
3d2aeb15 996
76fbb0b9
SZ
997 /* Set up MII bus. */
998 error = mii_phy_probe(dev, &sc->jme_miibus,
999 jme_mediachange, jme_mediastatus);
1000 if (error) {
1001 device_printf(dev, "no PHY found!\n");
1002 goto fail;
1003 }
1004
1005 /*
1006 * Save PHYADDR for FPGA mode PHY.
1007 */
ec7e787b 1008 if (sc->jme_caps & JME_CAP_FPGA) {
76fbb0b9
SZ
1009 struct mii_data *mii = device_get_softc(sc->jme_miibus);
1010
1011 if (mii->mii_instance != 0) {
1012 struct mii_softc *miisc;
1013
1014 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
1015 if (miisc->mii_phy != 0) {
1016 sc->jme_phyaddr = miisc->mii_phy;
1017 break;
1018 }
1019 }
1020 if (sc->jme_phyaddr != 0) {
1021 device_printf(sc->jme_dev,
1022 "FPGA PHY is at %d\n", sc->jme_phyaddr);
1023 /* vendor magic. */
dbe37f03
SZ
1024 jme_miibus_writereg(dev, sc->jme_phyaddr,
1025 JMPHY_CONF, JMPHY_CONF_DEFFIFO);
1026
ad22907f 1027 /* XXX should we clear JME_WA_EXTFIFO */
76fbb0b9
SZ
1028 }
1029 }
1030 }
1031
1032 ether_ifattach(ifp, eaddr, NULL);
1033
1034 /* Tell the upper layer(s) we support long frames. */
1035 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1036
58880b0d 1037 error = jme_intr_setup(dev);
76fbb0b9 1038 if (error) {
76fbb0b9
SZ
1039 ether_ifdetach(ifp);
1040 goto fail;
1041 }
1042
76fbb0b9
SZ
1043 return 0;
1044fail:
1045 jme_detach(dev);
1046 return (error);
1047}
1048
/*
 * Device detach: stop the chip, tear down the interrupt, detach from
 * the network stack and release all resources acquired by jme_attach().
 * Also used as the error-unwind path of jme_attach() (which jumps here
 * via its "fail" label), so every release below is guarded against the
 * corresponding resource never having been set up.
 */
static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/* Quiesce the hardware under the ifnet serializers. */
		ifnet_serialize_all(ifp);
		jme_stop(sc);
		jme_intr_teardown(dev);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	jme_intr_free(dev);

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
		    sc->jme_mem_res);
	}

	/* jme_dma_free() tolerates partially-completed jme_dma_alloc(). */
	jme_dma_free(sc);

	return (0);
}
1083
/*
 * Create the per-device sysctl tree (hw.<nameunit>.*) exposing the
 * Tx/Rx interrupt coalescing tunables, descriptor/ring counts and,
 * when compiled in, RSS debug counters and NPOLLING cpu offsets.
 * Failure to create the root node is non-fatal: the driver simply
 * runs without sysctl knobs.
 */
static void
jme_sysctl_node(struct jme_softc *sc)
{
#ifdef JME_RSS_DEBUG
	int r;
#endif

	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->jme_dev),
	    CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	/* Coalescing knobs are handler-backed so changes take effect live. */
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	/* Read-only descriptor/ring geometry (fixed after attach). */
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_data[0].jme_rx_desc_cnt,
	    0, "RX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "tx_desc_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_tx_desc_cnt,
	    0, "TX desc count");
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rx_ring_count", CTLFLAG_RD,
	    &sc->jme_cdata.jme_rx_ring_cnt,
	    0, "RX ring count");

#ifdef JME_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "rss_debug", CTLFLAG_RW, &sc->jme_rss_debug,
	    0, "RSS debug level");
	/* Per-RX-ring packet and ring-empty counters. */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		char rx_ring_desc[32];

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_pkt", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_pkt, "RXed packets");

		ksnprintf(rx_ring_desc, sizeof(rx_ring_desc),
		    "rx_ring%d_emp", r);
		SYSCTL_ADD_ULONG(&sc->jme_sysctl_ctx,
		    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
		    rx_ring_desc, CTLFLAG_RW,
		    &sc->jme_cdata.jme_rx_data[r].jme_rx_emp,
		    "# of time RX ring empty")
;
	}
#endif

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
	    "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
	    jme_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif
}
1173
76fbb0b9
SZ
/*
 * Allocate all DMA resources: softc descriptor bookkeeping arrays,
 * the parent ring/buffer DMA tags, the Tx ring, the per-ring Rx
 * rings, the shadow status block, and the per-descriptor Tx/Rx
 * dmamaps.  Returns 0 or a bus_dma error.
 *
 * NOTE: on failure this function returns without unwinding what was
 * already allocated; the caller (jme_attach) recovers by calling
 * jme_detach() -> jme_dma_free(), which handles partial allocation.
 */
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	bus_dmamem_t dmem;
	int error, i, asize;

	/* Software descriptor arrays (not DMA memory themselves). */
	sc->jme_cdata.jme_txdesc =
	    kmalloc(sc->jme_cdata.jme_tx_desc_cnt * sizeof(struct jme_txdesc),
		M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[i];

		rdata->jme_rxdesc =
		    kmalloc(rdata->jme_rx_desc_cnt * sizeof(struct jme_rxdesc),
			M_DEVBUF, M_WAITOK | M_ZERO);
	}

	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, JME_RING_BOUNDARY,	/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for TX ring
	 */
	asize = roundup2(JME_TX_RING_SIZE(sc), JME_TX_RING_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_ring_tag,
	    JME_TX_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate Tx ring.\n");
		return error;
	}
	sc->jme_cdata.jme_tx_ring_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_tx_ring_map = dmem.dmem_map;
	sc->jme_cdata.jme_tx_ring = dmem.dmem_addr;
	sc->jme_cdata.jme_tx_ring_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxring_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}

	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    sc->jme_lowaddr,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuffs for shadow status block
	 */
	asize = roundup2(JME_SSB_SIZE, JME_SSB_ALIGN);
	error = bus_dmamem_coherent(sc->jme_cdata.jme_buffer_tag,
	    JME_SSB_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shadow status block.\n");
		return error;
	}
	sc->jme_cdata.jme_ssb_tag = dmem.dmem_tag;
	sc->jme_cdata.jme_ssb_map = dmem.dmem_map;
	sc->jme_cdata.jme_ssb_block = dmem.dmem_addr;
	sc->jme_cdata.jme_ssb_block_paddr = dmem.dmem_busaddr;

	/*
	 * Create DMA stuffs for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			/*
			 * Unwind the maps created so far and NULL the tag
			 * so jme_dma_free() doesn't destroy maps twice.
			 */
			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuffs for RX buffers
	 */
	for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
		error = jme_rxbuf_dma_alloc(&sc->jme_cdata.jme_rx_data[i]);
		if (error)
			return error;
	}
	return 0;
}
1322
/*
 * Release every DMA resource created by jme_dma_alloc() (and the
 * per-ring helpers it calls).  Safe to call on a partially allocated
 * softc: each tag pointer is tested before use and NULLed afterwards,
 * so partial attach failure and repeated calls are both handled.
 * Child tags/maps are destroyed before their parent tags.
 */
static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	struct jme_rxdata *rdata;
	int i, r;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
		    sc->jme_cdata.jme_tx_ring,
		    sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_ring_tag != NULL) {
			bus_dmamap_unload(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring_map);
			bus_dmamem_free(rdata->jme_rx_ring_tag,
			    rdata->jme_rx_ring,
			    rdata->jme_rx_ring_map);
			bus_dma_tag_destroy(rdata->jme_rx_ring_tag);
			rdata->jme_rx_ring_tag = NULL;
		}
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rx_tag != NULL) {
			for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
				rxd = &rdata->jme_rxdesc[i];
				bus_dmamap_destroy(rdata->jme_rx_tag,
				    rxd->rx_dmamap);
			}
			/* Also destroy the spare map used for Rx refill. */
			bus_dmamap_destroy(rdata->jme_rx_tag,
			    rdata->jme_rx_sparemap);
			bus_dma_tag_destroy(rdata->jme_rx_tag);
			rdata->jme_rx_tag = NULL;
		}
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
		    sc->jme_cdata.jme_ssb_block,
		    sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	/* Parent tags last, after all children are gone. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}

	/* Software descriptor arrays. */
	if (sc->jme_cdata.jme_txdesc != NULL) {
		kfree(sc->jme_cdata.jme_txdesc, M_DEVBUF);
		sc->jme_cdata.jme_txdesc = NULL;
	}
	for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
		rdata = &sc->jme_cdata.jme_rx_data[r];
		if (rdata->jme_rxdesc != NULL) {
			kfree(rdata->jme_rxdesc, M_DEVBUF);
			rdata->jme_rxdesc = NULL;
		}
	}
}
1415
/*
 * Make sure the interface is stopped at reboot time.
 * Shutdown is implemented as a suspend: jme_suspend() serializes the
 * ifnet and calls jme_stop(), which is all that is required here.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}
1424
1425#ifdef notyet
/*
 * Unlike other ethernet controllers, JMC250 requires
 * explicitly resetting the link speed to 10/100Mbps, as a gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation but we don't know whether that operation
 * would succeed or not as we have no control after powering
 * off. If the renegotiation fails, WOL may not work. Running
 * at 1Gbps draws more power than the 375mA at 3.3V which is
 * specified in the PCI specification, and that would result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save the currently negotiated media speed/duplex/flow-control
 * to the softc and restore the same link again after resuming.
 * PHY handling such as powering down/resetting to 100Mbps
 * may be better handled in the suspend method in the phy driver.
 */
1444static void
1445jme_setlinkspeed(struct jme_softc *sc)
1446{
1447 struct mii_data *mii;
1448 int aneg, i;
1449
1450 JME_LOCK_ASSERT(sc);
1451
1452 mii = device_get_softc(sc->jme_miibus);
1453 mii_pollstat(mii);
1454 aneg = 0;
1455 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1456 switch IFM_SUBTYPE(mii->mii_media_active) {
1457 case IFM_10_T:
1458 case IFM_100_TX:
1459 return;
1460 case IFM_1000_T:
1461 aneg++;
1462 default:
1463 break;
1464 }
1465 }
1466 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1467 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1468 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1469 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1470 BMCR_AUTOEN | BMCR_STARTNEG);
1471 DELAY(1000);
1472 if (aneg != 0) {
1473 /* Poll link state until jme(4) get a 10/100 link. */
1474 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1475 mii_pollstat(mii);
1476 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1477 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1478 case IFM_10_T:
1479 case IFM_100_TX:
1480 jme_mac_config(sc);
1481 return;
1482 default:
1483 break;
1484 }
1485 }
1486 JME_UNLOCK(sc);
1487 pause("jmelnk", hz);
1488 JME_LOCK(sc);
1489 }
1490 if (i == MII_ANEGTICKS_GIGE)
1491 device_printf(sc->jme_dev, "establishing link failed, "
1492 "WOL may not work!");
1493 }
1494 /*
1495 * No link, force MAC to have 100Mbps, full-duplex link.
1496 * This is the last resort and may/may not work.
1497 */
1498 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1499 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1500 jme_mac_config(sc);
1501}
1502
/*
 * Configure Wake-On-LAN for suspend.  If the device has no PCI PME
 * capability, or WOL is disabled, the PHY is simply powered down.
 * Otherwise magic-frame wakeup and PME messaging are enabled and,
 * on gigabit parts, the link is first dropped to 10/100 to stay
 * within the PCI slot power budget (see jme_setlinkspeed()).
 */
static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
1545#endif
1546
/*
 * Bus suspend method: stop the chip under the ifnet serializers.
 * WOL arming (jme_setwol) is compiled out until the notyet code is
 * enabled.  Always succeeds.
 */
static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	ifnet_deserialize_all(ifp);

	return (0);
}
1562
/*
 * Bus resume method: clear any pending PME state (once the notyet
 * code is enabled) and re-initialize the chip if the interface was
 * administratively up.  Always succeeds.
 */
static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	ifnet_serialize_all(ifp);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME, clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	ifnet_deserialize_all(ifp);

	return (0);
}
1594
1bedd927
SZ
1595static __inline int
1596jme_tso_pullup(struct mbuf **mp)
1597{
1598 int hoff, iphlen, thoff;
1599 struct mbuf *m;
1600
1601 m = *mp;
1602 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
1603
1604 iphlen = m->m_pkthdr.csum_iphlen;
1605 thoff = m->m_pkthdr.csum_thlen;
1606 hoff = m->m_pkthdr.csum_lhlen;
1607
1608 KASSERT(iphlen > 0, ("invalid ip hlen"));
1609 KASSERT(thoff > 0, ("invalid tcp hlen"));
1610 KASSERT(hoff > 0, ("invalid ether hlen"));
1611
1612 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
1613 m = m_pullup(m, hoff + iphlen + thoff);
1614 if (m == NULL) {
1615 *mp = NULL;
1616 return ENOBUFS;
1617 }
1618 *mp = m;
1619 }
1620 return 0;
1621}
1622
/*
 * Map an mbuf chain onto the Tx descriptor ring.  On success the
 * descriptors are filled in and ownership of the first descriptor is
 * handed to the hardware; on failure the mbuf is freed and *m_head is
 * set to NULL.  Caller holds the Tx serializer and has already
 * guaranteed enough free descriptors (see jme_start()).
 *
 * On chips that can address above 4GB (jme_lowaddr !=
 * BUS_SPACE_MAXADDR_32BIT) the 64-bit descriptor chain format is
 * used, whose first descriptor is a payload-less "symbol" descriptor.
 */
static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs, nsegs;
	int error, i, prod, symbol_desc;
	uint32_t cflags, flag64, mss;

	M_ASSERTPKTHDR((*m_head));

	if ((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) {
		/* XXX Is this necessary? */
		error = jme_tso_pullup(m_head);
		if (error)
			return error;
	}

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/* 64-bit chains spend one extra (symbol) descriptor. */
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT)
		symbol_desc = 1;
	else
		symbol_desc = 0;

	maxsegs = (sc->jme_cdata.jme_tx_desc_cnt - sc->jme_cdata.jme_tx_cnt) -
	    (JME_TXD_RSVD + symbol_desc);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (JME_TXD_SPARE - symbol_desc),
	    ("not enough segments %d", maxsegs));

	error = bus_dmamap_load_mbuf_defrag(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, m_head,
	    txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	cflags = 0;
	mss = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		mss = (uint32_t)m->m_pkthdr.tso_segsz << JME_TD_MSS_SHIFT;
		cflags |= JME_TD_TSO;
	} else if (m->m_pkthdr.csum_flags & JME_CSUM_FEATURES) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			cflags |= JME_TD_IPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			cflags |= JME_TD_TCPCSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			cflags |= JME_TD_UDPCSUM;
	}

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

	desc = &sc->jme_cdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	if (sc->jme_lowaddr != BUS_SPACE_MAXADDR_32BIT) {
		/*
		 * Use 64bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is just a symbol TX desc carrying no payload.
		 */
		flag64 = JME_TD_64BIT;
		desc->buflen = htole32(mss);
		desc->addr_lo = 0;

		/* No effective TX desc is consumed */
		i = 0;
	} else {
		/*
		 * Use 32bits TX desc chain format.
		 *
		 * The first TX desc of the chain, which is setup here,
		 * is an effective TX desc carrying the first segment of
		 * the mbuf chain.
		 */
		flag64 = 0;
		desc->buflen = htole32(mss | txsegs[0].ds_len);
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[0].ds_addr));

		/* One effective TX desc is consumed */
		i = 1;
	}
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt - i <
		 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
	JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);

	/* 1 - i: first descriptor is counted only for 64-bit chains. */
	txd->tx_ndesc = 1 - i;
	for (; i < nsegs; i++) {
		desc = &sc->jme_cdata.jme_tx_ring[prod];
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		desc->flags = htole32(JME_TD_OWN | flag64);

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD);
		JME_DESC_INC(prod, sc->jme_cdata.jme_tx_desc_cnt);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc += nsegs;

	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}
1758
/*
 * ifnet if_start method: drain the send queue into the Tx ring.
 * Runs under the Tx serializer.  Sets IFF_OACTIVE and stops when the
 * ring cannot take another worst-case (JME_TXD_SPARE) frame while
 * keeping JME_TXD_RSVD descriptors free, then kicks the chip once
 * for all frames queued in this pass.
 */
static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);

	if (!sc->jme_has_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim completed descriptors before filling the ring. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT(sc))
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check number of available TX descs, always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE >
		    sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			/* jme_encap() freed the mbuf on failure. */
			KKASSERT(m_head == NULL);
			ifp->if_oerrors++;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load
		 * so cache the TXCSR value and write the ORed value with
		 * the kick command to the TXCSR. This saves one register
		 * access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}
1827
/*
 * ifnet watchdog: fires when a Tx timeout set in jme_start() expires.
 * Distinguishes a lost link, a merely-missed Tx completion interrupt
 * (ring drained after jme_txeof()), and a genuine Tx hang; the first
 * and last are recovered with a full re-init.
 */
static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (!sc->jme_has_link) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	/* Reclaim completions; if the ring is now empty it was only
	 * a missed interrupt, not a hang. */
	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
1857
/*
 * ifnet ioctl handler.  Runs fully serialized.  Handles MTU changes
 * (with jumbo-frame capability checks and Tx-offload demotion when
 * the MTU reaches the 2K Tx FIFO size), interface flags, multicast
 * filter updates, media, and capability toggles; everything else is
 * passed to ether_ioctl().
 */
static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when interface
			 * MTU is changed but availability of Tx checksum
			 * offload should be checked against new MTU size as
			 * FIFO size is just 2K.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &=
				    ~(IFCAP_TXCSUM | IFCAP_TSO);
				ifp->if_hwassist &=
				    ~(JME_CSUM_FEATURES | CSUM_TSO);
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/* Only re-filter on promisc/allmulti flip. */
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		/* TXCSUM/TSO may only be enabled while MTU < Tx FIFO size. */
		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
		}
		if (mask & IFCAP_RXCSUM) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}

		if ((mask & IFCAP_TSO) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
1967
/*
 * Reprogram the MAC (GHC/RXMAC/TXMAC/TXPFC and workaround registers)
 * to match the speed/duplex currently resolved by the MII layer.
 * Called after a link-state change.  Resets the MAC via GHC_RESET
 * first, then rebuilds the register values from scratch.
 */
static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling, no retry limits. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100 | sc->jme_clksrc;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to workaround CRC errors
		 * emitted by chips before JMC250B
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		/* Fast-ethernet-only parts never run at 1000Mbps. */
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000 | sc->jme_clksrc_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	/* Apply chip-revision workarounds only where flagged at attach. */
	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}
2062
2063static void
2064jme_intr(void *xsc)
2065{
2066 struct jme_softc *sc = xsc;
2067 struct ifnet *ifp = &sc->arpcom.ac_if;
2068 uint32_t status;
4447c752 2069 int r;
76fbb0b9 2070
31f0d5a2 2071 ASSERT_SERIALIZED(&sc->jme_serialize);
76fbb0b9
SZ
2072
2073 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2074 if (status == 0 || status == 0xFFFFFFFF)
2075 return;
2076
2077 /* Disable interrupts. */
2078 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2079
2080 status = CSR_READ_4(sc, JME_INTR_STATUS);
2081 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2082 goto back;
2083
2084 /* Reset PCC counter/timer and Ack interrupts. */
2085 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
4447c752 2086
76fbb0b9
SZ
2087 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
2088 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
4447c752 2089
7b040092 2090 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
4447c752
SZ
2091 if (status & jme_rx_status[r].jme_coal) {
2092 status |= jme_rx_status[r].jme_coal |
2093 jme_rx_status[r].jme_comp;
2094 }
2095 }
2096
76fbb0b9
SZ
2097 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2098
2099 if (ifp->if_flags & IFF_RUNNING) {
2100 if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
4447c752 2101 jme_rx_intr(sc, status);
76fbb0b9
SZ
2102
2103 if (status & INTR_RXQ_DESC_EMPTY) {
2104 /*
2105 * Notify hardware availability of new Rx buffers.
2106 * Reading RXCSR takes very long time under heavy
2107 * load so cache RXCSR value and writes the ORed
2108 * value with the kick command to the RXCSR. This
2109 * saves one register access cycle.
2110 */
2111 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2112 RXCSR_RX_ENB | RXCSR_RXQ_START);
2113 }
2114
2115 if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
31f0d5a2 2116 lwkt_serialize_enter(&sc->jme_cdata.jme_tx_serialize);
76fbb0b9
SZ
2117 jme_txeof(sc);
2118 if (!ifq_is_empty(&ifp->if_snd))
2119 if_devstart(ifp);
31f0d5a2 2120 lwkt_serialize_exit(&sc->jme_cdata.jme_tx_serialize);
76fbb0b9
SZ
2121 }
2122 }
2123back:
2124 /* Reenable interrupts. */
2125 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2126}
2127
2128static void
2129jme_txeof(struct jme_softc *sc)
2130{
2131 struct ifnet *ifp = &sc->arpcom.ac_if;
6960d7d2 2132 int cons;
76fbb0b9
SZ
2133
2134 cons = sc->jme_cdata.jme_tx_cons;
2135 if (cons == sc->jme_cdata.jme_tx_prod)
2136 return;
2137
76fbb0b9
SZ
2138 /*
2139 * Go through our Tx list and free mbufs for those
2140 * frames which have been transmitted.
2141 */
2142 while (cons != sc->jme_cdata.jme_tx_prod) {
6960d7d2
SZ
2143 struct jme_txdesc *txd, *next_txd;
2144 uint32_t status, next_status;
2145 int next_cons, nsegs;
2146
76fbb0b9
SZ
2147 txd = &sc->jme_cdata.jme_txdesc[cons];
2148 KASSERT(txd->tx_m != NULL,
ed20d0e3 2149 ("%s: freeing NULL mbuf!", __func__));
76fbb0b9
SZ
2150
2151 status = le32toh(txd->tx_desc->flags);
2152 if ((status & JME_TD_OWN) == JME_TD_OWN)
2153 break;
2154
6960d7d2
SZ
2155 /*
2156 * NOTE:
2157 * This chip will always update the TX descriptor's
2158 * buflen field and this updating always happens
2159 * after clearing the OWN bit, so even if the OWN
2160 * bit is cleared by the chip, we still don't sure
2161 * about whether the buflen field has been updated
2162 * by the chip or not. To avoid this race, we wait
2163 * for the next TX descriptor's OWN bit to be cleared
2164 * by the chip before reusing this TX descriptor.
2165 */
2166 next_cons = cons;
2167 JME_DESC_ADD(next_cons, txd->tx_ndesc,
2168 sc->jme_cdata.jme_tx_desc_cnt);
2169 next_txd = &sc->jme_cdata.jme_txdesc[next_cons];
2170 if (next_txd->tx_m == NULL)
2171 break;
2172 next_status = le32toh(next_txd->tx_desc->flags);
2173 if ((next_status & JME_TD_OWN) == JME_TD_OWN)
2174 break;
2175
76fbb0b9
SZ
2176 if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
2177 ifp->if_oerrors++;
2178 } else {
2179 ifp->if_opackets++;
2180 if (status & JME_TD_COLLISION) {
2181 ifp->if_collisions +=
2182 le32toh(txd->tx_desc->buflen) &
2183 JME_TD_BUF_LEN_MASK;
2184 }
2185 }
2186
2187 /*
2188 * Only the first descriptor of multi-descriptor
2189 * transmission is updated so driver have to skip entire
2190 * chained buffers for the transmiited frame. In other
2191 * words, JME_TD_OWN bit is valid only at the first
2192 * descriptor of a multi-descriptor transmission.
2193 */
2194 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
560616bf 2195 sc->jme_cdata.jme_tx_ring[cons].flags = 0;
b020bb10 2196 JME_DESC_INC(cons, sc->jme_cdata.jme_tx_desc_cnt);
76fbb0b9
SZ
2197 }
2198
2199 /* Reclaim transferred mbufs. */
2200 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2201 m_freem(txd->tx_m);
2202 txd->tx_m = NULL;
2203 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2204 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
ed20d0e3 2205 ("%s: Active Tx desc counter was garbled", __func__));
76fbb0b9
SZ
2206 txd->tx_ndesc = 0;
2207 }
2208 sc->jme_cdata.jme_tx_cons = cons;
2209
1bedd927
SZ
2210 /* 1 for symbol TX descriptor */
2211 if (sc->jme_cdata.jme_tx_cnt <= JME_MAXTXSEGS + 1)
76fbb0b9
SZ
2212 ifp->if_timer = 0;
2213
1bedd927 2214 if (sc->jme_cdata.jme_tx_cnt + JME_TXD_SPARE <=
b020bb10 2215 sc->jme_cdata.jme_tx_desc_cnt - JME_TXD_RSVD)
76fbb0b9 2216 ifp->if_flags &= ~IFF_OACTIVE;
76fbb0b9
SZ
2217}
2218
2219static __inline void
dea2452a 2220jme_discard_rxbufs(struct jme_rxdata *rdata, int cons, int count)
76fbb0b9
SZ
2221{
2222 int i;
2223
2224 for (i = 0; i < count; ++i) {
fd2a6d2c 2225 jme_setup_rxdesc(&rdata->jme_rxdesc[cons]);
7b040092 2226 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
76fbb0b9
SZ
2227 }
2228}
2229
a6acc6e2
SZ
2230static __inline struct pktinfo *
2231jme_pktinfo(struct pktinfo *pi, uint32_t flags)
2232{
2233 if (flags & JME_RD_IPV4)
2234 pi->pi_netisr = NETISR_IP;
2235 else if (flags & JME_RD_IPV6)
2236 pi->pi_netisr = NETISR_IPV6;
2237 else
2238 return NULL;
2239
2240 pi->pi_flags = 0;
2241 pi->pi_l3proto = IPPROTO_UNKNOWN;
2242
2243 if (flags & JME_RD_MORE_FRAG)
2244 pi->pi_flags |= PKTINFO_FLAG_FRAG;
2245 else if (flags & JME_RD_TCP)
2246 pi->pi_l3proto = IPPROTO_TCP;
2247 else if (flags & JME_RD_UDP)
2248 pi->pi_l3proto = IPPROTO_UDP;
7345eb80
SZ
2249 else
2250 pi = NULL;
a6acc6e2
SZ
2251 return pi;
2252}
2253
76fbb0b9
SZ
2254/* Receive a frame. */
2255static void
dea2452a 2256jme_rxpkt(struct jme_rxdata *rdata)
76fbb0b9 2257{
dea2452a 2258 struct ifnet *ifp = &rdata->jme_sc->arpcom.ac_if;
76fbb0b9
SZ
2259 struct jme_desc *desc;
2260 struct jme_rxdesc *rxd;
2261 struct mbuf *mp, *m;
a6acc6e2 2262 uint32_t flags, status, hash, hashinfo;
76fbb0b9
SZ
2263 int cons, count, nsegs;
2264
4447c752
SZ
2265 cons = rdata->jme_rx_cons;
2266 desc = &rdata->jme_rx_ring[cons];
9d4f763d 2267
76fbb0b9
SZ
2268 flags = le32toh(desc->flags);
2269 status = le32toh(desc->buflen);
a6acc6e2
SZ
2270 hash = le32toh(desc->addr_hi);
2271 hashinfo = le32toh(desc->addr_lo);
76fbb0b9
SZ
2272 nsegs = JME_RX_NSEGS(status);
2273
9d4f763d
SZ
2274 if (nsegs > 1) {
2275 /* Skip the first descriptor. */
2276 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt);
2277
2278 /*
2279 * Clear the OWN bit of the following RX descriptors;
2280 * hardware will not clear the OWN bit except the first
2281 * RX descriptor.
2282 *
2283 * Since the first RX descriptor is setup, i.e. OWN bit
2284 * on, before its followins RX descriptors, leaving the
2285 * OWN bit on the following RX descriptors will trick
2286 * the hardware into thinking that the following RX
2287 * descriptors are ready to be used too.
2288 */
2289 for (count = 1; count < nsegs; count++,
2290 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt))
2291 rdata->jme_rx_ring[cons].flags = 0;
2292
2293 cons = rdata->jme_rx_cons;
2294 }
2295
7b040092 2296 JME_RSS_DPRINTF(rdata->jme_sc, 15, "ring%d, flags 0x%08x, "
a6acc6e2 2297 "hash 0x%08x, hash info 0x%08x\n",
7b040092 2298 rdata->jme_rx_idx, flags, hash, hashinfo);
760c056c 2299
76fbb0b9
SZ
2300 if (status & JME_RX_ERR_STAT) {
2301 ifp->if_ierrors++;
dea2452a 2302 jme_discard_rxbufs(rdata, cons, nsegs);
76fbb0b9 2303#ifdef JME_SHOW_ERRORS
7b040092 2304 if_printf(ifp, "%s : receive error = 0x%b\n",
76fbb0b9
SZ
2305 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2306#endif
4447c752 2307 rdata->jme_rx_cons += nsegs;
7b040092 2308 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
76fbb0b9
SZ
2309 return;
2310 }
2311
4447c752 2312 rdata->jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
76fbb0b9 2313 for (count = 0; count < nsegs; count++,
7b040092 2314 JME_DESC_INC(cons, rdata->jme_rx_desc_cnt)) {
4447c752 2315 rxd = &rdata->jme_rxdesc[cons];
76fbb0b9
SZ
2316 mp = rxd->rx_m;
2317
2318 /* Add a new receive buffer to the ring. */
dea2452a 2319 if (jme_newbuf(rdata, rxd, 0) != 0) {
76fbb0b9
SZ
2320 ifp->if_iqdrops++;
2321 /* Reuse buffer. */
dea2452a 2322 jme_discard_rxbufs(rdata, cons, nsegs - count);
4447c752
SZ
2323 if (rdata->jme_rxhead != NULL) {
2324 m_freem(rdata->jme_rxhead);
dea2452a 2325 JME_RXCHAIN_RESET(rdata);
76fbb0b9
SZ
2326 }
2327 break;
2328 }
2329
2330 /*
2331 * Assume we've received a full sized frame.
2332 * Actual size is fixed when we encounter the end of
2333 * multi-segmented frame.
2334 */
2335 mp->m_len = MCLBYTES;
2336
2337 /* Chain received mbufs. */
4447c752
SZ
2338 if (rdata->jme_rxhead == NULL) {
2339 rdata->jme_rxhead = mp;
2340 rdata->jme_rxtail = mp;
76fbb0b9
SZ
2341 } else {
2342 /*
2343 * Receive processor can receive a maximum frame
2344 * size of 65535 bytes.
2345 */
4447c752
SZ
2346 rdata->jme_rxtail->m_next = mp;
2347 rdata->jme_rxtail = mp;
76fbb0b9
SZ
2348 }
2349
2350 if (count == nsegs - 1) {
a6acc6e2
SZ
2351 struct pktinfo pi0, *pi;
2352
76fbb0b9 2353 /* Last desc. for this frame. */
4447c752 2354 m = rdata->jme_rxhead;
4447c752 2355 m->m_pkthdr.len = rdata->jme_rxlen;
76fbb0b9
SZ
2356 if (nsegs > 1) {
2357 /* Set first mbuf size. */
2358 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2359 /* Set last mbuf size. */
4447c752 2360 mp->m_len = rdata->jme_rxlen -
76fbb0b9
SZ
2361 ((MCLBYTES - JME_RX_PAD_BYTES) +
2362 (MCLBYTES * (nsegs - 2)));
2363 } else {
4447c752 2364 m->m_len = rdata->jme_rxlen;
76fbb0b9
SZ
2365 }
2366 m->m_pkthdr.rcvif = ifp;
2367
2368 /*
2369 * Account for 10bytes auto padding which is used
2370 * to align IP header on 32bit boundary. Also note,
2371 * CRC bytes is automatically removed by the
2372 * hardware.
2373 */
2374 m->m_data += JME_RX_PAD_BYTES;
2375
2376 /* Set checksum information. */
2377 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
2378 (flags & JME_RD_IPV4)) {
2379 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2380 if (flags & JME_RD_IPCSUM)
2381 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2382 if ((flags & JME_RD_MORE_FRAG) == 0 &&
2383 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2384 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2385 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2386 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2387 m->m_pkthdr.csum_flags |=
2388 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2389 m->m_pkthdr.csum_data = 0xffff;
2390 }
2391 }
2392
2393 /* Check for VLAN tagged packets. */
2394 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
2395 (flags & JME_RD_VLAN_TAG)) {
2396 m->m_pkthdr.ether_vlantag =
2397 flags & JME_RD_VLAN_MASK;
2398 m->m_flags |= M_VLANTAG;
2399 }
2400
2401 ifp->if_ipackets++;
a6acc6e2
SZ
2402
2403 if (ifp->if_capenable & IFCAP_RSS)
2404 pi = jme_pktinfo(&pi0, flags);
2405 else
2406 pi = NULL;
2407
2408 if (pi != NULL &&
055b7997
SZ
2409 (hashinfo & JME_RD_HASH_FN_MASK) ==
2410 JME_RD_HASH_FN_TOEPLITZ) {
2411 m->m_flags |= (M_HASH | M_CKHASH);
a6acc6e2
SZ
2412 m->m_pkthdr.hash = toeplitz_hash(hash);
2413 }
2414
2415#ifdef JME_RSS_DEBUG
2416 if (pi != NULL) {
7b040092 2417 JME_RSS_DPRINTF(rdata->jme_sc, 10,
a6acc6e2
SZ
2418 "isr %d flags %08x, l3 %d %s\n",
2419 pi->pi_netisr, pi->pi_flags,
2420 pi->pi_l3proto,
2421 (m->m_flags & M_HASH) ? "hash" : "");
2422 }
2423#endif
2424
76fbb0b9 2425 /* Pass it on. */
eda7db08 2426 ether_input_pkt(ifp, m, pi);
76fbb0b9
SZ
2427
2428 /* Reset mbuf chains. */
dea2452a 2429 JME_RXCHAIN_RESET(rdata);
760c056c 2430#ifdef JME_RSS_DEBUG
7b040092 2431 rdata->jme_rx_pkt++;
760c056c 2432#endif
76fbb0b9
SZ
2433 }
2434 }
2435
4447c752 2436 rdata->jme_rx_cons += nsegs;
7b040092 2437 rdata->jme_rx_cons %= rdata->jme_rx_desc_cnt;
76fbb0b9
SZ
2438}
2439
eda7db08 2440static void
dea2452a 2441jme_rxeof(struct jme_rxdata *rdata, int count)
76fbb0b9
SZ
2442{
2443 struct jme_desc *desc;
eda7db08 2444 int nsegs, pktlen;
76fbb0b9 2445
76fbb0b9 2446 for (;;) {
8a0620e4 2447#ifdef IFPOLL_ENABLE
3fa06afc
SZ
2448 if (count >= 0 && count-- == 0)
2449 break;
2450#endif
4447c752 2451 desc = &rdata->jme_rx_ring[rdata->jme_rx_cons];
76fbb0b9
SZ
2452 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2453 break;
2454 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2455 break;
2456
2457 /*
2458 * Check number of segments against received bytes.
2459 * Non-matching value would indicate that hardware
2460 * is still trying to update Rx descriptors. I'm not
2461 * sure whether this check is needed.
2462 */
2463 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2464 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2465 if (nsegs != howmany(pktlen, MCLBYTES)) {
dea2452a
SZ
2466 if_printf(&rdata->jme_sc->arpcom.ac_if,
2467 "RX fragment count(%d) and "
2468 "packet size(%d) mismach\n", nsegs, pktlen);
76fbb0b9
SZ
2469 break;
2470 }
2471
6afef6ab
SZ
2472 /*
2473 * NOTE:
2474 * RSS hash and hash information may _not_ be set by the
2475 * hardware even if the OWN bit is cleared and VALID bit
2476 * is set.
2477 *
2478 * If the RSS information is not delivered by the hardware
2479 * yet, we MUST NOT accept this packet, let alone reusing
2480 * its RX descriptor. If this packet was accepted and its
2481 * RX descriptor was reused before hardware delivering the
2482 * RSS information, the RX buffer's address would be trashed
2483 * by the RSS information delivered by the hardware.
2484 */
2485 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
2486 struct jme_rxdesc *rxd;
2487 uint32_t hashinfo;
2488
2489 hashinfo = le32toh(desc->addr_lo);
2490 rxd = &rdata->jme_rxdesc[rdata->jme_rx_cons];
2491
2492 /*
2493 * This test should be enough to detect the pending
2494 * RSS information delivery, given:
2495 * - If RSS hash is not calculated, the hashinfo
064b75ed
SZ
2496 * will be 0. Howvever, the lower 32bits of RX
2497 * buffers' physical address will never be 0.
2498 * (see jme_rxbuf_dma_filter)
6afef6ab
SZ
2499 * - If RSS hash is calculated, the lowest 4 bits
2500 * of hashinfo will be set, while the RX buffers
2501 * are at least 2K aligned.
2502 */
2503 if (hashinfo == JME_ADDR_LO(rxd->rx_paddr)) {
2504#ifdef JME_SHOW_RSSWB
2505 if_printf(&rdata->jme_sc->arpcom.ac_if,
2506 "RSS is not written back yet\n");
2507#endif
2508 break;
2509 }
2510 }
2511
76fbb0b9 2512 /* Received a frame. */
dea2452a 2513 jme_rxpkt(rdata);
76fbb0b9 2514 }
76fbb0b9
SZ
2515}
2516
2517static void
2518jme_tick(void *xsc)
2519{
2520 struct jme_softc *sc = xsc;
76fbb0b9
SZ
2521 struct mii_data *mii = device_get_softc(sc->jme_miibus);
2522
cccc3955 2523 lwkt_serialize_enter(&sc->jme_serialize);
76fbb0b9 2524
cccc3955 2525 sc->jme_in_tick = TRUE;
76fbb0b9 2526 mii_tick(mii);
cccc3955
SZ
2527 sc->jme_in_tick = FALSE;
2528
76fbb0b9
SZ
2529 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2530
cccc3955 2531 lwkt_serialize_exit(&sc->jme_serialize);
76fbb0b9
SZ
2532}
2533
2534static void
2535jme_reset(struct jme_softc *sc)
2536{
409fe405
SZ
2537 uint32_t val;
2538
2539 /* Make sure that TX and RX are stopped */
76fbb0b9 2540 jme_stop_tx(sc);
409fe405
SZ
2541 jme_stop_rx(sc);
2542
2543 /* Start reset */
76fbb0b9 2544 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
409fe405
SZ
2545 DELAY(20);
2546
2547 /*
2548 * Hold reset bit before stop reset
2549 */
2550
2551 /* Disable TXMAC and TXOFL clock sources */
2552 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2553 /* Disable RXMAC clock source */
2554 val = CSR_READ_4(sc, JME_GPREG1);
2555 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2556 /* Flush */
2557 CSR_READ_4(sc, JME_GHC);
2558
2559 /* Stop reset */
2560 CSR_WRITE_4(sc, JME_GHC, 0);
2561 /* Flush */
2562 CSR_READ_4(sc, JME_GHC);
2563
2564 /*
2565 * Clear reset bit after stop reset
2566 */
2567
2568 /* Enable TXMAC and TXOFL clock sources */
2569 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2570 /* Enable RXMAC clock source */
2571 val = CSR_READ_4(sc, JME_GPREG1);
2572 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2573 /* Flush */
2574 CSR_READ_4(sc, JME_GHC);
2575
2576 /* Disable TXMAC and TXOFL clock sources */
76fbb0b9 2577 CSR_WRITE_4(sc, JME_GHC, 0);
409fe405
SZ
2578 /* Disable RXMAC clock source */
2579 val = CSR_READ_4(sc, JME_GPREG1);
2580 CSR_WRITE_4(sc, JME_GPREG1, val | GPREG1_DIS_RXMAC_CLKSRC);
2581 /* Flush */
2582 CSR_READ_4(sc, JME_GHC);
2583
2584 /* Enable TX and RX */
2585 val = CSR_READ_4(sc, JME_TXCSR);
2586 CSR_WRITE_4(sc, JME_TXCSR, val | TXCSR_TX_ENB);
2587 val = CSR_READ_4(sc, JME_RXCSR);
2588 CSR_WRITE_4(sc, JME_RXCSR, val | RXCSR_RX_ENB);
2589 /* Flush */
2590 CSR_READ_4(sc, JME_TXCSR);
2591 CSR_READ_4(sc, JME_RXCSR);
2592
2593 /* Enable TXMAC and TXOFL clock sources */
2594 CSR_WRITE_4(sc, JME_GHC, GHC_TXOFL_CLKSRC | GHC_TXMAC_CLKSRC);
2595 /* Eisable RXMAC clock source */
2596 val = CSR_READ_4(sc, JME_GPREG1);
2597 CSR_WRITE_4(sc, JME_GPREG1, val & ~GPREG1_DIS_RXMAC_CLKSRC);
2598 /* Flush */
2599 CSR_READ_4(sc, JME_GHC);
2600
2601 /* Stop TX and RX */
2602 jme_stop_tx(sc);
2603 jme_stop_rx(sc);
76fbb0b9
SZ
2604}
2605
2606static void
2607jme_init(void *xsc)
2608{
2609 struct jme_softc *sc = xsc;
2610 struct ifnet *ifp = &sc->arpcom.ac_if;
2611 struct mii_data *mii;
2612 uint8_t eaddr[ETHER_ADDR_LEN];
2613 bus_addr_t paddr;
2614 uint32_t reg;
4447c752 2615 int error, r;
76fbb0b9 2616
31f0d5a2 2617 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
2618
2619 /*
2620 * Cancel any pending I/O.
2621 */
2622 jme_stop(sc);
2623
2624 /*
2625 * Reset the chip to a known state.
2626 */
2627 jme_reset(sc);
2628
58880b0d
SZ
2629 /*
2630 * Setup MSI/MSI-X vectors to interrupts mapping
2631 */
2632 jme_set_msinum(sc);
2633
6afef6ab 2634 if (JME_ENABLE_HWRSS(sc))
760c056c
SZ
2635 jme_enable_rss(sc);
2636 else
2637 jme_disable_rss(sc);
4447c752
SZ
2638
2639 /* Init RX descriptors */
7b040092 2640 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
dea2452a 2641 error = jme_init_rx_ring(&sc->jme_cdata.jme_rx_data[r]);
4447c752
SZ
2642 if (error) {
2643 if_printf(ifp, "initialization failed: "
2644 "no memory for %dth RX ring.\n", r);
2645 jme_stop(sc);
2646 return;
2647 }
2648 }
2649
2650 /* Init TX descriptors */
76fbb0b9
SZ
2651 jme_init_tx_ring(sc);
2652
2653 /* Initialize shadow status block. */
2654 jme_init_ssb(sc);
2655
2656 /* Reprogram the station address. */
2657 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2658 CSR_WRITE_4(sc, JME_PAR0,
2659 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2660 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2661
2662 /*
2663 * Configure Tx queue.
2664 * Tx priority queue weight value : 0
2665 * Tx FIFO threshold for processing next packet : 16QW
2666 * Maximum Tx DMA length : 512
2667 * Allow Tx DMA burst.
2668 */
2669 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2670 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2671 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2672 sc->jme_txcsr |= sc->jme_tx_dma_size;
2673 sc->jme_txcsr |= TXCSR_DMA_BURST;
2674 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2675
2676 /* Set Tx descriptor counter. */
b020bb10 2677 CSR_WRITE_4(sc, JME_TXQDC, sc->jme_cdata.jme_tx_desc_cnt);
76fbb0b9
SZ
2678
2679 /* Set Tx ring address to the hardware. */
7405bec3 2680 paddr = sc->jme_cdata.jme_tx_ring_paddr;
76fbb0b9
SZ
2681 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2682 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2683
2684 /* Configure TxMAC parameters. */
2685 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2686 reg |= TXMAC_THRESH_1_PKT;
2687 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2688 CSR_WRITE_4(sc, JME_TXMAC, reg);
2689
2690 /*
2691 * Configure Rx queue.
2692 * FIFO full threshold for transmitting Tx pause packet : 128T
2693 * FIFO threshold for processing next packet : 128QW
2694 * Rx queue 0 select
2695 * Max Rx DMA length : 128
2696 * Rx descriptor retry : 32
2697 * Rx descriptor retry time gap : 256ns
2698 * Don't receive runt/bad frame.
2699 */
2700 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
223cfc2f 2701#if 0
76fbb0b9
SZ
2702 /*
2703 * Since Rx FIFO size is 4K bytes, receiving frames larger
2704 * than 4K bytes will suffer from Rx FIFO overruns. So
2705 * decrease FIFO threshold to reduce the FIFO overruns for
2706 * frames larger than 4000 bytes.
2707 * For best performance of standard MTU sized frames use
2708 * maximum allowable FIFO threshold, 128QW.
2709 */
2710 if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
2711 JME_RX_FIFO_SIZE)
2712 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2713 else
2714 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
223cfc2f
SZ
2715#else
2716 /* Improve PCI Express compatibility */
2717 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2718#endif
2719 sc->jme_rxcsr |= sc->jme_rx_dma_size;
76fbb0b9
SZ
2720 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2721 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2722 /* XXX TODO DROP_BAD */
76fbb0b9 2723
7b040092
SZ
2724 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
2725 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
2726
4447c752
SZ
2727 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RXQ_N_SEL(r));
2728
2729 /* Set Rx descriptor counter. */
7b040092 2730 CSR_WRITE_4(sc, JME_RXQDC, rdata->jme_rx_desc_cnt);
76fbb0b9 2731
4447c752 2732 /* Set Rx ring address to the hardware. */
7b040092 2733 paddr = rdata->jme_rx_ring_paddr;
4447c752
SZ
2734 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2735 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2736 }
76fbb0b9
SZ
2737
2738 /* Clear receive filter. */
2739 CSR_WRITE_4(sc, JME_RXMAC, 0);
2740
2741 /* Set up the receive filter. */
2742 jme_set_filter(sc);
2743 jme_set_vlan(sc);
2744
2745 /*
2746 * Disable all WOL bits as WOL can interfere normal Rx
2747 * operation. Also clear WOL detection status bits.
2748 */
2749 reg = CSR_READ_4(sc, JME_PMCS);
2750 reg &= ~PMCS_WOL_ENB_MASK;
2751 CSR_WRITE_4(sc, JME_PMCS, reg);
2752
2753 /*
2754 * Pad 10bytes right before received frame. This will greatly
2755 * help Rx performance on strict-alignment architectures as
2756 * it does not need to copy the frame to align the payload.
2757 */
2758 reg = CSR_READ_4(sc, JME_RXMAC);
2759 reg |= RXMAC_PAD_10BYTES;
2760
2761 if (ifp->if_capenable & IFCAP_RXCSUM)
2762 reg |= RXMAC_CSUM_ENB;
2763 CSR_WRITE_4(sc, JME_RXMAC, reg);
2764
2765 /* Configure general purpose reg0 */
2766 reg = CSR_READ_4(sc, JME_GPREG0);
2767 reg &= ~GPREG0_PCC_UNIT_MASK;
2768 /* Set PCC timer resolution to micro-seconds unit. */
2769 reg |= GPREG0_PCC_UNIT_US;
2770 /*
2771 * Disable all shadow register posting as we have to read
2772 * JME_INTR_STATUS register in jme_intr. Also it seems
2773 * that it's hard to synchronize interrupt status between
2774 * hardware and software with shadow posting due to
2775 * requirements of bus_dmamap_sync(9).
2776 */
2777 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2778 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2779 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2780 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2781 /* Disable posting of DW0. */
2782 reg &= ~GPREG0_POST_DW0_ENB;
2783 /* Clear PME message. */
2784 reg &= ~GPREG0_PME_ENB;
2785 /* Set PHY address. */
2786 reg &= ~GPREG0_PHY_ADDR_MASK;
2787 reg |= sc->jme_phyaddr;
2788 CSR_WRITE_4(sc, JME_GPREG0, reg);
2789
2790 /* Configure Tx queue 0 packet completion coalescing. */
2870abc4 2791 jme_set_tx_coal(sc);
76fbb0b9 2792
dea2452a 2793 /* Configure Rx queues packet completion coalescing. */
2870abc4 2794 jme_set_rx_coal(sc);
76fbb0b9
SZ
2795
2796 /* Configure shadow status block but don't enable posting. */
560616bf 2797 paddr = sc->jme_cdata.jme_ssb_block_paddr;
76fbb0b9
SZ
2798 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2799 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2800
2801 /* Disable Timer 1 and Timer 2. */
2802 CSR_WRITE_4(sc, JME_TIMER1, 0);
2803 CSR_WRITE_4(sc, JME_TIMER2, 0);
2804
2805 /* Configure retry transmit period, retry limit value. */
2806 CSR_WRITE_4(sc, JME_TXTRHD,
2807 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2808 TXTRHD_RT_PERIOD_MASK) |
2809 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2810 TXTRHD_RT_LIMIT_SHIFT));
2811
8a0620e4
SZ
2812#ifdef IFPOLL_ENABLE
2813 if (!(ifp->if_flags & IFF_NPOLLING))
9de40864 2814#endif
76fbb0b9 2815 /* Initialize the interrupt mask. */
8a0620e4 2816 jme_enable_intr(sc);
76fbb0b9
SZ
2817 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2818
2819 /*
2820 * Enabling Tx/Rx DMA engines and Rx queue processing is
2821 * done after detection of valid link in jme_miibus_statchg.
2822 */
cccc3955 2823 sc->jme_has_link = FALSE;
76fbb0b9
SZ
2824
2825 /* Set the current media. */
2826 mii = device_get_softc(sc->jme_miibus);
2827 mii_mediachg(mii);
2828
2829 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2830
2831 ifp->if_flags |= IFF_RUNNING;
2832 ifp->if_flags &= ~IFF_OACTIVE;
2833}
2834
2835static void
2836jme_stop(struct jme_softc *sc)
2837{
2838 struct ifnet *ifp = &sc->arpcom.ac_if;
2839 struct jme_txdesc *txd;
2840 struct jme_rxdesc *rxd;
4447c752
SZ
2841 struct jme_rxdata *rdata;
2842 int i, r;
76fbb0b9 2843
31f0d5a2 2844 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
2845
2846 /*
2847 * Mark the interface down and cancel the watchdog timer.
2848 */
2849 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2850 ifp->if_timer = 0;
2851
2852 callout_stop(&sc->jme_tick_ch);
cccc3955 2853 sc->jme_has_link = FALSE;
76fbb0b9
SZ
2854
2855 /*
2856 * Disable interrupts.
2857 */
8a0620e4 2858 jme_disable_intr(sc);
76fbb0b9
SZ
2859 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2860
2861 /* Disable updating shadow status block. */
2862 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2863 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2864
2865 /* Stop receiver, transmitter. */
2866 jme_stop_rx(sc);
2867 jme_stop_tx(sc);
2868
76fbb0b9
SZ
2869 /*
2870 * Free partial finished RX segments
2871 */
7b040092 2872 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
4447c752
SZ
2873 rdata = &sc->jme_cdata.jme_rx_data[r];
2874 if (rdata->jme_rxhead != NULL)
2875 m_freem(rdata->jme_rxhead);
dea2452a 2876 JME_RXCHAIN_RESET(rdata);
4447c752 2877 }
76fbb0b9
SZ
2878
2879 /*
2880 * Free RX and TX mbufs still in the queues.
2881 */
7b040092 2882 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
4447c752 2883 rdata = &sc->jme_cdata.jme_rx_data[r];
7b040092 2884 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
4447c752
SZ
2885 rxd = &rdata->jme_rxdesc[i];
2886 if (rxd->rx_m != NULL) {
2887 bus_dmamap_unload(rdata->jme_rx_tag,
2888 rxd->rx_dmamap);
2889 m_freem(rxd->rx_m);
2890 rxd->rx_m = NULL;
2891 }
76fbb0b9 2892 }
4447c752 2893 }
b020bb10 2894 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
76fbb0b9
SZ
2895 txd = &sc->jme_cdata.jme_txdesc[i];
2896 if (txd->tx_m != NULL) {
2897 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2898 txd->tx_dmamap);
2899 m_freem(txd->tx_m);
2900 txd->tx_m = NULL;
2901 txd->tx_ndesc = 0;
2902 }
2903 }
2904}
2905
2906static void
2907jme_stop_tx(struct jme_softc *sc)
2908{
2909 uint32_t reg;
2910 int i;
2911
2912 reg = CSR_READ_4(sc, JME_TXCSR);
2913 if ((reg & TXCSR_TX_ENB) == 0)
2914 return;
2915 reg &= ~TXCSR_TX_ENB;
2916 CSR_WRITE_4(sc, JME_TXCSR, reg);
2917 for (i = JME_TIMEOUT; i > 0; i--) {
2918 DELAY(1);
2919 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2920 break;
2921 }
2922 if (i == 0)
2923 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2924}
2925
2926static void
2927jme_stop_rx(struct jme_softc *sc)
2928{
2929 uint32_t reg;
2930 int i;
2931
2932 reg = CSR_READ_4(sc, JME_RXCSR);
2933 if ((reg & RXCSR_RX_ENB) == 0)
2934 return;
2935 reg &= ~RXCSR_RX_ENB;
2936 CSR_WRITE_4(sc, JME_RXCSR, reg);
2937 for (i = JME_TIMEOUT; i > 0; i--) {
2938 DELAY(1);
2939 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2940 break;
2941 }
2942 if (i == 0)
2943 device_printf(sc->jme_dev, "stopping recevier timeout!\n");
2944}
2945
2946static void
2947jme_init_tx_ring(struct jme_softc *sc)
2948{
560616bf 2949 struct jme_chain_data *cd;
76fbb0b9
SZ
2950 struct jme_txdesc *txd;
2951 int i;
2952
2953 sc->jme_cdata.jme_tx_prod = 0;
2954 sc->jme_cdata.jme_tx_cons = 0;
2955 sc->jme_cdata.jme_tx_cnt = 0;
2956
560616bf
SZ
2957 cd = &sc->jme_cdata;
2958 bzero(cd->jme_tx_ring, JME_TX_RING_SIZE(sc));
b020bb10 2959 for (i = 0; i < sc->jme_cdata.jme_tx_desc_cnt; i++) {
76fbb0b9
SZ
2960 txd = &sc->jme_cdata.jme_txdesc[i];
2961 txd->tx_m = NULL;
560616bf 2962 txd->tx_desc = &cd->jme_tx_ring[i];
76fbb0b9
SZ
2963 txd->tx_ndesc = 0;
2964 }
76fbb0b9
SZ
2965}
2966
2967static void
2968jme_init_ssb(struct jme_softc *sc)
2969{
560616bf 2970 struct jme_chain_data *cd;
76fbb0b9 2971
560616bf
SZ
2972 cd = &sc->jme_cdata;
2973 bzero(cd->jme_ssb_block, JME_SSB_SIZE);
76fbb0b9
SZ
2974}
2975
2976static int
dea2452a 2977jme_init_rx_ring(struct jme_rxdata *rdata)
76fbb0b9 2978{
76fbb0b9
SZ
2979 struct jme_rxdesc *rxd;
2980 int i;
2981
4447c752
SZ
2982 KKASSERT(rdata->jme_rxhead == NULL &&
2983 rdata->jme_rxtail == NULL &&
2984 rdata->jme_rxlen == 0);
2985 rdata->jme_rx_cons = 0;
76fbb0b9 2986
7b040092
SZ
2987 bzero(rdata->jme_rx_ring, JME_RX_RING_SIZE(rdata));
2988 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
76fbb0b9
SZ
2989 int error;
2990
4447c752 2991 rxd = &rdata->jme_rxdesc[i];
76fbb0b9 2992 rxd->rx_m = NULL;
4447c752 2993 rxd->rx_desc = &rdata->jme_rx_ring[i];
dea2452a 2994 error = jme_newbuf(rdata, rxd, 1);
76fbb0b9 2995 if (error)
4447c752 2996 return error;
76fbb0b9 2997 }
4447c752 2998 return 0;
76fbb0b9
SZ
2999}
3000
3001static int
dea2452a 3002jme_newbuf(struct jme_rxdata *rdata, struct jme_rxdesc *rxd, int init)
76fbb0b9 3003{
76fbb0b9 3004 struct mbuf *m;
76fbb0b9
SZ
3005 bus_dma_segment_t segs;
3006 bus_dmamap_t map;
b0ba1747 3007 int error, nsegs;
76fbb0b9
SZ
3008
3009 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3010 if (m == NULL)
4447c752 3011 return ENOBUFS;
76fbb0b9
SZ
3012 /*
3013 * JMC250 has 64bit boundary alignment limitation so jme(4)
3014 * takes advantage of 10 bytes padding feature of hardware
3015 * in order not to copy entire frame to align IP header on
3016 * 32bit boundary.
3017 */
3018 m->m_len = m->m_pkthdr.len = MCLBYTES;
3019
b0ba1747
SZ
3020 error = bus_dmamap_load_mbuf_segment(rdata->jme_rx_tag,
3021 rdata->jme_rx_sparemap, m, &segs, 1, &nsegs,
3022 BUS_DMA_NOWAIT);
3023 if (error) {
76fbb0b9 3024 m_freem(m);
dea2452a
SZ
3025 if (init) {
3026 if_printf(&rdata->jme_sc->arpcom.ac_if,
3027 "can't load RX mbuf\n");
3028 }
4447c752 3029 return error;
76fbb0b9
SZ
3030 }
3031
3032 if (rxd->rx_m != NULL) {
4447c752 3033 bus_dmamap_sync(rdata->jme_rx_tag, rxd->rx_dmamap,
76fbb0b9 3034 BUS_DMASYNC_POSTREAD);
4447c752 3035 bus_dmamap_unload(rdata->jme_rx_tag, rxd->rx_dmamap);
76fbb0b9
SZ
3036 }
3037 map = rxd->rx_dmamap;
4447c752
SZ
3038 rxd->rx_dmamap = rdata->jme_rx_sparemap;
3039 rdata->jme_rx_sparemap = map;
76fbb0b9 3040 rxd->rx_m = m;
fd2a6d2c 3041 rxd->rx_paddr = segs.ds_addr;
76fbb0b9 3042
fd2a6d2c 3043 jme_setup_rxdesc(rxd);
4447c752 3044 return 0;
76fbb0b9
SZ
3045}
3046
3047static void
3048jme_set_vlan(struct jme_softc *sc)
3049{
3050 struct ifnet *ifp = &sc->arpcom.ac_if;
3051 uint32_t reg;
3052
31f0d5a2 3053 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
3054
3055 reg = CSR_READ_4(sc, JME_RXMAC);
3056 reg &= ~RXMAC_VLAN_ENB;
3057 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
3058 reg |= RXMAC_VLAN_ENB;
3059 CSR_WRITE_4(sc, JME_RXMAC, reg);
3060}
3061
3062static void
3063jme_set_filter(struct jme_softc *sc)
3064{
3065 struct ifnet *ifp = &sc->arpcom.ac_if;
3066 struct ifmultiaddr *ifma;
3067 uint32_t crc;
3068 uint32_t mchash[2];
3069 uint32_t rxcfg;
3070
31f0d5a2 3071 ASSERT_IFNET_SERIALIZED_ALL(ifp);
76fbb0b9
SZ
3072
3073 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3074 rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3075 RXMAC_ALLMULTI);
3076
3077 /*
3078 * Always accept frames destined to our station address.
3079 * Always accept broadcast frames.
3080 */
3081 rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;
3082
3083 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
3084 if (ifp->if_flags & IFF_PROMISC)
3085 rxcfg |= RXMAC_PROMISC;
3086 if (ifp->if_flags & IFF_ALLMULTI)
3087 rxcfg |= RXMAC_ALLMULTI;
3088 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3089 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3090 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3091 return;
3092 }
3093
3094 /*
3095 * Set up the multicast address filter by passing all multicast
3096 * addresses through a CRC generator, and then using the low-order
3097 * 6 bits as an index into the 64 bit multicast hash table. The
3098 * high order bits select the register, while the rest of the bits
3099 * select the bit within the register.
3100 */
3101 rxcfg |= RXMAC_MULTICAST;
3102 bzero(mchash, sizeof(mchash));
3103
441d34b2 3104 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
76fbb0b9
SZ
3105 if (ifma->ifma_addr->sa_family != AF_LINK)
3106 continue;
3107 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3108 ifma->ifma_addr), ETHER_ADDR_LEN);
3109
3110 /* Just want the 6 least significant bits. */
3111 crc &= 0x3f;
3112
3113 /* Set the corresponding bit in the hash table. */
3114 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3115 }
3116
3117 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3118 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3119 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3120}
3121
3122static int
2870abc4 3123jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
76fbb0b9 3124{
2870abc4
SZ
3125 struct jme_softc *sc = arg1;
3126 struct ifnet *ifp = &sc->arpcom.ac_if;
3127 int error, v;
3128
31f0d5a2 3129 ifnet_serialize_all(ifp);
2870abc4
SZ
3130
3131 v = sc->jme_tx_coal_to;
3132 error = sysctl_handle_int(oidp, &v, 0, req);
3133 if (error || req->newptr == NULL)
3134 goto back;
3135
3136 if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
3137 error = EINVAL;
3138 goto back;
3139 }
3140
3141 if (v != sc->jme_tx_coal_to) {
3142 sc->jme_tx_coal_to = v;
3143 if (ifp->if_flags & IFF_RUNNING)
3144 jme_set_tx_coal(sc);
3145 }
3146back:
31f0d5a2 3147 ifnet_deserialize_all(ifp);
2870abc4 3148 return error;
76fbb0b9
SZ
3149}
3150
3151static int
2870abc4 3152jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
76fbb0b9 3153{
2870abc4
SZ
3154 struct jme_softc *sc = arg1;
3155 struct ifnet *ifp = &sc->arpcom.ac_if;
3156 int error, v;
3157
31f0d5a2 3158 ifnet_serialize_all(ifp);
2870abc4
SZ
3159
3160 v = sc->jme_tx_coal_pkt;
3161 error = sysctl_handle_int(oidp, &v, 0, req);
3162 if (error || req->newptr == NULL)
3163 goto back;
3164
3165 if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
3166 error = EINVAL;
3167 goto back;
3168 }
3169
3170 if (v != sc->jme_tx_coal_pkt) {
3171 sc->jme_tx_coal_pkt = v;
3172 if (ifp->if_flags & IFF_RUNNING)
3173 jme_set_tx_coal(sc);
3174 }
3175back:
31f0d5a2 3176 ifnet_deserialize_all(ifp);
2870abc4 3177 return error;
76fbb0b9
SZ
3178}
3179
3180static int
2870abc4 3181jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
76fbb0b9 3182{
2870abc4
SZ
3183 struct jme_softc *sc = arg1;
3184 struct ifnet *ifp = &sc->arpcom.ac_if;
3185 int error, v;
3186
31f0d5a2 3187 ifnet_serialize_all(ifp);
2870abc4
SZ
3188
3189 v = sc->jme_rx_coal_to;
3190 error = sysctl_handle_int(oidp, &v, 0, req);
3191 if (error || req->newptr == NULL)
3192 goto back;
3193
3194 if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
3195 error = EINVAL;
3196 goto back;
3197 }
3198
3199 if (v != sc->jme_rx_coal_to) {
3200 sc->jme_rx_coal_to = v;
3201 if (ifp->if_flags & IFF_RUNNING)
3202 jme_set_rx_coal(sc);
3203 }
3204back:
31f0d5a2 3205 ifnet_deserialize_all(ifp);
2870abc4 3206 return error;
76fbb0b9
SZ
3207}
3208
3209static int
2870abc4
SZ
3210jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3211{
3212 struct jme_softc *sc = arg1;
3213 struct ifnet *ifp = &sc->arpcom.ac_if;
3214 int error, v;
3215
31f0d5a2 3216 ifnet_serialize_all(ifp);
2870abc4
SZ
3217
3218 v = sc->jme_rx_coal_pkt;
3219 error = sysctl_handle_int(oidp, &v, 0, req);
3220 if (error || req->newptr == NULL)
3221 goto back;
3222
3223 if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
3224 error = EINVAL;
3225 goto back;
3226 }
3227
3228 if (v != sc->jme_rx_coal_pkt) {
3229 sc->jme_rx_coal_pkt = v;
3230 if (ifp->if_flags & IFF_RUNNING)
3231 jme_set_rx_coal(sc);
3232 }
3233back:
31f0d5a2 3234 ifnet_deserialize_all(ifp);
2870abc4
SZ
3235 return error;
3236}
3237
3238static void
3239jme_set_tx_coal(struct jme_softc *sc)
3240{
3241 uint32_t reg;
3242
3243 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
3244 PCCTX_COAL_TO_MASK;
3245 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
3246 PCCTX_COAL_PKT_MASK;
3247 reg |= PCCTX_COAL_TXQ0;
3248 CSR_WRITE_4(sc, JME_PCCTX, reg);
3249}
3250
3251static void
3252jme_set_rx_coal(struct jme_softc *sc)
76fbb0b9 3253{
2870abc4 3254 uint32_t reg;
4447c752 3255 int r;
2870abc4
SZ
3256
3257 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
3258 PCCRX_COAL_TO_MASK;
3259 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
3260 PCCRX_COAL_PKT_MASK;
7b040092 3261 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r)
9f20b7b3 3262 CSR_WRITE_4(sc, JME_PCCRX(r), reg);
76fbb0b9 3263}
9de40864 3264
8a0620e4 3265#ifdef IFPOLL_ENABLE
9de40864
SZ
3266
3267static void
8a0620e4 3268jme_npoll_status(struct ifnet *ifp, int pollhz __unused)
9de40864
SZ
3269{
3270 struct jme_softc *sc = ifp->if_softc;
3271 uint32_t status;
3272
31f0d5a2 3273 ASSERT_SERIALIZED(&sc->jme_serialize);
9de40864 3274
8a0620e4
SZ
3275 status = CSR_READ_4(sc, JME_INTR_STATUS);
3276 if (status & INTR_RXQ_DESC_EMPTY) {
41d0e8e7 3277 CSR_WRITE_4(sc, JME_INTR_STATUS, status & INTR_RXQ_DESC_EMPTY);
de437f82 3278 jme_rx_restart(sc, status);
8a0620e4
SZ
3279 }
3280}
0e7f1e6f 3281
8a0620e4
SZ
3282static void
3283jme_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3284{
3285 struct jme_rxdata *rdata = arg;
3286
3287 ASSERT_SERIALIZED(&rdata->jme_rx_serialize);
3288
3289 jme_rxeof(rdata, cycle);
3290}
3291
3292static void
3293jme_npoll_tx(struct ifnet *ifp, void *arg __unused, int cycle __unused)
3294{
3295 struct jme_softc *sc = ifp->if_softc;
3296
3297 ASSERT_SERIALIZED(&sc->jme_cdata.jme_tx_serialize);
3298
3299 jme_txeof(sc);
3300 if (!ifq_is_empty(&ifp->if_snd))
3301 if_devstart(ifp);
3302}
3303
3304static void
3305jme_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3306{
3307 struct jme_softc *sc = ifp->if_softc;
3308
3309 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3310
3311 if (info) {
3312 int i, off;
3313
3314 info->ifpi_status.status_func = jme_npoll_status;
3315 info->ifpi_status.serializer = &sc->jme_serialize;
3316
3317 off = sc->jme_npoll_txoff;
3318 KKASSERT(off <= ncpus2);
3319 info->ifpi_tx[off].poll_func = jme_npoll_tx;
3320 info->ifpi_tx[off].arg = NULL;
3321 info->ifpi_tx[off].serializer = &sc->jme_cdata.jme_tx_serialize;
3322
3323 off = sc->jme_npoll_rxoff;
3324 for (i = 0; i < sc->jme_cdata.jme_rx_ring_cnt; ++i) {
31f0d5a2 3325 struct jme_rxdata *rdata =
8a0620e4
SZ
3326 &sc->jme_cdata.jme_rx_data[i];
3327 int idx = i + off;
31f0d5a2 3328
8a0620e4
SZ
3329 info->ifpi_rx[idx].poll_func = jme_npoll_rx;
3330 info->ifpi_rx[idx].arg = rdata;
3331 info->ifpi_rx[idx].serializer =
3332 &rdata->jme_rx_serialize;
31f0d5a2 3333 }
9de40864 3334
8a0620e4
SZ
3335 if (ifp->if_flags & IFF_RUNNING)
3336 jme_disable_intr(sc);
3337 ifp->if_npoll_cpuid = sc->jme_npoll_txoff;
3338 } else {
3339 if (ifp->if_flags & IFF_RUNNING)
3340 jme_enable_intr(sc);
3341 ifp->if_npoll_cpuid = -1;
3342 }
3343}
9de40864 3344
8a0620e4
SZ
3345static int
3346jme_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3347{
3348 struct jme_softc *sc = (void *)arg1;
3349 struct ifnet *ifp = &sc->arpcom.ac_if;
3350 int error, off;
3351
3352 off = sc->jme_npoll_rxoff;
3353 error = sysctl_handle_int(oidp, &off, 0, req);
3354 if (error || req->newptr == NULL)
3355 return error;
3356 if (off < 0)
3357 return EINVAL;
3358
3359 ifnet_serialize_all(ifp);
3360 if (off >= ncpus2 || off % sc->jme_cdata.jme_rx_ring_cnt != 0) {
3361 error = EINVAL;
3362 } else {
3363 error = 0;
3364 sc->jme_npoll_rxoff = off;
3365 }
3366 ifnet_deserialize_all(ifp);
3367
3368 return error;
3369}
3370
3371static int
3372jme_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3373{
3374 struct jme_softc *sc = (void *)arg1;
3375 struct ifnet *ifp = &sc->arpcom.ac_if;
3376 int error, off;
3377
3378 off = sc->jme_npoll_txoff;
3379 error = sysctl_handle_int(oidp, &off, 0, req);
3380 if (error || req->newptr == NULL)
3381 return error;
3382 if (off < 0)
3383 return EINVAL;
3384
3385 ifnet_serialize_all(ifp);
3386 if (off >= ncpus2) {
3387 error = EINVAL;
3388 } else {
3389 error = 0;
3390 sc->jme_npoll_txoff = off;
9de40864 3391 }
8a0620e4
SZ
3392 ifnet_deserialize_all(ifp);
3393
3394 return error;
9de40864
SZ
3395}
3396
8a0620e4 3397#endif /* IFPOLL_ENABLE */
4447c752
SZ
3398
3399static int
dea2452a 3400jme_rxring_dma_alloc(struct jme_rxdata *rdata)
4447c752 3401{
1128a202 3402 bus_dmamem_t dmem;
ff7f3632 3403 int error, asize;
4447c752 3404
ff7f3632 3405 asize = roundup2(JME_RX_RING_SIZE(rdata), JME_RX_RING_ALIGN);
dea2452a 3406 error = bus_dmamem_coherent(rdata->jme_sc->jme_cdata.jme_ring_tag,
1128a202 3407 JME_RX_RING_ALIGN, 0,
0eb220ec 3408 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
ff7f3632 3409 asize, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
4447c752 3410 if (error) {
dea2452a
SZ
3411 device_printf(rdata->jme_sc->jme_dev,
3412 "could not allocate %dth Rx ring.\n", rdata->jme_rx_idx);
4447c752
SZ
3413 return error;
3414 }
1128a202
SZ
3415 rdata->jme_rx_ring_tag = dmem.dmem_tag;
3416 rdata->jme_rx_ring_map = dmem.dmem_map;
3417 rdata->jme_rx_ring = dmem.dmem_addr;
3418 rdata->jme_rx_ring_paddr = dmem.dmem_busaddr;
4447c752
SZ
3419
3420 return 0;
3421}
3422
3423static int
064b75ed
SZ
3424jme_rxbuf_dma_filter(void *arg __unused, bus_addr_t paddr)
3425{
3426 if ((paddr & 0xffffffff) == 0) {
3427 /*
3428 * Don't allow lower 32bits of the RX buffer's
3429 * physical address to be 0, else it will break
3430 * hardware pending RSS information delivery
3431 * detection on RX path.
3432 */
3433 return 1;
3434 }
3435 return 0;
3436}
3437
3438static int
dea2452a 3439jme_rxbuf_dma_alloc(struct jme_rxdata *rdata)
4447c752 3440{
064b75ed 3441 bus_addr_t lowaddr;
4447c752
SZ
3442 int i, error;
3443
064b75ed
SZ
3444 lowaddr = BUS_SPACE_MAXADDR;
3445 if (JME_ENABLE_HWRSS(rdata->jme_sc)) {
3446 /* jme_rxbuf_dma_filter will be called */
3447 lowaddr = BUS_SPACE_MAXADDR_32BIT;
3448 }
3449
4447c752 3450 /* Create tag for Rx buffers. */
dea2452a
SZ
3451 error = bus_dma_tag_create(
3452 rdata->jme_sc->jme_cdata.jme_buffer_tag,/* parent */
4447c752 3453 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
064b75ed 3454 lowaddr, /* lowaddr */
4447c752 3455 BUS_SPACE_MAXADDR, /* highaddr */
064b75ed 3456 jme_rxbuf_dma_filter, NULL, /* filter, filterarg */
4447c752
SZ
3457 MCLBYTES, /* maxsize */
3458 1, /* nsegments */
3459 MCLBYTES, /* maxsegsize */
9d424cee 3460 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,/* flags */
4447c752
SZ
3461 &rdata->jme_rx_tag);
3462 if (error) {
dea2452a
SZ
3463 device_printf(rdata->jme_sc->jme_dev,
3464 "could not create %dth Rx DMA tag.\n", rdata->jme_rx_idx);
4447c752
SZ
3465 return error;
3466 }
3467
3468 /* Create DMA maps for Rx buffers. */
9d424cee 3469 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
4447c752
SZ
3470 &rdata->jme_rx_sparemap);
3471 if (error) {
dea2452a
SZ
3472 device_printf(rdata->jme_sc->jme_dev,
3473 "could not create %dth spare Rx dmamap.\n",
3474 rdata->jme_rx_idx);
4447c752
SZ
3475 bus_dma_tag_destroy(rdata->jme_rx_tag);
3476 rdata->jme_rx_tag = NULL;
3477 return error;
3478 }
7b040092 3479 for (i = 0; i < rdata->jme_rx_desc_cnt; i++) {
4447c752
SZ
3480 struct jme_rxdesc *rxd = &rdata->jme_rxdesc[i];
3481
9d424cee 3482 error = bus_dmamap_create(rdata->jme_rx_tag, BUS_DMA_WAITOK,
4447c752
SZ
3483 &rxd->rx_dmamap);
3484 if (error) {
3485 int j;
3486
dea2452a 3487 device_printf(rdata->jme_sc->jme_dev,
4447c752 3488 "could not create %dth Rx dmamap "
dea2452a 3489 "for %dth RX ring.\n", i, rdata->jme_rx_idx);
4447c752
SZ
3490
3491 for (j = 0; j < i; ++j) {
3492 rxd = &rdata->jme_rxdesc[j];
3493 bus_dmamap_destroy(rdata->jme_rx_tag,
3494 rxd->rx_dmamap);
3495 }
3496 bus_dmamap_destroy(rdata->jme_rx_tag,
3497 rdata->jme_rx_sparemap);
3498 bus_dma_tag_destroy(rdata->jme_rx_tag);
3499 rdata->jme_rx_tag = NULL;
3500 return error;
3501 }
3502 }
3503 return 0;
3504}
3505
3506static void
3507jme_rx_intr(struct jme_softc *sc, uint32_t status)
3508{
eda7db08 3509 int r;
4447c752 3510
7b040092 3511 for (r = 0; r < sc->jme_cdata.jme_rx_ring_cnt; ++r) {
31810fb8 3512 struct jme_rxdata *rdata = &sc->jme_cdata.jme_rx_data[r];
31f0d5a2 3513
31810fb8 3514 if (status & rdata->jme_rx_coal) {
31f0d5a2 3515 lwkt_serialize_enter(&rdata->jme_rx_serialize);
dea2452a 3516 jme_rxeof(rdata, -1);
31f0d5a2
SZ
3517 lwkt_serialize_exit(&rdata->jme_rx_serialize);
3518 }
4447c752
SZ
3519 }
3520}
760c056c
SZ
3521
3522static void
3523jme_enable_rss(struct jme_softc *sc)
3524{
24dd1705
SZ
3525 uint32_t rssc, ind;
3526 uint8_t key[RSSKEY_NREGS * RSSKEY_REGSIZE];
760c056c
SZ
3527 int i;
3528
022f915e
SZ
3529 KASSERT(sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_2 ||
3530 sc->jme_cdata.jme_rx_ring_cnt == JME_NRXRING_4,
ed20d0e3 3531 ("%s: invalid # of RX rings (%d)",
022f915e 3532 sc->arpcom.ac_if.if_xname, sc->jme_cdata.jme_rx_ring_cnt));
66f75939 3533
760c056c
SZ
3534 rssc = RSSC_HASH_64_ENTRY;
3535 rssc |= RSSC_HASH_IPV4 | RSSC_HASH_IPV4_TCP;
7b040092 3536 rssc |= sc->jme_cdata.jme_rx_ring_cnt >> 1;
760c056c
SZ
3537 JME_RSS_DPRINTF(sc, 1, "rssc 0x%08x\n", rssc);
3538 CSR_WRITE_4(sc, JME_RSSC, rssc);
3539
24dd1705
SZ
3540 toeplitz_get_key(key, sizeof(key));
3541 for (i = 0; i < RSSKEY_NREGS; ++i) {
3542 uint32_t keyreg;
3543
3544 keyreg = RSSKEY_REGVAL(key, i);
3545 JME_RSS_DPRINTF(sc, 5, "keyreg%d 0x%08x\n", i, keyreg);
3546
3547 CSR_WRITE_4(sc, RSSKEY_REG(i), keyreg);
3548 }
760c056c 3549
66f75939
SZ
3550 /*
3551 * Create redirect table in following fashion:
3552 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3553 */
760c056c 3554 ind = 0;
66f75939
SZ
3555 for (i = 0; i < RSSTBL_REGSIZE; ++i) {
3556 int q;
3557
7b040092 3558 q = i % sc->jme_cdata.jme_rx_ring_cnt;
66f75939 3559 ind |= q << (i * 8);
760c056c
SZ
3560 }
3561 JME_RSS_DPRINTF(sc, 1, "ind 0x%08x\n", ind);
66f75939 3562
760c056c
SZ
3563 for (i = 0; i < RSSTBL_NREGS; ++i)
3564 CSR_WRITE_4(sc, RSSTBL_REG(i), ind);
3565}
3566
3567static void
3568jme_disable_rss(struct jme_softc *sc)
3569{
760c056c
SZ
3570 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
3571}
31f0d5a2
SZ
3572
3573static void
3574jme_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3575{
3576 struct jme_softc *sc