/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $
 * $DragonFly: src/sys/dev/netif/jme/if_jme.c,v 1.11 2008/10/25 10:46:55 sephe Exp $
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/jmphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/jme/if_jmereg.h>
#include <dev/netif/jme/if_jmevar.h>

#include "miibus_if.h"

/* Define JME_SHOW_ERRORS to enable printing of Rx errors. */
#undef JME_SHOW_ERRORS

#define JME_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	jme_probe(device_t);
static int	jme_attach(device_t);
static int	jme_detach(device_t);
static int	jme_shutdown(device_t);
static int	jme_suspend(device_t);
static int	jme_resume(device_t);

static int	jme_miibus_readreg(device_t, int, int);
static int	jme_miibus_writereg(device_t, int, int, int);
static void	jme_miibus_statchg(device_t);

static void	jme_init(void *);
static int	jme_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	jme_start(struct ifnet *);
static void	jme_watchdog(struct ifnet *);
static void	jme_mediastatus(struct ifnet *, struct ifmediareq *);
static int	jme_mediachange(struct ifnet *);

static void	jme_intr(void *);
static void	jme_txeof(struct jme_softc *);
static void	jme_rxeof(struct jme_softc *);

static int	jme_dma_alloc(struct jme_softc *);
static void	jme_dma_free(struct jme_softc *);
static void	jme_dmamap_ring_cb(void *, bus_dma_segment_t *, int, int);
static void	jme_dmamap_buf_cb(void *, bus_dma_segment_t *, int,
				  bus_size_t, int);
static int	jme_init_rx_ring(struct jme_softc *);
static void	jme_init_tx_ring(struct jme_softc *);
static void	jme_init_ssb(struct jme_softc *);
static int	jme_newbuf(struct jme_softc *, struct jme_rxdesc *, int);
static int	jme_encap(struct jme_softc *, struct mbuf **);
static void	jme_rxpkt(struct jme_softc *);

static void	jme_tick(void *);
static void	jme_stop(struct jme_softc *);
static void	jme_reset(struct jme_softc *);
static void	jme_set_vlan(struct jme_softc *);
static void	jme_set_filter(struct jme_softc *);
static void	jme_stop_tx(struct jme_softc *);
static void	jme_stop_rx(struct jme_softc *);
static void	jme_mac_config(struct jme_softc *);
static void	jme_reg_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_macaddr(struct jme_softc *, uint8_t[]);
static int	jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
#ifdef notyet
static void	jme_setwol(struct jme_softc *);
static void	jme_setlinkspeed(struct jme_softc *);
#endif
static void	jme_set_tx_coal(struct jme_softc *);
static void	jme_set_rx_coal(struct jme_softc *);

static void	jme_sysctl_node(struct jme_softc *);
static int	jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS);
static int	jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS);

/*
 * Devices supported by this driver.
 */
static const struct jme_dev {
	uint16_t	jme_vendorid;
	uint16_t	jme_deviceid;
	uint32_t	jme_caps;
	const char	*jme_name;
} jme_devs[] = {
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC250,
	    JME_CAP_JUMBO,
	    "JMicron Inc, JMC250 Gigabit Ethernet" },
	{ PCI_VENDOR_JMICRON, PCI_PRODUCT_JMICRON_JMC260,
	    JME_CAP_FASTETH,
	    "JMicron Inc, JMC260 Fast Ethernet" },
	{ 0, 0, 0, NULL }
};

static device_method_t jme_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		jme_probe),
	DEVMETHOD(device_attach,	jme_attach),
	DEVMETHOD(device_detach,	jme_detach),
	DEVMETHOD(device_shutdown,	jme_shutdown),
	DEVMETHOD(device_suspend,	jme_suspend),
	DEVMETHOD(device_resume,	jme_resume),

	/* Bus interface. */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	jme_miibus_readreg),
	DEVMETHOD(miibus_writereg,	jme_miibus_writereg),
	DEVMETHOD(miibus_statchg,	jme_miibus_statchg),

	{ NULL, NULL }
};

static driver_t jme_driver = {
	"jme",
	jme_methods,
	sizeof(struct jme_softc)
};

static devclass_t jme_devclass;

DECLARE_DUMMY_MODULE(if_jme);
MODULE_DEPEND(if_jme, miibus, 1, 1, 1);
DRIVER_MODULE(if_jme, pci, jme_driver, jme_devclass, 0, 0);
DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);

/*
 * Read a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_readreg(device_t dev, int phy, int reg)
{
	struct jme_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy read timeout: "
			      "phy %d, reg %d\n", phy, reg);
		return (0);
	}

	return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the JMC250.
 */
static int
jme_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct jme_softc *sc = device_get_softc(dev);
	int i;

	/* For FPGA version, PHY address 0 should be ignored. */
	if (sc->jme_caps & JME_CAP_FPGA) {
		if (phy == 0)
			return (0);
	} else {
		if (sc->jme_phyaddr != phy)
			return (0);
	}

	CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
	    ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
	    SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));

	for (i = JME_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->jme_dev, "phy write timeout: "
			      "phy %d, reg %d\n", phy, reg);
	}

	return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
jme_miibus_statchg(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	struct jme_txdesc *txd;
	bus_addr_t paddr;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = device_get_softc(sc->jme_miibus);

	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		case IFM_1000_T:
			if (sc->jme_caps & JME_CAP_FASTETH)
				break;
			sc->jme_flags |= JME_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Disabling the Rx/Tx MACs has a side-effect of resetting the
	 * JME_TXNDA/JME_RXNDA registers to the first address of the
	 * Tx/Rx descriptor rings, so the driver has to reset its
	 * internal producer/consumer pointers and reclaim any
	 * allocated resources.  Note that just saving the values of
	 * the JME_TXNDA and JME_RXNDA registers before stopping the
	 * MAC and restoring them afterwards is not sufficient to
	 * guarantee correct MAC state, because stopping the MAC can
	 * take a while and the hardware may have updated
	 * JME_TXNDA/JME_RXNDA during the stop operation.
	 */

	/* Disable interrupts */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

	/* Stop driver */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	callout_stop(&sc->jme_tick_ch);

	/* Stop receiver/transmitter. */
	jme_stop_rx(sc);
	jme_stop_tx(sc);

	jme_rxeof(sc);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				bus_dmamap_unload(
				    sc->jme_cdata.jme_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				txd->tx_ndesc = 0;
				ifp->if_oerrors++;
			}
		}
	}

	/*
	 * Reuse configured Rx descriptors and reset the
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;

	jme_init_tx_ring(sc);

	/* Initialize shadow status block. */
	jme_init_ssb(sc);

	/* Program MAC with resolved speed/duplex/flow-control. */
	if (sc->jme_flags & JME_FLAG_LINK) {
		jme_mac_config(sc);

		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		    RXCSR_RXQ_START);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

/*
 * Get the current interface media status.
 */
static void
jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
jme_mediachange(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static int
jme_probe(device_t dev)
{
	const struct jme_dev *sp;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (sp = jme_devs; sp->jme_name != NULL; ++sp) {
		if (vid == sp->jme_vendorid && did == sp->jme_deviceid) {
			struct jme_softc *sc = device_get_softc(dev);

			sc->jme_caps = sp->jme_caps;
			if (did == PCI_PRODUCT_JMICRON_JMC250 &&
			    pci_get_revid(dev) == JME_REV_JMC250_A2) {
				sc->jme_workaround |= JME_WA_EXTFIFO |
						      JME_WA_HDX;
			}

			device_set_desc(dev, sp->jme_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
{
	uint32_t reg;
	int i;

	*val = 0;
	for (i = JME_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, JME_SMBCSR);
		if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
			break;
		DELAY(1);
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
		return (ETIMEDOUT);
	}

	reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
	CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
	for (i = JME_TIMEOUT; i > 0; i--) {
		DELAY(1);
		reg = CSR_READ_4(sc, JME_SMBINTF);
		if ((reg & SMBINTF_CMD_TRIGGER) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->jme_dev, "EEPROM read timeout!\n");
		return (ETIMEDOUT);
	}

	reg = CSR_READ_4(sc, JME_SMBINTF);
	*val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;

	return (0);
}

static int
jme_eeprom_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint8_t fup, reg, val;
	uint32_t offset;
	int match;

	offset = 0;
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG0)
		return (ENOENT);
	if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
	    fup != JME_EEPROM_SIG1)
		return (ENOENT);
	match = 0;
	do {
		if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
			break;
		/* Check for the end of EEPROM descriptor. */
		if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
			break;
		if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
		    JME_EEPROM_PAGE_BAR1) == fup) {
			if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
				break;
			if (reg >= JME_PAR0 &&
			    reg < JME_PAR0 + ETHER_ADDR_LEN) {
				if (jme_eeprom_read_byte(sc, offset + 2,
				    &val) != 0)
					break;
				eaddr[reg - JME_PAR0] = val;
				match++;
			}
		}
		/* Try next eeprom descriptor. */
		offset += JME_EEPROM_DESC_BYTES;
	} while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);

	if (match == ETHER_ADDR_LEN)
		return (0);

	return (ENOENT);
}

static void
jme_reg_macaddr(struct jme_softc *sc, uint8_t eaddr[])
{
	uint32_t par0, par1;

	/* Read station address. */
	par0 = CSR_READ_4(sc, JME_PAR0);
	par1 = CSR_READ_4(sc, JME_PAR1);
	par1 &= 0xFFFF;
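	/*
	 * An all-zero station address means the PAR registers were never
	 * programmed; a set LSB in the first octet would make it a
	 * multicast address, which is equally invalid for a station
	 * address.
	 */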
	if ((par0 == 0 && par1 == 0) || (par0 & 0x1)) {
		device_printf(sc->jme_dev,
		    "generating fake ethernet address.\n");
		par0 = karc4random();
		/* Set OUI to JMicron. */
		eaddr[0] = 0x00;
		eaddr[1] = 0x1B;
		eaddr[2] = 0x8C;
		eaddr[3] = (par0 >> 16) & 0xff;
		eaddr[4] = (par0 >> 8) & 0xff;
		eaddr[5] = par0 & 0xff;
	} else {
		eaddr[0] = (par0 >> 0) & 0xFF;
		eaddr[1] = (par0 >> 8) & 0xFF;
		eaddr[2] = (par0 >> 16) & 0xFF;
		eaddr[3] = (par0 >> 24) & 0xFF;
		eaddr[4] = (par1 >> 0) & 0xFF;
		eaddr[5] = (par1 >> 8) & 0xFF;
	}
}

static int
jme_attach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;
	uint8_t pcie_ptr;
	int error = 0;
	uint8_t eaddr[ETHER_ADDR_LEN];

	sc->jme_dev = dev;
	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	callout_init(&sc->jme_tick_ch);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, mem;

		irq = pci_read_config(dev, PCIR_INTLINE, 4);
		mem = pci_read_config(dev, JME_PCIR_BAR, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, PCIR_INTLINE, irq, 4);
		pci_write_config(dev, JME_PCIR_BAR, mem, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 *
	 * JMC250 supports both memory mapped and I/O register space
	 * access.  Because I/O register access would require the use
	 * of a different BAR, it is a waste of time to use it; the
	 * JMC250 maps its entire register space with 16K of memory.
	 */
	sc->jme_mem_rid = JME_PCIR_BAR;
	sc->jme_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						 &sc->jme_mem_rid, RF_ACTIVE);
	if (sc->jme_mem_res == NULL) {
		device_printf(dev, "can't allocate IO memory\n");
		return ENXIO;
	}
	sc->jme_mem_bt = rman_get_bustag(sc->jme_mem_res);
	sc->jme_mem_bh = rman_get_bushandle(sc->jme_mem_res);

	/*
	 * Allocate IRQ
	 */
	sc->jme_irq_rid = 0;
	sc->jme_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						 &sc->jme_irq_rid,
						 RF_SHAREABLE | RF_ACTIVE);
	if (sc->jme_irq_res == NULL) {
		device_printf(dev, "can't allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Extract FPGA revision
	 */
	reg = CSR_READ_4(sc, JME_CHIPMODE);
	if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
	    CHIPMODE_NOT_FPGA) {
		sc->jme_caps |= JME_CAP_FPGA;
		if (bootverbose) {
			device_printf(dev, "FPGA revision : 0x%04x\n",
				      (reg & CHIPMODE_FPGA_REV_MASK) >>
				      CHIPMODE_FPGA_REV_SHIFT);
		}
	}

	/* Reset the ethernet controller. */
	jme_reset(sc);

	/* Get station address. */
	reg = CSR_READ_4(sc, JME_SMBCSR);
	if (reg & SMBCSR_EEPROM_PRESENT)
		error = jme_eeprom_macaddr(sc, eaddr);
	if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
		if (error != 0 && (bootverbose)) {
			device_printf(dev, "ethernet hardware address "
				      "not found in EEPROM.\n");
		}
		jme_reg_macaddr(sc, eaddr);
	}

	/*
	 * Save PHY address.
	 * The integrated JR0211 has a fixed PHY address, whereas the
	 * FPGA version requires PHY probing to get the correct PHY
	 * address.
	 */
	if ((sc->jme_caps & JME_CAP_FPGA) == 0) {
		sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
		    GPREG0_PHY_ADDR_MASK;
		if (bootverbose) {
			device_printf(dev, "PHY is at address %d.\n",
			    sc->jme_phyaddr);
		}
	} else {
		sc->jme_phyaddr = 0;
	}

	/* Set max allowable DMA size. */
	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr != 0) {
		uint16_t ctrl;

		sc->jme_caps |= JME_CAP_PCIE;
		ctrl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((ctrl >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((ctrl >> 5) & 0x07));
		}
		switch (ctrl & PCIEM_DEVCTL_MAX_READRQ_MASK) {
		case PCIEM_DEVCTL_MAX_READRQ_128:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
			break;
		case PCIEM_DEVCTL_MAX_READRQ_256:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
			break;
		default:
			sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
			break;
		}
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	} else {
		sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
		sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
	}
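	/*
	 * Note: the Tx DMA burst chosen above mirrors the PCIe Max Read
	 * Request Size read from DEVCTRL, presumably so the controller
	 * never issues read requests larger than the link allows; Rx DMA
	 * sticks to conservative 128 byte bursts in all cases.
	 */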

#ifdef notyet
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		sc->jme_caps |= JME_CAP_PMCAP;
#endif

	/*
	 * Create sysctl tree
	 */
	jme_sysctl_node(sc);

	/* Allocate DMA stuff */
	error = jme_dma_alloc(sc);
	if (error)
		goto fail;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = jme_init;
	ifp->if_ioctl = jme_ioctl;
	ifp->if_start = jme_start;
	ifp->if_watchdog = jme_watchdog;
	ifq_set_maxlen(&ifp->if_snd, JME_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/* JMC250 supports Tx/Rx checksum offload and hardware vlan tagging. */
	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = JME_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_phy_probe(dev, &sc->jme_miibus,
			      jme_mediachange, jme_mediastatus);
	if (error) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Save PHYADDR for FPGA mode PHY.
	 */
	if (sc->jme_caps & JME_CAP_FPGA) {
		struct mii_data *mii = device_get_softc(sc->jme_miibus);

		if (mii->mii_instance != 0) {
			struct mii_softc *miisc;

			LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
				if (miisc->mii_phy != 0) {
					sc->jme_phyaddr = miisc->mii_phy;
					break;
				}
			}
			if (sc->jme_phyaddr != 0) {
				device_printf(sc->jme_dev,
				    "FPGA PHY is at %d\n", sc->jme_phyaddr);
				/* vendor magic. */
				jme_miibus_writereg(dev, sc->jme_phyaddr,
				    JMPHY_CONF, JMPHY_CONF_DEFFIFO);

				/* XXX should we clear JME_WA_EXTFIFO */
			}
		}
	}

	ether_ifattach(ifp, eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->jme_irq_res, INTR_MPSAFE, jme_intr, sc,
			       &sc->jme_irq_handle, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->jme_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
fail:
	jme_detach(dev);
	return (error);
}

static int
jme_detach(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		jme_stop(sc);
		bus_teardown_intr(dev, sc->jme_irq_res, sc->jme_irq_handle);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->jme_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->jme_sysctl_ctx);

	if (sc->jme_miibus != NULL)
		device_delete_child(dev, sc->jme_miibus);
	bus_generic_detach(dev);

	if (sc->jme_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->jme_irq_rid,
				     sc->jme_irq_res);
	}

	if (sc->jme_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->jme_mem_rid,
				     sc->jme_mem_res);
	}

	jme_dma_free(sc);

	return (0);
}

static void
jme_sysctl_node(struct jme_softc *sc)
{
	sysctl_ctx_init(&sc->jme_sysctl_ctx);
	sc->jme_sysctl_tree = SYSCTL_ADD_NODE(&sc->jme_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(sc->jme_dev),
				CTLFLAG_RD, 0, "");
	if (sc->jme_sysctl_tree == NULL) {
		device_printf(sc->jme_dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
			SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
			"tx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, jme_sysctl_tx_coal_to, "I", "jme tx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
			SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
			"tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, jme_sysctl_tx_coal_pkt, "I", "jme tx coalescing packet");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
			SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
			"rx_coal_to", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, jme_sysctl_rx_coal_to, "I", "jme rx coalescing timeout");

	SYSCTL_ADD_PROC(&sc->jme_sysctl_ctx,
			SYSCTL_CHILDREN(sc->jme_sysctl_tree), OID_AUTO,
			"rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, jme_sysctl_rx_coal_pkt, "I", "jme rx coalescing packet");

	sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
	sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
	sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
	sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
}
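
#if 0
/*
 * Illustrative sketch only, not part of the driver: the four
 * jme_sysctl_*_coal_* handlers registered above are defined later in
 * this file.  A handler of this kind is typically structured as below,
 * so the coalescing parameters can be changed at any time, even while
 * the interface is running.  The bounds checked here are placeholders;
 * the real handlers use the PCCTX/PCCRX limits from if_jmereg.h.
 */
static int
example_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
	struct jme_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	v = sc->jme_tx_coal_to;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 1 || v > 65535) {	/* placeholder bounds */
		error = EINVAL;
		goto back;
	}

	if (v != sc->jme_tx_coal_to) {
		sc->jme_tx_coal_to = v;
		/* Reprogram the PCC timer if the interface is running. */
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_tx_coal(sc);
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return (error);
}
#endif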

static void
jme_dmamap_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	*((bus_addr_t *)arg) = segs->ds_addr;
}

static void
jme_dmamap_buf_cb(void *xctx, bus_dma_segment_t *segs, int nsegs,
		  bus_size_t mapsz __unused, int error)
{
	struct jme_dmamap_ctx *ctx = xctx;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

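#if 0
/*
 * Illustrative sketch only, not compiled: how the callback pair above
 * is meant to be driven.  This mirrors the use in jme_encap() below;
 * "tag", "map" and "m" stand for a Tx DMA tag, a per-buffer map and an
 * mbuf chain.  ctx.nsegs holds the segment budget on entry and the
 * number of segments actually used on return, or 0 when the chain
 * needed more segments than allowed.
 */
static int
example_load_mbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m)
{
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t segs[JME_MAXTXSEGS];
	int error;

	ctx.nsegs = JME_MAXTXSEGS;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(tag, map, m, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (error == 0 && ctx.nsegs == 0) {
		/* Too many segments; the caller should defragment m. */
		bus_dmamap_unload(tag, map);
		error = EFBIG;
	}
	return (error);
}
#endif
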
static int
jme_dma_alloc(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	bus_addr_t busaddr, lowaddr, rx_ring_end, tx_ring_end;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;

again:
	/* Create parent ring tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent ring DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuff for TX ring
	 */

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_TX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_tx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Tx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
	    (void **)&sc->jme_rdata.jme_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_tx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
	    JME_TX_RING_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
				sc->jme_rdata.jme_tx_ring,
				sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_tx_ring_paddr = busaddr;

	/*
	 * Create DMA stuff for RX ring
	 */

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
	    JME_RX_RING_ALIGN, 0,	/* algnmnt, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_rx_ring_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate Rx ring DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
	    (void **)&sc->jme_rdata.jme_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_rx_ring_map);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
		return error;
	}

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
	    JME_RX_RING_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
				sc->jme_rdata.jme_rx_ring,
				sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_rx_ring_paddr = busaddr;

	/* Tx/Rx descriptor queue should reside within 4GB boundary. */
	tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
	rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
	if ((JME_ADDR_HI(tx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
	    (JME_ADDR_HI(rx_ring_end) !=
	     JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
		device_printf(sc->jme_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA address mode.\n");
		jme_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

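	/*
	 * The check above suffices because the hardware keeps only one
	 * high-address register per ring (see the JME_TXDBA_HI and
	 * JME_RXDBA_HI writes in jme_miibus_statchg()), so every
	 * descriptor of a ring has to share the same upper 32 address
	 * bits.
	 */
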
	/* Create parent buffer tag. */
	error = bus_dma_tag_create(NULL,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_buffer_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create parent buffer DMA tag.\n");
		return error;
	}

	/*
	 * Create DMA stuff for shadow status block
	 */

	/* Create shadow status block tag. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_SSB_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_SSB_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    JME_SSB_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_ssb_tag);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create shared status block DMA tag.\n");
		return error;
	}

	/* Allocate DMA'able memory for shared status block. */
	error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
	    (void **)&sc->jme_rdata.jme_ssb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->jme_cdata.jme_ssb_map);
	if (error) {
		device_printf(sc->jme_dev, "could not allocate DMA'able "
		    "memory for shared status block.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
		return error;
	}

	/* Load the DMA map for shared status block */
	error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
	    sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
	    JME_SSB_SIZE, jme_dmamap_ring_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(sc->jme_dev, "could not load DMA'able memory "
		    "for shared status block.\n");
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
				sc->jme_rdata.jme_ssb_block,
				sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
		return error;
	}
	sc->jme_rdata.jme_ssb_block_paddr = busaddr;

	/*
	 * Create DMA stuff for TX buffers
	 */

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    JME_TSO_MAXSIZE,		/* maxsize */
	    JME_MAXTXSEGS,		/* nsegments */
	    JME_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_tx_tag);
	if (error != 0) {
		device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < JME_TX_RING_CNT; i++) {
		txd = &sc->jme_cdata.jme_txdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Tx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc->jme_cdata.jme_txdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
			sc->jme_cdata.jme_tx_tag = NULL;
			return error;
		}
	}

	/*
	 * Create DMA stuff for RX buffers
	 */

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
	    JME_RX_BUF_ALIGN, 0,	/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    &sc->jme_cdata.jme_rx_tag);
	if (error) {
		device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
		return error;
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
	    &sc->jme_cdata.jme_rx_sparemap);
	if (error) {
		device_printf(sc->jme_dev,
		    "could not create spare Rx dmamap.\n");
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
		return error;
	}
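
	/*
	 * The spare map created above allows the Rx refill path to load
	 * a replacement mbuf into the spare map first and swap maps only
	 * on success, so a ring slot never loses its mapping when an
	 * mbuf allocation fails (see jme_newbuf()).
	 */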
	for (i = 0; i < JME_RX_RING_CNT; i++) {
		rxd = &sc->jme_cdata.jme_rxdesc[i];
		error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error) {
			int j;

			device_printf(sc->jme_dev,
			    "could not create %dth Rx dmamap.\n", i);

			for (j = 0; j < i; ++j) {
				rxd = &sc->jme_cdata.jme_rxdesc[j];
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
					   sc->jme_cdata.jme_rx_sparemap);
			bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
			sc->jme_cdata.jme_rx_tag = NULL;
			return error;
		}
	}
	return 0;
}

static void
jme_dma_free(struct jme_softc *sc)
{
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
				  sc->jme_cdata.jme_tx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
				sc->jme_rdata.jme_tx_ring,
				sc->jme_cdata.jme_tx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	}

	/* Rx ring */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
				  sc->jme_cdata.jme_rx_ring_map);
		bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
				sc->jme_rdata.jme_rx_ring,
				sc->jme_cdata.jme_rx_ring_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	}

	/* Tx buffers */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
					   txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	}

	/* Rx buffers */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
					   rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				   sc->jme_cdata.jme_rx_sparemap);
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	}

	/* Shadow status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
				  sc->jme_cdata.jme_ssb_map);
		bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
				sc->jme_rdata.jme_ssb_block,
				sc->jme_cdata.jme_ssb_map);
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	}

	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	}
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
jme_shutdown(device_t dev)
{
	return jme_suspend(dev);
}

#ifdef notyet
/*
 * Unlike other ethernet controllers, the JMC250 requires the link
 * speed to be reset to 10/100Mbps explicitly, as a gigabit link
 * consumes more than 375mA.
 * Note, we reset the link speed to 10/100Mbps with auto-negotiation,
 * but we don't know whether that operation will succeed, as we have
 * no control after powering off.  If the renegotiation fails, WOL
 * may not work.  Running at 1Gbps draws more power than the 375mA at
 * 3.3V specified in the PCI specification, and that would result in
 * power to the ethernet controller being shut down completely.
 *
 * TODO
 * Save the current negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the suspend method of the phy driver.
 */
static void
jme_setlinkspeed(struct jme_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
		default:
			break;
		}
	}
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/* Poll link state until jme(4) gets a 10/100 link. */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & IFM_AVALID) != 0) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					jme_mac_config(sc);
					return;
				default:
					break;
				}
			}
			JME_UNLOCK(sc);
			pause("jmelnk", hz);
			JME_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->jme_dev, "establishing link failed, "
			    "WOL may not work!");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	jme_mac_config(sc);
}

static void
jme_setwol(struct jme_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t gpr, pmcs;
	uint16_t pmstat;
	int pmc;

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
		return;
	}

	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_caps & JME_CAP_FASTETH) == 0)
			jme_setlinkspeed(sc);
	}

	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);

	/* Request PME. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
#endif

static int
jme_suspend(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	jme_stop(sc);
#ifdef notyet
	jme_setwol(sc);
#endif
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
jme_resume(device_t dev)
{
	struct jme_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
#ifdef notyet
	int pmc;
#endif

	lwkt_serialize_enter(ifp->if_serializer);

#ifdef notyet
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		uint16_t pmstat;

		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
#endif

	if (ifp->if_flags & IFF_UP)
		jme_init(sc);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}

static int
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
{
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	struct mbuf *m;
	struct jme_dmamap_ctx ctx;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int maxsegs;
	int error, i, prod;
	uint32_t cflags;

	M_ASSERTPKTHDR((*m_head));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	maxsegs = (JME_TX_RING_CNT - sc->jme_cdata.jme_tx_cnt) -
		  (JME_TXD_RSVD + 1);
	if (maxsegs > JME_MAXTXSEGS)
		maxsegs = JME_MAXTXSEGS;
	KASSERT(maxsegs >= (sc->jme_txd_spare - 1),
		("not enough segments %d\n", maxsegs));

	ctx.nsegs = maxsegs;
	ctx.segs = txsegs;
	error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
				     *m_head, jme_dmamap_buf_cb, &ctx,
				     BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		error = EFBIG;
	}
	if (error == EFBIG) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;

		ctx.nsegs = maxsegs;
		ctx.segs = txsegs;
		error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_tx_tag,
					     txd->tx_dmamap, *m_head,
					     jme_dmamap_buf_cb, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if_printf(&sc->arpcom.ac_if,
				  "could not load defragged TX mbuf\n");
			if (!error) {
				bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
						  txd->tx_dmamap);
				error = EFBIG;
			}
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error) {
		if_printf(&sc->arpcom.ac_if, "could not load TX mbuf\n");
		return (error);
	}

	m = *m_head;
	cflags = 0;

	/* Configure checksum offload. */
	if (m->m_pkthdr.csum_flags & CSUM_IP)
		cflags |= JME_TD_IPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_TCP)
		cflags |= JME_TD_TCPCSUM;
	if (m->m_pkthdr.csum_flags & CSUM_UDP)
		cflags |= JME_TD_UDPCSUM;

	/* Configure VLAN. */
	if (m->m_flags & M_VLANTAG) {
		cflags |= (m->m_pkthdr.ether_vlantag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;
	}

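	/*
	 * The first descriptor of a frame carries no data: buflen and
	 * addr_lo stay zero and the total packet length is stored in the
	 * addr_hi field below.  The data segments follow in subsequent
	 * descriptors.
	 */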
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = 0;
	desc->addr_hi = htole32(m->m_pkthdr.len);
	desc->addr_lo = 0;
	sc->jme_cdata.jme_tx_cnt++;
	KKASSERT(sc->jme_cdata.jme_tx_cnt < JME_TX_RING_CNT - JME_TXD_RSVD);
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < ctx.nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));

		sc->jme_cdata.jme_tx_cnt++;
		KKASSERT(sc->jme_cdata.jme_tx_cnt <=
			 JME_TX_RING_CNT - JME_TXD_RSVD);
		JME_DESC_INC(prod, JME_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally, request an interrupt and give ownership of the
	 * first descriptor to the hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);

	txd->tx_m = m;
	txd->tx_ndesc = ctx.nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
			BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
			sc->jme_cdata.jme_tx_ring_map, BUS_DMASYNC_PREWRITE);
	return (0);
}

static void
jme_start(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
		jme_txeof(sc);

	while (!ifq_is_empty(&ifp->if_snd)) {
		/*
		 * Check the number of available TX descs; always
		 * leave JME_TXD_RSVD free TX descs.
		 */
		if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare >
		    JME_TX_RING_CNT - JME_TXD_RSVD) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			ifq_prepend(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/*
		 * Reading TXCSR takes a very long time under heavy load,
		 * so cache the TXCSR value and write the ORed value
		 * together with the kick command to TXCSR.  This saves
		 * one register access cycle.
		 */
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
		    TXCSR_TXQ_N_START(TXCSR_TXQ0));
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = JME_TX_TIMEOUT;
	}
}

static void
jme_watchdog(struct ifnet *ifp)
{
	struct jme_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		jme_init(sc);
		return;
	}

	jme_txeof(sc);
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			  "-- recovering\n");
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	jme_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct jme_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->jme_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
		    (!(sc->jme_caps & JME_CAP_JUMBO) &&
		     ifr->ifr_mtu > JME_MAX_MTU)) {
			error = EINVAL;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			/*
			 * No special configuration is required when the
			 * interface MTU is changed, but the availability
			 * of Tx checksum offload should be checked
			 * against the new MTU size, as the FIFO is only
			 * 2K deep.
			 */
			if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				jme_init(sc);
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->jme_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					jme_set_filter(sc);
			} else {
				jme_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				jme_stop(sc);
		}
		sc->jme_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			jme_set_filter(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if ((mask & IFCAP_TXCSUM) && ifp->if_mtu < JME_TX_FIFO_SIZE) {
			if (IFCAP_TXCSUM & ifp->if_capabilities) {
				ifp->if_capenable ^= IFCAP_TXCSUM;
				if (IFCAP_TXCSUM & ifp->if_capenable)
					ifp->if_hwassist |= JME_CSUM_FEATURES;
				else
					ifp->if_hwassist &= ~JME_CSUM_FEATURES;
			}
		}
		if ((mask & IFCAP_RXCSUM) &&
		    (IFCAP_RXCSUM & ifp->if_capabilities)) {
			uint32_t reg;

			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, JME_RXMAC);
			reg &= ~RXMAC_CSUM_ENB;
			if (ifp->if_capenable & IFCAP_RXCSUM)
				reg |= RXMAC_CSUM_ENB;
			CSR_WRITE_4(sc, JME_RXMAC, reg);
		}

		if ((mask & IFCAP_VLAN_HWTAGGING) &&
		    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities)) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			jme_set_vlan(sc);
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
jme_mac_config(struct jme_softc *sc)
{
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause, gp1;
	int phyconf = JMPHY_CONF_DEFFIFO, hdx = 0;

	mii = device_get_softc(sc->jme_miibus);

	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	DELAY(10);
	CSR_WRITE_4(sc, JME_GHC, 0);
	ghc = 0;
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		    TXMAC_FRAME_BURST);
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
#endif
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
	} else {
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	}

	/*
	 * Reprogram Tx/Rx MACs with resolved speed/duplex.
	 */
	gp1 = CSR_READ_4(sc, JME_GPREG1);
	gp1 &= ~GPREG1_WA_HDX;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
		hdx = 1;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		ghc |= GHC_SPEED_10;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;
		break;

	case IFM_100_TX:
		ghc |= GHC_SPEED_100;
		if (hdx)
			gp1 |= GPREG1_WA_HDX;

		/*
		 * Use extended FIFO depth to work around CRC errors
		 * emitted by chips before JMC250B.
		 */
		phyconf = JMPHY_CONF_EXTFIFO;
		break;

	case IFM_1000_T:
		if (sc->jme_caps & JME_CAP_FASTETH)
			break;

		ghc |= GHC_SPEED_1000;
		if (hdx)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
		break;

	default:
		break;
	}
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);

	if (sc->jme_workaround & JME_WA_EXTFIFO) {
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
				    JMPHY_CONF, phyconf);
	}
	if (sc->jme_workaround & JME_WA_HDX)
		CSR_WRITE_4(sc, JME_GPREG1, gp1);
}

static void
jme_intr(void *xsc)
{
        struct jme_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t status;

        ASSERT_SERIALIZED(ifp->if_serializer);

        status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
        if (status == 0 || status == 0xFFFFFFFF)
                return;

        /* Disable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);

        status = CSR_READ_4(sc, JME_INTR_STATUS);
        if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
                goto back;

        /* Reset PCC counter/timer and Ack interrupts. */
        status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
        if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO))
                status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
        if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
                status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
        CSR_WRITE_4(sc, JME_INTR_STATUS, status);
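        /*
         * Illustration, assuming the usual write-1-to-clear semantics
         * for JME_INTR_STATUS: if only INTR_RXQ_COAL_TO is pending,
         * the code above also folds in INTR_RXQ_COAL and INTR_RXQ_COMP
         * before the ack, so the Rx PCC packet counter and timer are
         * restarted together instead of firing a stale coalescing
         * event later.
         */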

        if (ifp->if_flags & IFF_RUNNING) {
                if (status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO))
                        jme_rxeof(sc);

                if (status & INTR_RXQ_DESC_EMPTY) {
                        /*
                         * Notify the hardware that new Rx buffers are
                         * available. Reading RXCSR takes a very long
                         * time under heavy load, so cache the RXCSR
                         * value and write the ORed value with the kick
                         * command to RXCSR. This saves one register
                         * access cycle.
                         */
                        CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
                            RXCSR_RX_ENB | RXCSR_RXQ_START);
                }

                if (status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) {
                        jme_txeof(sc);
                        if (!ifq_is_empty(&ifp->if_snd))
                                if_devstart(ifp);
                }
        }
back:
        /* Reenable interrupts. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
}

static void
jme_txeof(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct jme_txdesc *txd;
        uint32_t status;
        int cons, nsegs;

        cons = sc->jme_cdata.jme_tx_cons;
        if (cons == sc->jme_cdata.jme_tx_prod)
                return;

        bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
                        sc->jme_cdata.jme_tx_ring_map,
                        BUS_DMASYNC_POSTREAD);

        /*
         * Go through our Tx list and free mbufs for those
         * frames which have been transmitted.
         */
        while (cons != sc->jme_cdata.jme_tx_prod) {
                txd = &sc->jme_cdata.jme_txdesc[cons];
                KASSERT(txd->tx_m != NULL,
                        ("%s: freeing NULL mbuf!\n", __func__));

                status = le32toh(txd->tx_desc->flags);
                if ((status & JME_TD_OWN) == JME_TD_OWN)
                        break;

                if (status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) {
                        ifp->if_oerrors++;
                } else {
                        ifp->if_opackets++;
                        if (status & JME_TD_COLLISION) {
                                ifp->if_collisions +=
                                    le32toh(txd->tx_desc->buflen) &
                                    JME_TD_BUF_LEN_MASK;
                        }
                }

                /*
                 * Only the first descriptor of a multi-descriptor
                 * transmission is updated, so the driver has to skip
                 * the entire chain of buffers for the transmitted
                 * frame. In other words, the JME_TD_OWN bit is valid
                 * only in the first descriptor of a multi-descriptor
                 * transmission.
                 */
                for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
                        sc->jme_rdata.jme_tx_ring[cons].flags = 0;
                        JME_DESC_INC(cons, JME_TX_RING_CNT);
                }

                /* Reclaim transferred mbufs. */
                bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
                m_freem(txd->tx_m);
                txd->tx_m = NULL;
                sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
                KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
                        ("%s: Active Tx desc counter was garbled\n", __func__));
                txd->tx_ndesc = 0;
        }
        sc->jme_cdata.jme_tx_cons = cons;

        if (sc->jme_cdata.jme_tx_cnt == 0)
                ifp->if_timer = 0;

        if (sc->jme_cdata.jme_tx_cnt + sc->jme_txd_spare <=
            JME_TX_RING_CNT - JME_TXD_RSVD)
                ifp->if_flags &= ~IFF_OACTIVE;
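        /*
         * In other words, IFF_OACTIVE is cleared only when the ring
         * still has room for a worst-case frame (jme_txd_spare
         * descriptors) on top of the reserved JME_TXD_RSVD slots, so
         * a restarted jme_start() should not immediately fail its own
         * free-descriptor check.
         */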

        bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
                        sc->jme_cdata.jme_tx_ring_map,
                        BUS_DMASYNC_PREWRITE);
}

static __inline void
jme_discard_rxbufs(struct jme_softc *sc, int cons, int count)
{
        int i;

        for (i = 0; i < count; ++i) {
                struct jme_desc *desc = &sc->jme_rdata.jme_rx_ring[cons];

                desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
                desc->buflen = htole32(MCLBYTES);
                JME_DESC_INC(cons, JME_RX_RING_CNT);
        }
}
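/*
 * Note that jme_discard_rxbufs() hands the existing clusters straight
 * back to the hardware (JME_RD_OWN, full MCLBYTES buffer length), so a
 * bad or unbuildable frame is dropped without allocating replacement
 * mbufs.
 */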

/* Receive a frame. */
static void
jme_rxpkt(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct jme_desc *desc;
        struct jme_rxdesc *rxd;
        struct mbuf *mp, *m;
        uint32_t flags, status;
        int cons, count, nsegs;

        cons = sc->jme_cdata.jme_rx_cons;
        desc = &sc->jme_rdata.jme_rx_ring[cons];
        flags = le32toh(desc->flags);
        status = le32toh(desc->buflen);
        nsegs = JME_RX_NSEGS(status);

        if (status & JME_RX_ERR_STAT) {
                ifp->if_ierrors++;
                jme_discard_rxbufs(sc, cons, nsegs);
#ifdef JME_SHOW_ERRORS
                device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
                    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
#endif
                sc->jme_cdata.jme_rx_cons += nsegs;
                sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
                return;
        }

        sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
        for (count = 0; count < nsegs; count++,
             JME_DESC_INC(cons, JME_RX_RING_CNT)) {
                rxd = &sc->jme_cdata.jme_rxdesc[cons];
                mp = rxd->rx_m;

                /* Add a new receive buffer to the ring. */
                if (jme_newbuf(sc, rxd, 0) != 0) {
                        ifp->if_iqdrops++;
                        /* Reuse buffer. */
                        jme_discard_rxbufs(sc, cons, nsegs - count);
                        if (sc->jme_cdata.jme_rxhead != NULL) {
                                m_freem(sc->jme_cdata.jme_rxhead);
                                JME_RXCHAIN_RESET(sc);
                        }
                        break;
                }

                /*
                 * Assume we've received a full sized frame.
                 * The actual size is fixed up when we encounter the
                 * end of a multi-segmented frame.
                 */
                mp->m_len = MCLBYTES;

                /* Chain received mbufs. */
                if (sc->jme_cdata.jme_rxhead == NULL) {
                        sc->jme_cdata.jme_rxhead = mp;
                        sc->jme_cdata.jme_rxtail = mp;
                } else {
                        /*
                         * The receive processor can handle a maximum
                         * frame size of 65535 bytes.
                         */
                        mp->m_flags &= ~M_PKTHDR;
                        sc->jme_cdata.jme_rxtail->m_next = mp;
                        sc->jme_cdata.jme_rxtail = mp;
                }

                if (count == nsegs - 1) {
                        /* Last desc. for this frame. */
                        m = sc->jme_cdata.jme_rxhead;
                        /* XXX assert PKTHDR? */
                        m->m_flags |= M_PKTHDR;
                        m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
                        if (nsegs > 1) {
                                /* Set first mbuf size. */
                                m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
                                /* Set last mbuf size. */
                                mp->m_len = sc->jme_cdata.jme_rxlen -
                                    ((MCLBYTES - JME_RX_PAD_BYTES) +
                                     (MCLBYTES * (nsegs - 2)));
                        } else {
                                m->m_len = sc->jme_cdata.jme_rxlen;
                        }
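                        /*
                         * Worked example, assuming MCLBYTES == 2048:
                         * a 3000 byte jme_rxlen arrives as nsegs == 2
                         * clusters; the first mbuf carries
                         * 2048 - 10 = 2038 bytes after the pad and
                         * the last one 3000 - 2038 = 962 bytes.
                         */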
                        m->m_pkthdr.rcvif = ifp;

                        /*
                         * Account for the 10 bytes of auto padding
                         * that is used to align the IP header on a
                         * 32-bit boundary. Also note that the CRC
                         * bytes are automatically removed by the
                         * hardware.
                         */
                        m->m_data += JME_RX_PAD_BYTES;

                        /* Set checksum information. */
                        if ((ifp->if_capenable & IFCAP_RXCSUM) &&
                            (flags & JME_RD_IPV4)) {
                                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                                if (flags & JME_RD_IPCSUM)
                                        m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                                if ((flags & JME_RD_MORE_FRAG) == 0 &&
                                    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
                                     (JME_RD_TCP | JME_RD_TCPCSUM) ||
                                     (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
                                     (JME_RD_UDP | JME_RD_UDPCSUM))) {
                                        m->m_pkthdr.csum_flags |=
                                            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                        m->m_pkthdr.csum_data = 0xffff;
                                }
                        }
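                        /*
                         * csum_data = 0xffff together with
                         * CSUM_DATA_VALID | CSUM_PSEUDO_HDR is the
                         * BSD convention for "hardware verified the
                         * TCP/UDP checksum including the pseudo
                         * header", so the stack skips its software
                         * verification.
                         */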

                        /* Check for VLAN tagged packets. */
                        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
                            (flags & JME_RD_VLAN_TAG)) {
                                m->m_pkthdr.ether_vlantag =
                                    flags & JME_RD_VLAN_MASK;
                                m->m_flags |= M_VLANTAG;
                        }

                        ifp->if_ipackets++;
                        /* Pass it on. */
                        ifp->if_input(ifp, m);

                        /* Reset mbuf chains. */
                        JME_RXCHAIN_RESET(sc);
                }
        }

        sc->jme_cdata.jme_rx_cons += nsegs;
        sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
}

static void
jme_rxeof(struct jme_softc *sc)
{
        struct jme_desc *desc;
        int nsegs, prog, pktlen;

        bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
                        sc->jme_cdata.jme_rx_ring_map,
                        BUS_DMASYNC_POSTREAD);

        prog = 0;
        for (;;) {
                desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
                if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
                        break;
                if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
                        break;

                /*
                 * Check the number of segments against the received
                 * bytes. A non-matching value would indicate that the
                 * hardware is still trying to update the Rx
                 * descriptors. I'm not sure whether this check is
                 * needed.
                 */
                nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
                pktlen = JME_RX_BYTES(le32toh(desc->buflen));
                if (nsegs != howmany(pktlen, MCLBYTES)) {
                        if_printf(&sc->arpcom.ac_if, "RX fragment count(%d) "
                                  "and packet size(%d) mismatch\n",
                                  nsegs, pktlen);
                        break;
                }
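                /*
                 * Worked example, assuming MCLBYTES == 2048: a pktlen
                 * of 3000 implies howmany(3000, 2048) == 2, so a
                 * descriptor claiming any other segment count is
                 * treated as inconsistent.
                 */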

                /* Received a frame. */
                jme_rxpkt(sc);
                prog++;
        }

        if (prog > 0) {
                bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
                                sc->jme_cdata.jme_rx_ring_map,
                                BUS_DMASYNC_PREWRITE);
        }
}

static void
jme_tick(void *xsc)
{
        struct jme_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii = device_get_softc(sc->jme_miibus);

        lwkt_serialize_enter(ifp->if_serializer);

        mii_tick(mii);
        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

        lwkt_serialize_exit(ifp->if_serializer);
}

static void
jme_reset(struct jme_softc *sc)
{
#ifdef foo
        /* Stop receiver, transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);
#endif
        CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
        DELAY(10);
        CSR_WRITE_4(sc, JME_GHC, 0);
}

static void
jme_init(void *xsc)
{
        struct jme_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct mii_data *mii;
        uint8_t eaddr[ETHER_ADDR_LEN];
        bus_addr_t paddr;
        uint32_t reg;
        int error;

        ASSERT_SERIALIZED(ifp->if_serializer);

        /*
         * Cancel any pending I/O.
         */
        jme_stop(sc);

        /*
         * Reset the chip to a known state.
         */
        jme_reset(sc);

        /*
         * Since we always use 64bit address mode for transmitting,
         * each Tx request requires one more dummy descriptor.
         */
        sc->jme_txd_spare =
            howmany(ifp->if_mtu + sizeof(struct ether_vlan_header), MCLBYTES) + 1;
        KKASSERT(sc->jme_txd_spare >= 2);
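        /*
         * Worked example, assuming MCLBYTES == 2048: for the standard
         * 1500 byte MTU, sizeof(struct ether_vlan_header) == 18 gives
         * howmany(1518, 2048) == 1, so jme_txd_spare == 2 -- at most
         * one cluster-sized segment plus the extra dummy descriptor
         * required by 64bit address mode.
         */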

        /* Init descriptors. */
        error = jme_init_rx_ring(sc);
        if (error != 0) {
                device_printf(sc->jme_dev,
                    "%s: initialization failed: no memory for Rx buffers.\n",
                    __func__);
                jme_stop(sc);
                return;
        }
        jme_init_tx_ring(sc);

        /* Initialize shadow status block. */
        jme_init_ssb(sc);

        /* Reprogram the station address. */
        bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
        CSR_WRITE_4(sc, JME_PAR0,
            eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
        CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);

        /*
         * Configure Tx queue.
         * Tx priority queue weight value : 0
         * Tx FIFO threshold for processing next packet : 16QW
         * Maximum Tx DMA length : 512
         * Allow Tx DMA burst.
         */
        sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
        sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
        sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
        sc->jme_txcsr |= sc->jme_tx_dma_size;
        sc->jme_txcsr |= TXCSR_DMA_BURST;
        CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);

        /* Set Tx descriptor counter. */
        CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);

        /* Set Tx ring address to the hardware. */
        paddr = JME_TX_RING_ADDR(sc, 0);
        CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
        CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));

        /* Configure TxMAC parameters. */
        reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
        reg |= TXMAC_THRESH_1_PKT;
        reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
        CSR_WRITE_4(sc, JME_TXMAC, reg);

        /*
         * Configure Rx queue.
         * FIFO full threshold for transmitting Tx pause packet : 128T
         * FIFO threshold for processing next packet : 128QW
         * Rx queue 0 select
         * Max Rx DMA length : 128
         * Rx descriptor retry : 32
         * Rx descriptor retry time gap : 256ns
         * Don't receive runt/bad frame.
         */
        sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
        /*
         * Since the Rx FIFO is 4K bytes, receiving frames larger
         * than 4K bytes will suffer from Rx FIFO overruns. So
         * decrease the FIFO threshold to reduce FIFO overruns for
         * frames larger than 4000 bytes.
         * For best performance with standard MTU sized frames, use
         * the maximum allowable FIFO threshold, 128QW.
         */
        if ((ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN + ETHER_CRC_LEN) >
            JME_RX_FIFO_SIZE)
                sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
        else
                sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
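        /*
         * Worked example: with a hypothetical 9000 byte jumbo MTU the
         * on-wire frame is 9000 + 14 + 4 + 4 = 9022 bytes, which
         * exceeds the 4K Rx FIFO, so the 16QW threshold is selected;
         * a 1500 byte MTU (1522 bytes) keeps the faster 128QW
         * threshold.
         */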
        sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
        sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
        sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
        /* XXX TODO DROP_BAD */
        CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);

        /* Set Rx descriptor counter. */
        CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);

        /* Set Rx ring address to the hardware. */
        paddr = JME_RX_RING_ADDR(sc, 0);
        CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
        CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));

        /* Clear receive filter. */
        CSR_WRITE_4(sc, JME_RXMAC, 0);

        /* Set up the receive filter. */
        jme_set_filter(sc);
        jme_set_vlan(sc);

        /*
         * Disable all WOL bits as WOL can interfere with normal Rx
         * operation. Also clear WOL detection status bits.
         */
        reg = CSR_READ_4(sc, JME_PMCS);
        reg &= ~PMCS_WOL_ENB_MASK;
        CSR_WRITE_4(sc, JME_PMCS, reg);

        /*
         * Pad 10 bytes right before the received frame. This greatly
         * helps Rx performance on strict-alignment architectures as
         * the driver does not need to copy the frame to align the
         * payload.
         */
        reg = CSR_READ_4(sc, JME_RXMAC);
        reg |= RXMAC_PAD_10BYTES;

        if (ifp->if_capenable & IFCAP_RXCSUM)
                reg |= RXMAC_CSUM_ENB;
        CSR_WRITE_4(sc, JME_RXMAC, reg);

        /* Configure general purpose reg0 */
        reg = CSR_READ_4(sc, JME_GPREG0);
        reg &= ~GPREG0_PCC_UNIT_MASK;
        /* Set PCC timer resolution to microsecond units. */
        reg |= GPREG0_PCC_UNIT_US;
        /*
         * Disable all shadow register posting as we have to read the
         * JME_INTR_STATUS register in jme_intr. Also it seems that
         * it's hard to synchronize the interrupt status between
         * hardware and software with shadow posting due to the
         * requirements of bus_dmamap_sync(9).
         */
        reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
               GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
               GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
               GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
        /* Disable posting of DW0. */
        reg &= ~GPREG0_POST_DW0_ENB;
        /* Clear PME message. */
        reg &= ~GPREG0_PME_ENB;
        /* Set PHY address. */
        reg &= ~GPREG0_PHY_ADDR_MASK;
        reg |= sc->jme_phyaddr;
        CSR_WRITE_4(sc, JME_GPREG0, reg);

        /* Configure Tx queue 0 packet completion coalescing. */
        jme_set_tx_coal(sc);

        /* Configure Rx queue 0 packet completion coalescing. */
        jme_set_rx_coal(sc);

        /* Configure shadow status block but don't enable posting. */
        paddr = sc->jme_rdata.jme_ssb_block_paddr;
        CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
        CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));

        /* Disable Timer 1 and Timer 2. */
        CSR_WRITE_4(sc, JME_TIMER1, 0);
        CSR_WRITE_4(sc, JME_TIMER2, 0);

        /* Configure retry transmit period, retry limit value. */
        CSR_WRITE_4(sc, JME_TXTRHD,
            ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
             TXTRHD_RT_PERIOD_MASK) |
            ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
             TXTRHD_RT_LIMIT_MASK));
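        /*
         * Both fields are shifted into place and masked to their
         * field widths here; the matching TXTRHD_RT_PERIOD_ENB and
         * TXTRHD_RT_LIMIT_ENB bits are turned on later by the MAC
         * configuration code above, and only for half-duplex links.
         */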

        /* Disable RSS. */
        CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);

        /* Initialize the interrupt mask. */
        CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
        CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

        /*
         * Enabling Tx/Rx DMA engines and Rx queue processing is
         * done after detection of valid link in jme_miibus_statchg.
         */
        sc->jme_flags &= ~JME_FLAG_LINK;

        /* Set the current media. */
        mii = device_get_softc(sc->jme_miibus);
        mii_mediachg(mii);

        callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
}

static void
jme_stop(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct jme_txdesc *txd;
        struct jme_rxdesc *rxd;
        int i;

        ASSERT_SERIALIZED(ifp->if_serializer);

        /*
         * Mark the interface down and cancel the watchdog timer.
         */
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
        ifp->if_timer = 0;

        callout_stop(&sc->jme_tick_ch);
        sc->jme_flags &= ~JME_FLAG_LINK;

        /*
         * Disable interrupts.
         */
        CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
        CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);

        /* Disable updating shadow status block. */
        CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
            CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);

        /* Stop receiver, transmitter. */
        jme_stop_rx(sc);
        jme_stop_tx(sc);

#ifdef foo
        /* Reclaim Rx/Tx buffers that have been completed. */
        jme_rxeof(sc);
        if (sc->jme_cdata.jme_rxhead != NULL)
                m_freem(sc->jme_cdata.jme_rxhead);
        JME_RXCHAIN_RESET(sc);
        jme_txeof(sc);
#endif

        /*
         * Free partially finished Rx segments.
         */
        if (sc->jme_cdata.jme_rxhead != NULL)
                m_freem(sc->jme_cdata.jme_rxhead);
        JME_RXCHAIN_RESET(sc);

        /*
         * Free RX and TX mbufs still in the queues.
         */
        for (i = 0; i < JME_RX_RING_CNT; i++) {
                rxd = &sc->jme_cdata.jme_rxdesc[i];
                if (rxd->rx_m != NULL) {
                        bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
                            rxd->rx_dmamap);
                        m_freem(rxd->rx_m);
                        rxd->rx_m = NULL;
                }
        }
        for (i = 0; i < JME_TX_RING_CNT; i++) {
                txd = &sc->jme_cdata.jme_txdesc[i];
                if (txd->tx_m != NULL) {
                        bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
                            txd->tx_dmamap);
                        m_freem(txd->tx_m);
                        txd->tx_m = NULL;
                        txd->tx_ndesc = 0;
                }
        }
}

static void
jme_stop_tx(struct jme_softc *sc)
{
        uint32_t reg;
        int i;

        reg = CSR_READ_4(sc, JME_TXCSR);
        if ((reg & TXCSR_TX_ENB) == 0)
                return;
        reg &= ~TXCSR_TX_ENB;
        CSR_WRITE_4(sc, JME_TXCSR, reg);
        for (i = JME_TIMEOUT; i > 0; i--) {
                DELAY(1);
                if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
                        break;
        }
        if (i == 0)
                device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
}

static void
jme_stop_rx(struct jme_softc *sc)
{
        uint32_t reg;
        int i;

        reg = CSR_READ_4(sc, JME_RXCSR);
        if ((reg & RXCSR_RX_ENB) == 0)
                return;
        reg &= ~RXCSR_RX_ENB;
        CSR_WRITE_4(sc, JME_RXCSR, reg);
        for (i = JME_TIMEOUT; i > 0; i--) {
                DELAY(1);
                if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
                        break;
        }
        if (i == 0)
                device_printf(sc->jme_dev, "stopping receiver timeout!\n");
}

static void
jme_init_tx_ring(struct jme_softc *sc)
{
        struct jme_ring_data *rd;
        struct jme_txdesc *txd;
        int i;

        sc->jme_cdata.jme_tx_prod = 0;
        sc->jme_cdata.jme_tx_cons = 0;
        sc->jme_cdata.jme_tx_cnt = 0;

        rd = &sc->jme_rdata;
        bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
        for (i = 0; i < JME_TX_RING_CNT; i++) {
                txd = &sc->jme_cdata.jme_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_desc = &rd->jme_tx_ring[i];
                txd->tx_ndesc = 0;
        }

        bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
                        sc->jme_cdata.jme_tx_ring_map,
                        BUS_DMASYNC_PREWRITE);
}

static void
jme_init_ssb(struct jme_softc *sc)
{
        struct jme_ring_data *rd;

        rd = &sc->jme_rdata;
        bzero(rd->jme_ssb_block, JME_SSB_SIZE);
        bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
                        BUS_DMASYNC_PREWRITE);
}

static int
jme_init_rx_ring(struct jme_softc *sc)
{
        struct jme_ring_data *rd;
        struct jme_rxdesc *rxd;
        int i;

        KKASSERT(sc->jme_cdata.jme_rxhead == NULL &&
                 sc->jme_cdata.jme_rxtail == NULL &&
                 sc->jme_cdata.jme_rxlen == 0);
        sc->jme_cdata.jme_rx_cons = 0;

        rd = &sc->jme_rdata;
        bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
        for (i = 0; i < JME_RX_RING_CNT; i++) {
                int error;

                rxd = &sc->jme_cdata.jme_rxdesc[i];
                rxd->rx_m = NULL;
                rxd->rx_desc = &rd->jme_rx_ring[i];
                error = jme_newbuf(sc, rxd, 1);
                if (error)
                        return (error);
        }

        bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
                        sc->jme_cdata.jme_rx_ring_map,
                        BUS_DMASYNC_PREWRITE);
        return (0);
}

static int
jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd, int init)
{
        struct jme_desc *desc;
        struct mbuf *m;
        struct jme_dmamap_ctx ctx;
        bus_dma_segment_t segs;
        bus_dmamap_t map;
        int error;

        m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        /*
         * The JMC250 has a 64-bit boundary alignment limitation, so
         * jme(4) takes advantage of the hardware's 10-byte padding
         * feature in order not to copy the entire frame to align the
         * IP header on a 32-bit boundary.
         */
        m->m_len = m->m_pkthdr.len = MCLBYTES;

        ctx.nsegs = 1;
        ctx.segs = &segs;
        error = bus_dmamap_load_mbuf(sc->jme_cdata.jme_rx_tag,
                                     sc->jme_cdata.jme_rx_sparemap,
                                     m, jme_dmamap_buf_cb, &ctx,
                                     BUS_DMA_NOWAIT);
        if (error || ctx.nsegs == 0) {
                if (!error) {
                        bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
                            sc->jme_cdata.jme_rx_sparemap);
                        error = EFBIG;
                        if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
                }
                m_freem(m);

                if (init)
                        if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
                return (error);
        }

        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
        sc->jme_cdata.jme_rx_sparemap = map;
        rxd->rx_m = m;
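        /*
         * The new mbuf was loaded into the spare DMA map, and the maps
         * are swapped only after the load succeeded. On failure the
         * old mbuf, its map, and the descriptor are left untouched,
         * which is what allows jme_rxpkt() to fall back to
         * jme_discard_rxbufs() and reuse the old buffer.
         */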

        desc = rxd->rx_desc;
        desc->buflen = htole32(segs.ds_len);
        desc->addr_lo = htole32(JME_ADDR_LO(segs.ds_addr));
        desc->addr_hi = htole32(JME_ADDR_HI(segs.ds_addr));
        desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);

        return (0);
}

static void
jme_set_vlan(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t reg;

        ASSERT_SERIALIZED(ifp->if_serializer);

        reg = CSR_READ_4(sc, JME_RXMAC);
        reg &= ~RXMAC_VLAN_ENB;
        if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
                reg |= RXMAC_VLAN_ENB;
        CSR_WRITE_4(sc, JME_RXMAC, reg);
}

static void
jme_set_filter(struct jme_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct ifmultiaddr *ifma;
        uint32_t crc;
        uint32_t mchash[2];
        uint32_t rxcfg;

        ASSERT_SERIALIZED(ifp->if_serializer);

        rxcfg = CSR_READ_4(sc, JME_RXMAC);
        rxcfg &= ~(RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
            RXMAC_ALLMULTI);

        /*
         * Always accept frames destined to our station address.
         * Always accept broadcast frames.
         */
        rxcfg |= RXMAC_UNICAST | RXMAC_BROADCAST;

        if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
                if (ifp->if_flags & IFF_PROMISC)
                        rxcfg |= RXMAC_PROMISC;
                if (ifp->if_flags & IFF_ALLMULTI)
                        rxcfg |= RXMAC_ALLMULTI;
                CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
                CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
                CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
                return;
        }

        /*
         * Set up the multicast address filter by passing all multicast
         * addresses through a CRC generator, and then using the low-order
         * 6 bits as an index into the 64-bit multicast hash table. The
         * high-order bits select the register, while the rest of the bits
         * select the bit within the register.
         */
        rxcfg |= RXMAC_MULTICAST;
        bzero(mchash, sizeof(mchash));

        LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
                    ifma->ifma_addr), ETHER_ADDR_LEN);

                /* Just want the 6 least significant bits. */
                crc &= 0x3f;

                /* Set the corresponding bit in the hash table. */
                mchash[crc >> 5] |= 1 << (crc & 0x1f);
        }
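        /*
         * Worked example with a hypothetical CRC: if crc & 0x3f ==
         * 0x2a (42), then 42 >> 5 == 1 and 42 & 0x1f == 10, so bit 10
         * of mchash[1] (register JME_MAR1) is set for that group
         * address.
         */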

        CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
        CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
        CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
}

static int
jme_sysctl_tx_coal_to(SYSCTL_HANDLER_ARGS)
{
        struct jme_softc *sc = arg1;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int error, v;

        lwkt_serialize_enter(ifp->if_serializer);

        v = sc->jme_tx_coal_to;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error || req->newptr == NULL)
                goto back;

        if (v < PCCTX_COAL_TO_MIN || v > PCCTX_COAL_TO_MAX) {
                error = EINVAL;
                goto back;
        }

        if (v != sc->jme_tx_coal_to) {
                sc->jme_tx_coal_to = v;
                if (ifp->if_flags & IFF_RUNNING)
                        jme_set_tx_coal(sc);
        }
back:
        lwkt_serialize_exit(ifp->if_serializer);
        return error;
}

static int
jme_sysctl_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
        struct jme_softc *sc = arg1;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int error, v;

        lwkt_serialize_enter(ifp->if_serializer);

        v = sc->jme_tx_coal_pkt;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error || req->newptr == NULL)
                goto back;

        if (v < PCCTX_COAL_PKT_MIN || v > PCCTX_COAL_PKT_MAX) {
                error = EINVAL;
                goto back;
        }

        if (v != sc->jme_tx_coal_pkt) {
                sc->jme_tx_coal_pkt = v;
                if (ifp->if_flags & IFF_RUNNING)
                        jme_set_tx_coal(sc);
        }
back:
        lwkt_serialize_exit(ifp->if_serializer);
        return error;
}

static int
jme_sysctl_rx_coal_to(SYSCTL_HANDLER_ARGS)
{
        struct jme_softc *sc = arg1;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int error, v;

        lwkt_serialize_enter(ifp->if_serializer);

        v = sc->jme_rx_coal_to;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error || req->newptr == NULL)
                goto back;

        if (v < PCCRX_COAL_TO_MIN || v > PCCRX_COAL_TO_MAX) {
                error = EINVAL;
                goto back;
        }

        if (v != sc->jme_rx_coal_to) {
                sc->jme_rx_coal_to = v;
                if (ifp->if_flags & IFF_RUNNING)
                        jme_set_rx_coal(sc);
        }
back:
        lwkt_serialize_exit(ifp->if_serializer);
        return error;
}

static int
jme_sysctl_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
{
        struct jme_softc *sc = arg1;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int error, v;

        lwkt_serialize_enter(ifp->if_serializer);

        v = sc->jme_rx_coal_pkt;
        error = sysctl_handle_int(oidp, &v, 0, req);
        if (error || req->newptr == NULL)
                goto back;

        if (v < PCCRX_COAL_PKT_MIN || v > PCCRX_COAL_PKT_MAX) {
                error = EINVAL;
                goto back;
        }

        if (v != sc->jme_rx_coal_pkt) {
                sc->jme_rx_coal_pkt = v;
                if (ifp->if_flags & IFF_RUNNING)
                        jme_set_rx_coal(sc);
        }
back:
        lwkt_serialize_exit(ifp->if_serializer);
        return error;
}

static void
jme_set_tx_coal(struct jme_softc *sc)
{
        uint32_t reg;

        reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
            PCCTX_COAL_TO_MASK;
        reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
            PCCTX_COAL_PKT_MASK;
        reg |= PCCTX_COAL_TXQ0;
        CSR_WRITE_4(sc, JME_PCCTX, reg);
}
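/*
 * jme_tx_coal_to is a timeout in PCC timer units (microseconds, per
 * the GPREG0_PCC_UNIT_US setup in jme_init()) and jme_tx_coal_pkt is a
 * packet count; both are range-checked by the sysctl handlers above.
 * A Tx completion interrupt is expected once either threshold is
 * reached.
 */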

static void
jme_set_rx_coal(struct jme_softc *sc)
{
        uint32_t reg;

        reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
            PCCRX_COAL_TO_MASK;
        reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
            PCCRX_COAL_PKT_MASK;
        CSR_WRITE_4(sc, JME_PCCRX0, reg);
}