/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.46 2008/10/28 07:30:49 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	nfe_rxeof(struct nfe_softc *);
static int	nfe_txeof(struct nfe_softc *, int);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);
static void	nfe_disable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);
static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
/* hw timer simulated interrupt moderation @8000Hz */
static int	nfe_imtime = -125;
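/*
 * A negative hw.nfe.imtimer value requests dynamic interrupt moderation
 * (NFE_F_DYN_IM); its magnitude is the moderation timer period in
 * microseconds, so the default of -125 simulates moderation at roughly
 * 8000Hz.  See the sign handling in nfe_attach() and nfe_sysctl_imtime().
 */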
TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);
#ifdef NFE_DEBUG

#define DPRINTF(sc, fmt, ...) do {			\
	if ((sc)->sc_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if ((sc)->sc_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */
struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset and possibly also in the 400R; it might be that both
	   nForce2- and nForce3-based boards can use the same MCPs
	   (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
static int
nfe_probe(device_t dev)
	const struct nfe_dev *n;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				sc->sc_caps |= NFE_40BIT_ADDR;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
static int
nfe_attach(device_t dev)
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
	if (nfe_imtime < 0) {
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

#ifndef BURN_BRIDGE
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGE */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");

	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate top level DMA tag
	 */
	if (sc->sc_caps & NFE_40BIT_ADDR)
		lowaddr = NFE_BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
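	/*
	 * NFE_40BIT_ADDR parts can DMA above the 4GB boundary; their
	 * descriptors carry the bus address split into HI/LO 32-bit
	 * halves (see nfe_desc64).  All other parts are restricted to
	 * 32-bit DMA addresses.
	 */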
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			lowaddr,		/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			&sc->sc_dtag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");

	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;

		sc->sc_caps &= ~NFE_HW_CSUM;

	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	nfe_detach(dev);
	return error;
static int
nfe_detach(device_t dev)
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
static void
nfe_shutdown(device_t dev)
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

static int
nfe_suspend(device_t dev)
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

static int
nfe_resume(device_t dev)
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);
static void
nfe_miibus_statchg(device_t dev)
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
	struct nfe_softc *sc = device_get_softc(dev);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
	struct nfe_softc *sc = device_get_softc(dev);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		nfe_disable_intrs(sc);
		break;

	case POLL_DEREGISTER:
		nfe_enable_intrs(sc);
		break;

	case POLL_AND_CHECK_STATUS:
	default:
		if (ifp->if_flags & IFF_RUNNING) {
static void
nfe_intr(void *arg)
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
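	/*
	 * NFE_IRQ_STATUS appears to be write-to-clear: writing back the
	 * bits just read acknowledges those events before we service them.
	 */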
	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;

		ret = nfe_rxeof(sc);
		ret |= nfe_txeof(sc, 1);

		if (sc->sc_flags & NFE_F_DYN_IM) {
			if (ret && (sc->sc_flags & NFE_F_IRQ_TIMER) == 0) {
				/*
				 * Assume that using the hardware timer
				 * can reduce the interrupt rate.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if (!ret && (sc->sc_flags & NFE_F_IRQ_TIMER)) {
				/*
				 * Nothing needs to be processed; fall back
				 * to the plain TX/RX interrupts.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;

				/*
				 * Recollect, mainly to avoid the possible
				 * race introduced by changing the interrupt
				 * masks.
				 */
				nfe_rxeof(sc);
				nfe_txeof(sc, 1);
			}
		}
	}
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the multicast filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
static int
nfe_rxeof(struct nfe_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;
	struct mbuf_chain chain[MAXCPU];

	reap = 0;
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	ether_input_chain_init(chain);
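	/*
	 * Received packets are first collected into per-CPU mbuf chains
	 * and handed to the stack in one batch via ether_input_dispatch()
	 * after the ring has been swept, keeping the hot loop short.
	 */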
	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ether_input_chain(ifp, m, chain);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}

	if (reap) {
		bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
		ether_input_dispatch(chain);
	}
	return reap;
static int
nfe_txeof(struct nfe_softc *sc, int start)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}
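	/*
	 * Clear OACTIVE only once the ring again has room for a
	 * worst-case packet (sc_tx_spare segments) on top of the
	 * NFE_NSEG_RSVD descriptors that are always kept free.
	 */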
	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j, maxsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */
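	/*
	 * Request at most the free descriptors minus NFE_NSEG_RSVD;
	 * the reserved slots guarantee that the TX ring can never be
	 * filled completely.
	 */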
	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("not enough segments %d,%d\n", maxsegs, sc->sc_tx_spare));

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(ring->data_tag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG)
		goto back;
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			error = ENOBUFS;
			goto back;
		}
		m0 = m_new;

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			bus_dmamap_unload(ring->data_tag, map);
			goto back;
		}
	}

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */
	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		data = &ring->data[j];

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;
	}
	KKASSERT(ring->queued <= sc->sc_tx_ring_count);

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_caps & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % sc->sc_tx_ring_count;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;
static void
nfe_start(struct ifnet *ifp)
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0, oactive = 0;
	struct mbuf *m0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	for (;;) {
		int error;

		if (sc->sc_tx_ring_count - ring->queued <
		    sc->sc_tx_spare + NFE_NSEG_RSVD) {
			if (oactive) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}

			nfe_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m0);

		error = nfe_encap(sc, ring, m0);
		if (error) {
			ifp->if_oerrors++;
			if (error == EFBIG) {
				if (oactive) {
					ifp->if_flags |= IFF_OACTIVE;
					break;
				}
				nfe_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		count++;

		/*
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
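	/* Kick the transmit DMA engine so it rescans the ring. */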
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
static void
nfe_watchdog(struct ifnet *ifp)
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;
static void
nfe_init(void *xsc)
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_stop(sc);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_mac_reset(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_F_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_F_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_POLL, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	/*
	 * Although the adapter is capable of stripping VLAN tags from
	 * received frames (NFE_RXTX_VTAG_STRIP), we deliberately do not
	 * enable that feature; tag extraction is left to the software
	 * VLAN layer in our network stack.
	 */
	if (sc->sc_caps & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_caps & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr));

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_TX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->txq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (sc->sc_rx_ring_count - 1) << 16 |
		  (sc->sc_tx_ring_count - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		nfe_disable_intrs(sc);
	else
#endif
	nfe_enable_intrs(sc);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	/*
	 * If we had stuff in the tx ring before, it's all cleaned out
	 * now, so we are not going to get an interrupt; jump-start any
	 * pending transmissions.
	 */
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
static void
nfe_stop(struct nfe_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;

#define WAITMAX	50000

	/*
	 * Abort Tx
	 */
	NFE_WRITE(sc, NFE_TX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop TX\n");
	DELAY(100);

	/*
	 * Abort Rx
	 */
	NFE_WRITE(sc, NFE_RX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop RX\n");
	DELAY(100);

#undef WAITMAX

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_rx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	if (sc->sc_caps & NFE_JUMBO_SUP) {
		ring->jbuf =
		kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
			M_DEVBUF, M_WAITOK | M_ZERO);

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			kfree(ring->jbuf, M_DEVBUF);
			ring->jbuf = NULL;
			/* Allow jumbo frame allocation to fail */
		}
	}

	ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
				  &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(ring->data_tag,
						   ring->data[j].map);
			}
			bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
			bus_dma_tag_destroy(ring->data_tag);
			ring->data_tag = NULL;
			return error;
		}
	}
	return 0;
static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->cur = ring->next = 0;
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}
		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
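/*
 * nfe_jfree() and nfe_jref() serve as the ext_free/ext_ref callbacks
 * for mbufs whose external storage is a jumbo buffer (see
 * nfe_newbuf_jumbo()): a jumbo buffer goes back on the ring's free
 * list only when its reference count drops to zero.
 */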
static void
nfe_jfree(void *arg)
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
static void
nfe_jref(void *arg)
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
	struct nfe_jbuf *jbuf;
	bus_dmamem_t dmem;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    BUS_DMA_WAITOK, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo buffer\n");
		return error;
	}
	ring->jtag = dmem.dmem_tag;
	ring->jmap = dmem.dmem_map;
	ring->jpool = dmem.dmem_addr;
	physaddr = dmem.dmem_busaddr;

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_tx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(ring->data_tag,
						   ring->data[j].map);
			}
			bus_dma_tag_destroy(ring->data_tag);
			ring->data_tag = NULL;
			return error;
		}
	}
	return 0;
static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
static int
nfe_ifmedia_upd(struct ifnet *ifp)
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
static void
nfe_setmulti(struct nfe_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
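	/*
	 * The chip implements a single address/mask filter pair: fold
	 * all subscribed multicast addresses together so that addr keeps
	 * only the bits common to every address and mask clears each bit
	 * on which any of them differ.
	 */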
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
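	/*
	 * NFE_FIX_EADDR parts apparently store the factory MAC address
	 * in the opposite byte order from what the MAC (and
	 * nfe_set_macaddr()) expects, so two decodings are needed here.
	 */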
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
static void
nfe_tick(void *arg)
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;
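	/*
	 * Load the new mbuf into the spare DMA map first: if the load
	 * fails, the old mbuf is still loaded in data->map and the ring
	 * keeps its receive buffer.
	 */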
	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(ring->data_tag, ring->data_tmpmap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if,
				  "too many segments?!\n");
		}
		m_freem(m);

		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	if (data->m != NULL) {
		/* Sync and unload originally mapped mbuf */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_tag, data->map);
	}

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	return 0;
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	return 0;
static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		int old_imtime = sc->sc_imtime;
		uint32_t old_flags = sc->sc_flags;

		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			if (old_imtime * sc->sc_imtime == 0 ||
			    (old_flags ^ sc->sc_flags)) {
				nfe_init(sc);
			} else {
				NFE_WRITE(sc, NFE_IMTIMER,
					  NFE_IMTIME(sc->sc_imtime));
			}
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
static void
nfe_powerup(device_t dev)
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */
	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
static void
nfe_mac_reset(struct nfe_softc *sc)
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);
	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
static void
nfe_enable_intrs(struct nfe_softc *sc)
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placemark, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));
	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
static void
nfe_disable_intrs(struct nfe_softc *sc)
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}