/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.44 2008/09/17 07:51:59 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"
#include "opt_ethernet.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
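/*
 * NFE_CSUM_FEATURES above is the offload set installed into if_hwassist
 * when TX checksumming is enabled; see nfe_attach() and the SIOCSIFCAP
 * case in nfe_ioctl().
 */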
static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	nfe_rxeof(struct nfe_softc *);
static int	nfe_txeof(struct nfe_softc *, int);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);
static void	nfe_disable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);
static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
/* hw timer simulated interrupt moderation @8000Hz */
static int	nfe_imtime = -125;
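/*
 * Sign convention for nfe_imtime (see nfe_attach() and nfe_sysctl_imtime()):
 * a negative value enables dynamic interrupt moderation (NFE_F_DYN_IM) with
 * a timer period of |value| usec; a positive value selects a fixed moderation
 * time, and 0 disables moderation.  -125 usec corresponds to the 8000Hz
 * noted above.
 */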
TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);
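/*
 * The knobs above are boot-time tunables.  An illustrative (not
 * prescriptive) /boot/loader.conf snippet:
 *
 *	hw.nfe.imtimer="-125"		# dynamic moderation, 125us timer
 *	hw.nfe.rx_ring_count="128"	# values here are examples only
 */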
#ifdef NFE_DEBUG

#define DPRINTF(sc, fmt, ...) do {			\
	if ((sc)->sc_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if ((sc)->sc_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */
struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};
static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
				/* FALL THROUGH */
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				sc->sc_caps |= NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
	if (nfe_imtime < 0) {
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGE */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);
	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");
	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	} else {
		sc->sc_caps &= ~NFE_HW_CSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	nfe_detach(dev);
	return error;
}
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}
static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
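/*
 * MII status-change callback: invoked by the MII layer whenever the
 * negotiated media changes.  Propagates duplex and speed into the MAC's
 * PHY-interface, random-seed, misc and link-speed registers.
 */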
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}
static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
}
#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		nfe_disable_intrs(sc);
		break;
	case POLL_DEREGISTER:
		nfe_enable_intrs(sc);
		break;
	case POLL_AND_CHECK_STATUS:
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc, 1);
		}
		break;
	}
}

#endif	/* DEVICE_POLLING */
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;

		/* check Rx ring */
		ret = nfe_rxeof(sc);

		/* check Tx ring */
		ret |= nfe_txeof(sc, 1);

		if (sc->sc_flags & NFE_F_DYN_IM) {
			if (ret && (sc->sc_flags & NFE_F_IRQ_TIMER) == 0) {
				/*
				 * Assume that using the hardware timer
				 * could reduce the interrupt rate.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if (!ret && (sc->sc_flags & NFE_F_IRQ_TIMER)) {
				/*
				 * Nothing needs to be processed, fall back to
				 * use TX/RX interrupts.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;
			}
		}
	}
}
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;

			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
static int
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;
#ifdef ETHER_INPUT_CHAIN
	struct mbuf_chain chain[MAXCPU];
#endif

	reap = 0;
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

#ifdef ETHER_INPUT_CHAIN
	ether_input_chain_init(chain);
#endif

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
#ifdef ETHER_INPUT_CHAIN
		ether_input_chain(ifp, m, chain);
#else
		ifp->if_input(ifp, m);
#endif
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}

	if (reap) {
		bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
#ifdef ETHER_INPUT_CHAIN
		ether_input_dispatch(chain);
#endif
	}
	return reap;
}
static int
nfe_txeof(struct nfe_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;
	int reap = 0;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		reap = 1;
		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get here */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}

	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		nfe_start(ifp);

	return reap;
}
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint32_t flags = 0, vtag = 0;
	int error, i, j, maxsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("not enough segments %d,%d\n", maxsegs, sc->sc_tx_spare));

	ctx.nsegs = maxsegs;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (!error && ctx.nsegs == 0) {
		bus_dmamap_unload(ring->data_tag, map);
		error = EFBIG;
	}
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}
	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			m0 = m_new;
		}

		ctx.nsegs = maxsegs;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error || ctx.nsegs == 0) {
			if (!error) {
				bus_dmamap_unload(ring->data_tag, map);
				error = EFBIG;
			}
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragged TX mbuf\n");
			goto back;
		}
	}

	error = 0;

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		data = &ring->data[j];

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= sc->sc_tx_ring_count);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_caps & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % sc->sc_tx_ring_count;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
back:
	if (error)
		m_freem(m0);
	return error;
}
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0, oactive = 0;
	struct mbuf *m0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	for (;;) {
		int error;

		if (sc->sc_tx_ring_count - ring->queued <
		    sc->sc_tx_spare + NFE_NSEG_RSVD) {
			if (!oactive) {
				ifp->if_flags |= IFF_OACTIVE;
				oactive = 1;
			}
			break;
		}

		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m0);

		error = nfe_encap(sc, ring, m0);
		if (error) {
			ifp->if_oerrors++;
			if (error == EFBIG) {
				if (!oactive) {
					ifp->if_flags |= IFF_OACTIVE;
					oactive = 1;
				}
				break;
			}
			continue;
		} else {
			count++;
		}

		/*
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	if (!ifq_is_empty(&ifp->if_snd))
		nfe_start(ifp);
}
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_stop(sc);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(sc->sc_dev);

	/*
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_F_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_F_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_POLL, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_caps & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_caps & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#if defined(__LP64__)
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#if defined(__LP64__)
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (sc->sc_rx_ring_count - 1) << 16 |
		  (sc->sc_tx_ring_count - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING))
		nfe_disable_intrs(sc);
	else
#endif
	nfe_enable_intrs(sc);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * If we had stuff in the tx ring before, it's all cleaned out now,
	 * so we are not going to get an interrupt; jump-start any pending
	 * transmission.
	 */
	if (!ifq_is_empty(&ifp->if_snd))
		nfe_start(ifp);
}
static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;

#define WAITMAX	50000

	/*
	 * Abort Tx
	 */
	NFE_WRITE(sc, NFE_TX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop TX\n");

	/*
	 * Abort Rx
	 */
	NFE_WRITE(sc, NFE_RX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop RX\n");

#undef WAITMAX

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   sc->sc_rx_ring_count * descsize, 1,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create desc RX DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				sc->sc_rx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_caps & NFE_JUMBO_SUP) {
		ring->jbuf =
		kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
			M_DEVBUF, M_WAITOK | M_ZERO);

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			kfree(ring->jbuf, M_DEVBUF);
			ring->jbuf = NULL;
			/* Allow jumbo frame allocation to fail */
		}
	}

	ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(ring->data_tag,
						   ring->data[j].map);
			}
			bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
			bus_dma_tag_destroy(ring->data_tag);
			ring->data_tag = NULL;
			return error;
		}
	}
	return 0;
}
static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
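/*
 * Jumbo receive buffers are carved out of one large DMA'able chunk
 * (see nfe_jpool_alloc()) and are handed to mbufs as external storage
 * with nfe_jref()/nfe_jfree() as the reference/free callbacks.  Because
 * the mbufs may be freed from a context that does not hold the ifnet
 * serializer, the free list is protected by a dedicated serializer,
 * sc_jbuf_serializer.
 */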
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}
static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}
static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}
static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JPOOL_SIZE(sc), 1,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &ring->jtag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
				 BUS_DMA_WAITOK, &ring->jmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate jumbo DMA memory\n");
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
				NFE_JPOOL_SIZE(sc),
				nfe_ring_dma_addr, &physaddr, BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load jumbo DMA map\n");
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}
static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
	}
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   sc->sc_tx_ring_count * descsize, 1,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				sc->sc_tx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES, NFE_MAX_SCATTER,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);

			for (j = 0; j < i; ++j) {
				bus_dmamap_destroy(ring->data_tag,
						   ring->data[j].map);
			}
			bus_dma_tag_destroy(ring->data_tag);
			ring->data_tag = NULL;
			return error;
		}
	}
	return 0;
}
static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}
static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}
static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
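	/*
	 * Accumulate a single (address, mask) pair over all multicast
	 * addresses: addr keeps the bits every address has in common,
	 * while mask loses the bits on which the addresses differ.
	 * The hardware then presumably accepts any destination address
	 * that matches addr on the bits still set in mask.
	 */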
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		const uint8_t *maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
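	/*
	 * Chips flagged NFE_FIX_EADDR apparently keep the station address
	 * in the MACADDR registers in a different byte order than the
	 * older chips, which store it reversed; hence the two decodings
	 * below.
	 */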
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}
static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}
static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
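/*
 * bus_dmamap_load() callback for the descriptor rings and the jumbo
 * pool: each is loaded as a single segment, whose bus address is
 * stored through the uint32_t pointer passed in `arg'.
 */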
static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}
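/*
 * bus_dmamap_load_mbuf() callback for TX/RX buffers: copies the segment
 * array into the caller-supplied nfe_dma_ctx.  When the mbuf maps to
 * more segments than the caller allowed, ctx->nsegs is set to 0 so the
 * caller can detect the overflow (and, on the TX path, defragment the
 * mbuf and retry).
 */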
static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(ring->data_tag, ring->data_tmpmap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}
static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}
static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
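/*
 * Sysctl handler for hw.nfeX.imtimer.  The exported value follows the
 * hw.nfe.imtimer tunable convention: a negative value enables dynamic
 * interrupt moderation with a |value| usec timer period, a positive
 * value selects a fixed moderation time and 0 disables moderation.
 */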
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		int old_imtime = sc->sc_imtime;
		uint32_t old_flags = sc->sc_flags;

		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			if (old_imtime * sc->sc_imtime == 0 ||
			    (old_flags ^ sc->sc_flags)) {
				nfe_enable_intrs(sc);
			} else {
				NFE_WRITE(sc, NFE_IMTIMER,
					  NFE_IMTIME(sc->sc_imtime));
			}
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}
static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */

	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}
static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}
static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placemark, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}
static void
nfe_disable_intrs(struct nfe_softc *sc)
{
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}