/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.7 2006/12/24 04:58:27 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include "if_nfereg.h"
#include "if_nfevar.h"
static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	nfe_rxeof(struct nfe_softc *);
static void	nfe_txeof(struct nfe_softc *);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static int	nfe_debug = 0;

SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
	   "control debugging printfs");
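/*
 * The knob above surfaces as "hw.nfe.debug"; e.g. "sysctl hw.nfe.debug=2"
 * turns on the DPRINTFN(sc, 2, ...) messages in this file.  Note the
 * macros below only expand to real printfs when the driver is compiled
 * with NFE_DEBUG.
 */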
#ifdef NFE_DEBUG

#define DPRINTF(sc, fmt, ...) do {			\
	if (nfe_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if (nfe_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */
struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};
static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ 0, 0, NULL }
};
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
				sc->sc_flags = NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM |
					       NFE_HW_VLAN;
				break;
			}

			/* Enable jumbo frames for adapters that support it */
			if (sc->sc_flags & NFE_JUMBO_SUP)
				sc->sc_flags |= NFE_USE_JUMBO;

			device_set_desc(dev, n->desc);
			return 0;
		}
	}
	return ENXIO;
}
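/*
 * Summary of the capability flags chosen above, keyed off the PCI device
 * ID: NFE_JUMBO_SUP selects the jumbo buffer path, NFE_40BIT_ADDR selects
 * the 64-bit (V3) descriptor layout, NFE_HW_CSUM enables Rx/Tx checksum
 * offload, and NFE_HW_VLAN hardware VLAN tagging.  NFE_USE_JUMBO is
 * derived from NFE_JUMBO_SUP at probe time.
 */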
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

	if (sc->sc_flags & NFE_HW_CSUM) {
#ifdef NFE_CSUM
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
					IFCAP_CSUM_UDPv4;
#else
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
#endif
	}
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}
static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP) {
		nfe_init(sc);
		if (ifp->if_flags & IFF_RUNNING)
			nfe_start(ifp);
	}
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}
static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
}
#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
		break;
	case POLL_DEREGISTER:
		/* enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		break;
	case POLL_AND_CHECK_STATUS:
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc);
		}
		break;
	}
}

#endif	/* DEVICE_POLLING */
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}
}
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFMTU:
		/* XXX NFE_USE_JUMBO should be set here */
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
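/*
 * Rx descriptor ownership works by flag, not by ring pointer: the chip
 * clears NFE_RX_READY in a descriptor once it has filled it, so the loop
 * below consumes descriptors until it meets one that still has
 * NFE_RX_READY set.  V1 (nForce/nForce2) and V2/V3 descriptors use
 * different VALID/FIXME flag encodings, hence the two branches.
 */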
static void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* Save the old mbuf before the slot is refilled */
		m = data->m;

		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

		ifp->if_ipackets++;
		ifp->if_input(ifp, m);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
}
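/*
 * The Tx side mirrors the Rx handshake: the chip clears NFE_TX_VALID when
 * it is done with a descriptor, so reclaiming stops at the first
 * descriptor that still carries NFE_TX_VALID.  Only the descriptor for
 * the last fragment of a chain has an mbuf attached (data->m); that is
 * the point where the DMA map is unloaded and the chain freed.
 */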
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
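/*
 * Transmit encapsulation.  The descriptor chain is filled in two passes:
 * a forward pass writes addresses, lengths and per-fragment flags, then a
 * backward pass sets NFE_TX_VALID so the chip never sees a partially
 * built chain (see the XXX note inside).  The mbuf is loaded through the
 * current slot's DMA map; on success that map handle is exchanged with
 * the map of the slot holding the last fragment, where the mbuf pointer
 * is parked for nfe_txeof() to reclaim.
 */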
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	ctx.nsegs = NFE_MAX_SCATTER;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}
		m0 = m_new;

		ctx.nsegs = NFE_MAX_SCATTER;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragged TX mbuf\n");
			goto back;
		}
	}

	if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(ring->data_tag, map);
		error = ENOBUFS;
		goto back;
	}

	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL &&
	    m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;

		if (ifv != NULL)
			vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}

#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		data = &ring->data[j];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
back:
	if (error)
		m_freem(m0);
	return error;
}
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	struct mbuf *m0;
	int count = 0;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (ifq_is_empty(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		BPF_MTAP(ifp, m0);

		if (nfe_encap(sc, ring, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;

	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	nfe_stop(sc);

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (NFE_RX_RING_COUNT - 1) << 16 |
		  (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING) == 0)
#endif
	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}
static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* Disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
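/*
 * Rx ring setup.  One small DMA tag/map pair backs the descriptor array
 * itself, and a separate per-buffer tag provides one map per ring slot
 * plus a spare (data_tmpmap); nfe_newbuf_std() loads a fresh mbuf through
 * the spare map first, so a failed allocation never disturbs the buffer
 * already posted in the slot.
 */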
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_RX_RING_COUNT * descsize, 1,
				   NFE_RX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create desc RX DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_RX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			return error;
		}
	}

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}
static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < NFE_RX_RING_COUNT; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
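/*
 * Jumbo buffers come from a single contiguous DMA'able pool carved into
 * NFE_JBYTES-sized chunks (see nfe_jpool_alloc() below).  They are handed
 * to mbufs as external storage with nfe_jref()/nfe_jfree() as the
 * ref/free callbacks, so a chunk returns to jfreelist only when its
 * reference count drops to zero; the free list is guarded by
 * sc_jbuf_serializer since buffers can be freed from other contexts.
 */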
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}
static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}
static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}
static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
				   0, &ring->jtag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
				 BUS_DMA_WAITOK, &ring->jmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate jumbo DMA memory\n");
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
				NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load jumbo DMA map\n");
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}
static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
	}
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_TX_RING_COUNT * descsize, 1,
				   NFE_TX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_TX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES * NFE_MAX_SCATTER,
				   NFE_MAX_SCATTER, NFE_JBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}
static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}
static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}
static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
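/*
 * The chip's multicast filter is a single (addr, mask) pair rather than
 * a hash table: addr accumulates the bits common to every subscribed
 * address and mask ends up with a bit clear wherever two addresses
 * disagree, so the hardware presumably compares only the bits still set
 * in mask.  E.g. (hypothetical addresses) subscribing to
 * 01:00:5e:00:00:01 and 01:00:5e:00:00:05 gives addr 01:00:5e:00:00:01
 * and mask ff:ff:ff:ff:ff:fb -- the differing 0x04 bit is ignored, so
 * both addresses get through.
 */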
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
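/*
 * The chip powers up with its factory MAC address in the
 * NFE_MACADDR_{LO,HI} registers in reversed byte order;
 * nfe_get_macaddr() undoes that, while nfe_set_macaddr() programs the
 * registers in the (non-reversed) layout the running chip expects, so
 * the two routines are deliberately asymmetric.
 */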
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}
static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}
static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}
static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
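/*
 * Replenish a standard (MCLBYTES) Rx slot.  The new mbuf is loaded
 * through the ring's spare DMA map; only once the load has succeeded is
 * the slot's map unloaded and swapped with the spare, so an allocation
 * or mapping failure leaves the old buffer in place and the ring stays
 * fully populated.
 */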
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n",
			  error);
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}
static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}
static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}