/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.9 2007/05/01 23:48:03 dillon Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include "if_nfereg.h"
#include "if_nfevar.h"
static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	nfe_rxeof(struct nfe_softc *);
static void	nfe_txeof(struct nfe_softc *);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static int	nfe_debug = 0;

SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
	   "control debugging printfs");
#ifdef NFE_DEBUG

#define DPRINTF(sc, fmt, ...) do {					\
	if (nfe_debug) {						\
		if_printf(&(sc)->arpcom.ac_if,				\
			  fmt, __VA_ARGS__);				\
	}								\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {					\
	if (nfe_debug >= (lv)) {					\
		if_printf(&(sc)->arpcom.ac_if,				\
			  fmt, __VA_ARGS__);				\
	}								\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */
struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};
static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be that both nForce2-
	   and nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ 0, 0, NULL }
};
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
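
/*
 * Illustrative note (not from the original source): with the module glue
 * above, the driver builds as if_nfe.ko and can be loaded at runtime with
 * `kldload if_nfe', or at boot from loader.conf with
 *
 *	if_nfe_load="YES"
 *
 * The MODULE_DEPEND() line makes the kernel pull in miibus automatically.
 */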
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
				sc->sc_flags = NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM |
					       NFE_HW_VLAN;
				break;
			}

			/* Enable jumbo frames for adapters that support them */
			if (sc->sc_flags & NFE_JUMBO_SUP)
				sc->sc_flags |= NFE_USE_JUMBO;

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any PHY\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;

	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

	if (sc->sc_flags & NFE_HW_CSUM) {
#ifdef notyet
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
					IFCAP_CSUM_UDPv4;
#else
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
#endif
	}

	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}
static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP) {
		nfe_init(sc);
		if (ifp->if_flags & IFF_RUNNING)
			nfe_start(ifp);
	}
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}
static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
}
#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
		break;
	case POLL_DEREGISTER:
		/* enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		break;
	case POLL_AND_CHECK_STATUS:
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc);
		}
		break;
	}
}

#endif	/* DEVICE_POLLING */
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}
}
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFMTU:
		/* XXX NFE_USE_JUMBO should be set here */
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
static void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

		ifp->if_ipackets++;
		ifp->if_input(ifp, m);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
}
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	ctx.nsegs = NFE_MAX_SCATTER;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		}
		m0 = m_new;

		ctx.nsegs = NFE_MAX_SCATTER;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragged TX mbuf\n");
			goto back;
		}
	}

	if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(ring->data_tag, map);
		error = ENOBUFS;
		goto back;
	}

	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL &&
	    m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;

		if (ifv != NULL)
			vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}

	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */
	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		data = &ring->data[j];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
				htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
				htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;
	}

	ring->queued += ctx.nsegs;
	KKASSERT(ring->queued <= NFE_TX_RING_COUNT);

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
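
	/*
	 * Illustrative example (not from the original source): for a
	 * three-fragment chain occupying slots 5, 6 and 7, the loop above
	 * flags slot 7, then 6, then 5.  Had it run forward, the chip
	 * could start fetching at slot 5 while slots 6 and 7 still lack
	 * NFE_TX_VALID and misread the chain -- exactly the hazard the
	 * XXX comment before the fill loop warns about.
	 */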
	ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	error = 0;
back:
	if (error)
		m_freem(m0);
	return error;
}
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	struct mbuf *m0;
	int count = 0;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (ifq_is_empty(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, ring, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;

	if (!ifq_is_empty(&ifp->if_snd))
		nfe_start(ifp);
}
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	nfe_stop(sc);

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (NFE_RX_RING_COUNT - 1) << 16 |
		  (NFE_TX_RING_COUNT - 1));
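
	/*
	 * Illustrative example (not from the original source): the ring
	 * sizes are packed into one register, Rx in the high half and Tx
	 * in the low half.  Assuming, say, NFE_RX_RING_COUNT of 128 and
	 * NFE_TX_RING_COUNT of 256, the value written above would be
	 * (127 << 16) | 255 == 0x007f00ff.
	 */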
	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placemark, but don't use
	 * the timer.
	 */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING) == 0)
#endif
		/* enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}
static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* Disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_RX_RING_COUNT * descsize, 1,
				   NFE_RX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_RX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			return error;
		}
	}

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}
static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < NFE_RX_RING_COUNT; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}
static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}
static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
				   0, &ring->jtag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
				 BUS_DMA_WAITOK, &ring->jmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate jumbo DMA memory\n");
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
				NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load jumbo DMA map\n");
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}
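
/*
 * Illustrative layout note (not from the original source): after
 * nfe_jpool_alloc(), jbuf[i] covers bytes [i * NFE_JBYTES, (i + 1) *
 * NFE_JBYTES) of the pool, in both KVA and bus space.  Assuming the
 * usual 9KB chunk size for NFE_JBYTES (9216 bytes), jbuf[2].buf would
 * start 18432 bytes into ring->jpool.
 */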
static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
	}
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_TX_RING_COUNT * descsize, 1,
				   NFE_TX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_TX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES * NFE_MAX_SCATTER,
				   NFE_MAX_SCATTER, NFE_JBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}
static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);

	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
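
/*
 * Worked example (illustrative, not from the original source): with the
 * two multicast groups 01:00:5e:00:00:01 and 01:00:5e:00:00:fb joined,
 * nfe_setmulti() computes addr as the AND of the two addresses
 * (01:00:5e:00:00:01) and clears every mask bit on which they differ, so
 * the chip accepts any address that matches addr on the bits left set in
 * mask.  The filter is thus a superset match, not an exact list; the
 * stack still discards the extra frames.
 */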
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}
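
/*
 * Worked example (illustrative, not from the original source): the
 * EEPROM-programmed registers hold the station address byte-reversed.
 * For the MAC 00:11:22:33:44:55, nfe_get_macaddr() expects
 * NFE_MACADDR_LO = 0x00000011 and NFE_MACADDR_HI = 0x22334455, while
 * nfe_set_macaddr() below writes the running registers the other way
 * around: NFE_MACADDR_LO = 0x00005544, NFE_MACADDR_HI = 0x33221100.
 */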
static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}
static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((bus_addr_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n",
			  error);
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}
static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}
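
/*
 * Illustrative example (not from the original source): for a 40-bit bus
 * address such as 0x0123456780, the 64-bit descriptor above ends up with
 * physaddr[0] = htole32(0x01) and physaddr[1] = htole32(0x23456780); a
 * 32-bit descriptor simply stores htole32() of the low word.
 */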
static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}