/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.10 2007/06/17 11:38:58 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include "if_nfereg.h"
#include "if_nfevar.h"

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	nfe_rxeof(struct nfe_softc *);
static void	nfe_txeof(struct nfe_softc *);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_tick(void *);
static void	nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void	nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
				 int);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);

SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
SYSCTL_INT(_hw_nfe, OID_AUTO, rx_ring_count, CTLFLAG_RD, &nfe_rx_ring_count,
	   NFE_RX_RING_DEF_COUNT, "rx ring count");
SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
	   "control debugging printfs");

#ifdef NFE_DEBUG

#define DPRINTF(sc, fmt, ...) do {			\
	if (nfe_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if (nfe_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

struct nfe_dma_ctx {
	int			nsegs;
	bus_dma_segment_t	*segs;
};

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also the 400R; it might be both nForce2- and
	   nForce3-based boards can use the same MCPs (= southbridges) */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);
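
			/*
			 * Map the chip family to its feature set:
			 * NFE_JUMBO_SUP marks jumbo frame capable MACs,
			 * NFE_40BIT_ADDR selects the 64-bit descriptor
			 * format, NFE_HW_CSUM enables hardware checksum
			 * offload and NFE_HW_VLAN hardware VLAN tagging.
			 */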
			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
				sc->sc_flags = NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_flags = NFE_JUMBO_SUP |
					       NFE_40BIT_ADDR |
					       NFE_HW_CSUM |
					       NFE_HW_VLAN;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
#if 0
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
					IFCAP_CSUM_UDPv4;
#else
		ifp->if_capabilities = IFCAP_HWCSUM;
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
#endif
	}
#endif	/* NFE_CSUM */

	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP) {
		nfe_init(sc);
		if (ifp->if_flags & IFF_RUNNING)
			ifp->if_start(ifp);
	}
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}
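
/*
 * Reprogram the MAC side (duplex, speed and random seed registers) to
 * match whatever media the PHY negotiated; called by the miibus layer
 * whenever the link status changes.
 */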
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	switch (cmd) {
	case POLL_REGISTER:
		/* Disable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
		break;
	case POLL_DEREGISTER:
		/* enable interrupts */
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
		break;
	case POLL_AND_CHECK_STATUS:
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc);
		}
		break;
	}
}

#endif	/* DEVICE_POLLING */
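
/*
 * Interrupt handler: ack the pending sources in NFE_IRQ_STATUS first,
 * then service link state changes and reap the Rx/Tx rings.
 */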
static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFMTU:
		if (((sc->sc_flags & NFE_JUMBO_SUP) &&
		     ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    ((sc->sc_flags & NFE_JUMBO_SUP) == 0 &&
		     ifr->ifr_mtu > ETHERMTU)) {
			error = EOPNOTSUPP;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}
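
/*
 * Reap received frames: walk the Rx ring from the current slot until a
 * descriptor that is still marked NFE_RX_READY (i.e. still owned by the
 * hardware) is found.  A replacement buffer is allocated before the old
 * mbuf is handed to the stack, so a ring slot is never left empty.
 */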
static void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;

	reap = 0;
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

		ifp->if_ipackets++;
		ifp->if_input(ifp, m);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % nfe_rx_ring_count;
	}

	if (reap)
		bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
}
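
/*
 * Reap transmitted frames: descriptors whose NFE_TX_VALID bit has been
 * cleared by the hardware are done; the mbuf chain is unloaded and freed
 * when its last fragment is reached.
 */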
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get there */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
	}
}
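
/*
 * Map an outgoing mbuf chain into Tx descriptors.  The chain is
 * defragmented and remapped once if it has more segments than
 * NFE_MAX_SCATTER (EFBIG).  Note the two-pass descriptor setup below:
 * all fields except NFE_TX_VALID are written first, then the valid bits
 * are set in reverse order so the hardware never sees a partial chain.
 */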
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	ctx.nsegs = NFE_MAX_SCATTER;
	ctx.segs = segs;
	error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
				     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
	if (error && error != EFBIG) {
		if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
		goto back;
	}

	if (error) {	/* error == EFBIG */
		struct mbuf *m_new;

		m_new = m_defrag(m0, MB_DONTWAIT);
		if (m_new == NULL) {
			if_printf(&sc->arpcom.ac_if,
				  "could not defrag TX mbuf\n");
			error = ENOBUFS;
			goto back;
		} else {
			m0 = m_new;
		}

		ctx.nsegs = NFE_MAX_SCATTER;
		ctx.segs = segs;
		error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
					     nfe_buf_dma_addr, &ctx,
					     BUS_DMA_NOWAIT);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map defragged TX mbuf\n");
			goto back;
		}
	}

	if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(ring->data_tag, map);
		error = ENOBUFS;
		goto back;
	}

	/* setup h/w VLAN tagging */
	if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
	    m0->m_pkthdr.rcvif != NULL &&
	    m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
		struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;

		if (ifv != NULL)
			vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
	}

#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

	for (i = 0; i < ctx.nsegs; i++) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		data = &ring->data[j];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = ctx.nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % NFE_TX_RING_COUNT;
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
back:
	if (error)
		m_freem(m0);
	return error;
}

static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0;
	struct mbuf *m0;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	if (ifq_is_empty(&ifp->if_snd))
		return;

	for (;;) {
		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		BPF_MTAP(ifp, m0);

		if (nfe_encap(sc, ring, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}
	if (count == 0)	/* nothing sent */
		return;

	/* Sync TX descriptor ring */
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;

	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}
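
/*
 * Bring the interface up: (re)initialize the rings, program the RXTX
 * control magic for the descriptor format in use, set the MAC address
 * and ring base addresses, then start Rx/Tx and enable interrupts
 * (unless polling is active).
 */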
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	nfe_stop(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (nfe_rx_ring_count - 1) << 16 |
		  (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to effect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placemark, but don't use
	 * the timer.
	 */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if ((ifp->if_flags & IFF_POLLING) == 0)
#endif
	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}

static void
nfe_stop(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* Disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->jbuf = kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT,
			     M_DEVBUF, M_WAITOK | M_ZERO);
	ring->data = kmalloc(sizeof(struct nfe_rx_data) * nfe_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   nfe_rx_ring_count * descsize, 1,
				   nfe_rx_ring_count * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create desc RX DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate RX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				nfe_rx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load RX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	if (sc->sc_flags & NFE_JUMBO_SUP) {
		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			return error;
		}
	}

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < nfe_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < nfe_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}
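
/*
 * Jumbo receive buffers are carved out of one large DMA'able chunk
 * (see nfe_jpool_alloc) and handed out as external mbuf storage.
 * They are reference counted through nfe_jref()/nfe_jfree() and the
 * free list is protected by its own serializer (sc_jbuf_serializer).
 */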
static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
				   0, &ring->jtag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
				 BUS_DMA_WAITOK, &ring->jmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate jumbo DMA memory\n");
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
				NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load jumbo DMA map\n");
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
		return error;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
		ring->jtag = NULL;
	}
}

static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	void **desc;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_TX_RING_COUNT * descsize, 1,
				   NFE_TX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_TX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES * NFE_MAX_SCATTER,
				   NFE_MAX_SCATTER, NFE_JBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
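
/*
 * Program the hardware multicast filter.  The filter is an address/mask
 * pair: `addr' keeps the bits on which every subscribed multicast
 * address agrees, while `mask' marks the bits on which they differ, so
 * the chip accepts (at least) the union of the subscribed groups.
 */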
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
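
/*
 * The station address is stored in the chip in reverse byte order
 * (compare nfe_set_macaddr() below), hence the byte swapping when
 * reading it back.
 */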
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}
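
/*
 * bus_dma callbacks: nfe_ring_dma_addr() records the single segment of
 * a descriptor ring load, nfe_buf_dma_addr() copies the scatter/gather
 * list of an mbuf load into the caller-supplied nfe_dma_ctx.
 */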
static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}
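
/*
 * Attach a fresh cluster to an Rx slot.  The new mbuf is loaded into the
 * spare DMA map first and the maps are swapped only on success, so the
 * slot keeps its old buffer if allocation or the DMA load fails.
 */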
static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n",
			  error);
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}