/*      $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $        */
/*      $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.5 2006/12/06 13:47:29 sephe Exp $        */

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include "if_nfereg.h"
#include "if_nfevar.h"

static int      nfe_probe(device_t);
static int      nfe_attach(device_t);
static int      nfe_detach(device_t);
static void     nfe_shutdown(device_t);
static int      nfe_resume(device_t);
static int      nfe_suspend(device_t);

static int      nfe_miibus_readreg(device_t, int, int);
static void     nfe_miibus_writereg(device_t, int, int, int);
static void     nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void     nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void     nfe_intr(void *);
static int      nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     nfe_rxeof(struct nfe_softc *);
static void     nfe_txeof(struct nfe_softc *);
static int      nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
                          struct mbuf *);
static void     nfe_start(struct ifnet *);
static void     nfe_watchdog(struct ifnet *);
static void     nfe_init(void *);
static void     nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void     nfe_jfree(void *);
static void     nfe_jref(void *);
static int      nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void     nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int      nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void     nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int      nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void     nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int      nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void     nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int      nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void     nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int      nfe_ifmedia_upd(struct ifnet *);
static void     nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void     nfe_setmulti(struct nfe_softc *);
static void     nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void     nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void     nfe_tick(void *);
static void     nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
static void     nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
                                 int);
static void     nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
                                     int, bus_addr_t);
static void     nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
                                     int);
static int      nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
                               int);
static int      nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
                                 int);

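/*
 * Debug printfs.  With NFE_DEBUG compiled in, verbosity is controlled
 * at run time through the hw.nfe.debug sysctl; for example
 * "sysctl hw.nfe.debug=2" enables all messages up to level 2.
 */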
#define NFE_DEBUG
#ifdef NFE_DEBUG

static int      nfe_debug = 0;

SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
           "control debugging printfs");

#define DPRINTF(sc, fmt, ...) do {              \
        if (nfe_debug) {                        \
                if_printf(&(sc)->arpcom.ac_if,  \
                          fmt, __VA_ARGS__);    \
        }                                       \
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {         \
        if (nfe_debug >= (lv)) {                \
                if_printf(&(sc)->arpcom.ac_if,  \
                          fmt, __VA_ARGS__);    \
        }                                       \
} while (0)

#else   /* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif  /* NFE_DEBUG */

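/*
 * Callback argument for bus_dmamap_load_mbuf(): the caller points
 * 'segs' at a local segment array and sets 'nsegs' to its capacity;
 * after a successful load, 'nsegs' holds the number of DMA segments
 * actually used (filled in by nfe_buf_dma_addr()).
 */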
struct nfe_dma_ctx {
        int                     nsegs;
        bus_dma_segment_t       *segs;
};

static const struct nfe_dev {
        uint16_t        vid;
        uint16_t        did;
        const char      *desc;
} nfe_devices[] = {
        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
          "NVIDIA nForce Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
          "NVIDIA nForce2 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
          "NVIDIA nForce3 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
          "NVIDIA nForce3 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
          "NVIDIA nForce3 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
          "NVIDIA nForce3 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
          "NVIDIA nForce3 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
          "NVIDIA CK804 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
          "NVIDIA CK804 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
          "NVIDIA MCP04 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
          "NVIDIA MCP04 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
          "NVIDIA MCP51 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
          "NVIDIA MCP51 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
          "NVIDIA MCP55 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
          "NVIDIA MCP55 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
          "NVIDIA MCP61 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
          "NVIDIA MCP61 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
          "NVIDIA MCP61 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
          "NVIDIA MCP61 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
          "NVIDIA MCP65 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
          "NVIDIA MCP65 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
          "NVIDIA MCP65 Gigabit Ethernet" },

        { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
          "NVIDIA MCP65 Gigabit Ethernet" },

        /* required sentinel: nfe_probe() iterates until desc == NULL */
        { 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         nfe_probe),
        DEVMETHOD(device_attach,        nfe_attach),
        DEVMETHOD(device_detach,        nfe_detach),
        DEVMETHOD(device_suspend,       nfe_suspend),
        DEVMETHOD(device_resume,        nfe_resume),
        DEVMETHOD(device_shutdown,      nfe_shutdown),

        /* Bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       nfe_miibus_readreg),
        DEVMETHOD(miibus_writereg,      nfe_miibus_writereg),
        DEVMETHOD(miibus_statchg,       nfe_miibus_statchg),

        { 0, 0 }
};

static driver_t nfe_driver = {
        "nfe",
        nfe_methods,
        sizeof(struct nfe_softc)
};

static devclass_t       nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

static int
nfe_probe(device_t dev)
{
        const struct nfe_dev *n;
        uint16_t vid, did;

        vid = pci_get_vendor(dev);
        did = pci_get_device(dev);
        for (n = nfe_devices; n->desc != NULL; ++n) {
                if (vid == n->vid && did == n->did) {
                        struct nfe_softc *sc = device_get_softc(dev);

                        switch (did) {
                        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
                        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
                        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
                        case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
                                sc->sc_flags = NFE_JUMBO_SUP |
                                               NFE_HW_CSUM;
                                break;
                        case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
                        case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
                        case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
                        case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
                        case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
                        case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
                                sc->sc_flags = NFE_40BIT_ADDR;
                                break;
                        case PCI_PRODUCT_NVIDIA_CK804_LAN1:
                        case PCI_PRODUCT_NVIDIA_CK804_LAN2:
                        case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
                        case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
                        case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
                        case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
                        case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
                        case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
                                sc->sc_flags = NFE_JUMBO_SUP |
                                               NFE_40BIT_ADDR |
                                               NFE_HW_CSUM;
                                break;
                        case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
                        case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
                                sc->sc_flags = NFE_JUMBO_SUP |
                                               NFE_40BIT_ADDR |
                                               NFE_HW_CSUM |
                                               NFE_HW_VLAN;
                                break;
                        }

                        /* Enable jumbo frames for adapters that support it */
                        if (sc->sc_flags & NFE_JUMBO_SUP)
                                sc->sc_flags |= NFE_USE_JUMBO;

                        device_set_desc(dev, n->desc);
                        return 0;
                }
        }
        return ENXIO;
}

static int
nfe_attach(device_t dev)
{
        struct nfe_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint8_t eaddr[ETHER_ADDR_LEN];
        int error;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        lwkt_serialize_init(&sc->sc_jbuf_serializer);

        sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
        if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
                uint32_t mem, irq;

                mem = pci_read_config(dev, sc->sc_mem_rid, 4);
                irq = pci_read_config(dev, PCIR_INTLINE, 4);

                device_printf(dev, "chip is in D%d power mode "
                    "-- setting to D0\n", pci_get_powerstate(dev));

                pci_set_powerstate(dev, PCI_POWERSTATE_D0);

                pci_write_config(dev, sc->sc_mem_rid, mem, 4);
                pci_write_config(dev, PCIR_INTLINE, irq, 4);
        }
#endif  /* !BURN_BRIDGES */

        /* Enable bus mastering */
        pci_enable_busmaster(dev);

        /* Allocate IO memory */
        sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                                &sc->sc_mem_rid, RF_ACTIVE);
        if (sc->sc_mem_res == NULL) {
                device_printf(dev, "could not allocate io memory\n");
                return ENXIO;
        }
        sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
        sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

        /* Allocate IRQ */
        sc->sc_irq_rid = 0;
        sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                                                &sc->sc_irq_rid,
                                                RF_SHAREABLE | RF_ACTIVE);
        if (sc->sc_irq_res == NULL) {
                device_printf(dev, "could not allocate irq\n");
                error = ENXIO;
                goto fail;
        }

        nfe_get_macaddr(sc, eaddr);

        /*
         * Allocate Tx and Rx rings.
         */
        error = nfe_alloc_tx_ring(sc, &sc->txq);
        if (error) {
                device_printf(dev, "could not allocate Tx ring\n");
                goto fail;
        }

        error = nfe_alloc_rx_ring(sc, &sc->rxq);
        if (error) {
                device_printf(dev, "could not allocate Rx ring\n");
                goto fail;
        }

        error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
                              nfe_ifmedia_sts);
        if (error) {
                device_printf(dev, "MII without any phy\n");
                goto fail;
        }

        ifp->if_softc = sc;
        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = nfe_ioctl;
        ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
        ifp->if_poll = nfe_poll;
#endif
        ifp->if_watchdog = nfe_watchdog;
        ifp->if_init = nfe_init;
        ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
        ifq_set_ready(&ifp->if_snd);

        ifp->if_capabilities = IFCAP_VLAN_MTU;

#if 0
        if (sc->sc_flags & NFE_USE_JUMBO)
                ifp->if_hardmtu = NFE_JUMBO_MTU;
#endif

        if (sc->sc_flags & NFE_HW_VLAN)
                ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
        if (sc->sc_flags & NFE_HW_CSUM) {
#if 0
                ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
                    IFCAP_CSUM_UDPv4;
#else
                ifp->if_capabilities |= IFCAP_HWCSUM;
                ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
#endif
        }
#endif
        ifp->if_capenable = ifp->if_capabilities;

        callout_init(&sc->sc_tick_ch);

        ether_ifattach(ifp, eaddr, NULL);

        error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
                               &sc->sc_ih, ifp->if_serializer);
        if (error) {
                device_printf(dev, "could not setup intr\n");
                ether_ifdetach(ifp);
                goto fail;
        }

        return 0;
fail:
        nfe_detach(dev);
        return error;
}

static int
nfe_detach(device_t dev)
{
        struct nfe_softc *sc = device_get_softc(dev);

        if (device_is_attached(dev)) {
                struct ifnet *ifp = &sc->arpcom.ac_if;

                lwkt_serialize_enter(ifp->if_serializer);
                nfe_stop(sc);
                bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
                lwkt_serialize_exit(ifp->if_serializer);

                ether_ifdetach(ifp);
        }

        if (sc->sc_miibus != NULL)
                device_delete_child(dev, sc->sc_miibus);
        bus_generic_detach(dev);

        if (sc->sc_irq_res != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
                                     sc->sc_irq_res);
        }

        if (sc->sc_mem_res != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
                                     sc->sc_mem_res);
        }

        nfe_free_tx_ring(sc, &sc->txq);
        nfe_free_rx_ring(sc, &sc->rxq);

        return 0;
}

static void
nfe_shutdown(device_t dev)
{
        struct nfe_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        nfe_stop(sc);
        lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
        struct nfe_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        nfe_stop(sc);
        lwkt_serialize_exit(ifp->if_serializer);

        return 0;
}

static int
nfe_resume(device_t dev)
{
        struct nfe_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        if (ifp->if_flags & IFF_UP) {
                nfe_init(sc);
                if (ifp->if_flags & IFF_RUNNING)
                        ifp->if_start(ifp);
        }
        lwkt_serialize_exit(ifp->if_serializer);

        return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
        struct nfe_softc *sc = device_get_softc(dev);
        struct mii_data *mii = device_get_softc(sc->sc_miibus);
        uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

        phy = NFE_READ(sc, NFE_PHY_IFACE);
        phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

        seed = NFE_READ(sc, NFE_RNDSEED);
        seed &= ~NFE_SEED_MASK;

        if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
                phy  |= NFE_PHY_HDX;    /* half-duplex */
                misc |= NFE_MISC1_HDX;
        }

        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_1000_T:        /* full-duplex only */
                link |= NFE_MEDIA_1000T;
                seed |= NFE_SEED_1000T;
                phy  |= NFE_PHY_1000T;
                break;
        case IFM_100_TX:
                link |= NFE_MEDIA_100TX;
                seed |= NFE_SEED_100TX;
                phy  |= NFE_PHY_100TX;
                break;
        case IFM_10_T:
                link |= NFE_MEDIA_10T;
                seed |= NFE_SEED_10T;
                break;
        }

        NFE_WRITE(sc, NFE_RNDSEED, seed);       /* XXX: gigabit NICs only? */

        NFE_WRITE(sc, NFE_PHY_IFACE, phy);
        NFE_WRITE(sc, NFE_MISC1, misc);
        NFE_WRITE(sc, NFE_LINKSPEED, link);
}

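/*
 * MII registers are accessed indirectly: a read or write command is
 * posted to NFE_PHY_CTL and completion is detected by polling the
 * NFE_PHY_BUSY bit, for up to 1000 iterations of 100us (~100ms).
 */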
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
        struct nfe_softc *sc = device_get_softc(dev);
        uint32_t val;
        int ntries;

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

        if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
                NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
                DELAY(100);
        }

        NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

        for (ntries = 0; ntries < 1000; ntries++) {
                DELAY(100);
                if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
                        break;
        }
        if (ntries == 1000) {
                DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
                return 0;
        }

        if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
                DPRINTFN(sc, 2, "could not read PHY %s\n", "");
                return 0;
        }

        val = NFE_READ(sc, NFE_PHY_DATA);
        if (val != 0xffffffff && val != 0)
                sc->mii_phyaddr = phy;

        DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

        return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
        struct nfe_softc *sc = device_get_softc(dev);
        uint32_t ctl;
        int ntries;

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

        if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
                NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
                DELAY(100);
        }

        NFE_WRITE(sc, NFE_PHY_DATA, val);
        ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
        NFE_WRITE(sc, NFE_PHY_CTL, ctl);

        for (ntries = 0; ntries < 1000; ntries++) {
                DELAY(100);
                if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
                        break;
        }

#ifdef NFE_DEBUG
        if (ntries == 1000)
                DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct nfe_softc *sc = ifp->if_softc;

        switch (cmd) {
        case POLL_REGISTER:
                /* Disable interrupts */
                NFE_WRITE(sc, NFE_IRQ_MASK, 0);
                break;
        case POLL_DEREGISTER:
                /* enable interrupts */
                NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
                break;
        case POLL_AND_CHECK_STATUS:
                /* fall through */
        case POLL_ONLY:
                if (ifp->if_flags & IFF_RUNNING) {
                        nfe_rxeof(sc);
                        nfe_txeof(sc);
                }
                break;
        }
}

#endif

static void
nfe_intr(void *arg)
{
        struct nfe_softc *sc = arg;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t r;

        r = NFE_READ(sc, NFE_IRQ_STATUS);
        if (r == 0)
                return; /* not for us */
        NFE_WRITE(sc, NFE_IRQ_STATUS, r);

        DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

        if (r & NFE_IRQ_LINK) {
                NFE_READ(sc, NFE_PHY_STATUS);
                NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
                DPRINTF(sc, "link state changed %s\n", "");
        }

        if (ifp->if_flags & IFF_RUNNING) {
                /* check Rx ring */
                nfe_rxeof(sc);

                /* check Tx ring */
                nfe_txeof(sc);
        }
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
        struct nfe_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct mii_data *mii;
        int error = 0, mask;

        switch (cmd) {
        case SIOCSIFMTU:
                /* XXX NFE_USE_JUMBO should be set here */
                break;
        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        /*
                         * If only the PROMISC or ALLMULTI flag changes, then
                         * don't do a full re-init of the chip, just update
                         * the Rx filter.
                         */
                        if ((ifp->if_flags & IFF_RUNNING) &&
                            ((ifp->if_flags ^ sc->sc_if_flags) &
                             (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
                                nfe_setmulti(sc);
                        } else {
                                if (!(ifp->if_flags & IFF_RUNNING))
                                        nfe_init(sc);
                        }
                } else {
                        if (ifp->if_flags & IFF_RUNNING)
                                nfe_stop(sc);
                }
                sc->sc_if_flags = ifp->if_flags;
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING)
                        nfe_setmulti(sc);
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                mii = device_get_softc(sc->sc_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
                break;
        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_HWCSUM) {
                        if (IFCAP_HWCSUM & ifp->if_capenable)
                                ifp->if_capenable &= ~IFCAP_HWCSUM;
                        else
                                ifp->if_capenable |= IFCAP_HWCSUM;
                }
                break;
        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }
        return error;
}

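/*
 * Rx completion: walk the ring until a descriptor that is still marked
 * NFE_RX_READY (i.e. still owned by the hardware) is found.  Chips
 * without jumbo/40-bit support use the V1 descriptor flags, all others
 * use V2.  A replacement buffer is allocated before the received mbuf
 * is passed up the stack, so on allocation failure the old buffer is
 * recycled and the frame is dropped instead of losing the ring slot.
 */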
static void
nfe_rxeof(struct nfe_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct nfe_rx_ring *ring = &sc->rxq;
        int reap;

        reap = 0;
        bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);

        for (;;) {
                struct nfe_rx_data *data = &ring->data[ring->cur];
                struct mbuf *m;
                uint16_t flags;
                int len, error;

                if (sc->sc_flags & NFE_40BIT_ADDR) {
                        struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

                        flags = le16toh(desc64->flags);
                        len = le16toh(desc64->length) & 0x3fff;
                } else {
                        struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

                        flags = le16toh(desc32->flags);
                        len = le16toh(desc32->length) & 0x3fff;
                }

                if (flags & NFE_RX_READY)
                        break;

                reap = 1;

                if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
                        if (!(flags & NFE_RX_VALID_V1))
                                goto skip;

                        if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
                                flags &= ~NFE_RX_ERROR;
                                len--;  /* fix buffer length */
                        }
                } else {
                        if (!(flags & NFE_RX_VALID_V2))
                                goto skip;

                        if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
                                flags &= ~NFE_RX_ERROR;
                                len--;  /* fix buffer length */
                        }
                }

                if (flags & NFE_RX_ERROR) {
                        ifp->if_ierrors++;
                        goto skip;
                }

                m = data->m;

                if (sc->sc_flags & NFE_USE_JUMBO)
                        error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
                else
                        error = nfe_newbuf_std(sc, ring, ring->cur, 0);
                if (error) {
                        ifp->if_ierrors++;
                        goto skip;
                }

                /* finalize mbuf */
                m->m_pkthdr.len = m->m_len = len;
                m->m_pkthdr.rcvif = ifp;

#ifdef notyet
                if (sc->sc_flags & NFE_HW_CSUM) {
                        if (flags & NFE_RX_IP_CSUMOK)
                                m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
                        if (flags & NFE_RX_UDP_CSUMOK)
                                m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
                        if (flags & NFE_RX_TCP_CSUMOK)
                                m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
                }
#elif defined(NFE_CSUM)
                if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
                        m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

                ifp->if_ipackets++;
                ifp->if_input(ifp, m);
skip:
                nfe_set_ready_rxdesc(sc, ring, ring->cur);
                sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
        }

        if (reap)
                bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
}

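/*
 * Tx completion: process descriptors from 'next' up to 'cur', stopping
 * at the first one still owned by the hardware (NFE_TX_VALID set).
 * Only the slot carrying the last fragment of a chain holds the mbuf
 * and the loaded DMA map; intermediate fragments are simply skipped.
 * Freeing at least one slot clears IFF_OACTIVE and restarts output.
 */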
static void
nfe_txeof(struct nfe_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct nfe_tx_ring *ring = &sc->txq;
        struct nfe_tx_data *data = NULL;

        bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
        while (ring->next != ring->cur) {
                uint16_t flags;

                if (sc->sc_flags & NFE_40BIT_ADDR)
                        flags = le16toh(ring->desc64[ring->next].flags);
                else
                        flags = le16toh(ring->desc32[ring->next].flags);

                if (flags & NFE_TX_VALID)
                        break;

                data = &ring->data[ring->next];

                if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
                        if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
                                goto skip;

                        if ((flags & NFE_TX_ERROR_V1) != 0) {
                                if_printf(ifp, "tx v1 error 0x%4b\n", flags,
                                          NFE_V1_TXERR);
                                ifp->if_oerrors++;
                        } else {
                                ifp->if_opackets++;
                        }
                } else {
                        if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
                                goto skip;

                        if ((flags & NFE_TX_ERROR_V2) != 0) {
                                if_printf(ifp, "tx v2 error 0x%4b\n", flags,
                                          NFE_V2_TXERR);
                                ifp->if_oerrors++;
                        } else {
                                ifp->if_opackets++;
                        }
                }

                if (data->m == NULL) {  /* should not get there */
                        if_printf(ifp,
                                  "last fragment bit w/o associated mbuf!\n");
                        goto skip;
                }

                /* last fragment of the mbuf chain transmitted */
                bus_dmamap_sync(ring->data_tag, data->map,
                                BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(ring->data_tag, data->map);
                m_freem(data->m);
                data->m = NULL;

                ifp->if_timer = 0;
skip:
                ring->queued--;
                KKASSERT(ring->queued >= 0);
                ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
        }

        if (data != NULL) {     /* at least one slot freed */
                ifp->if_flags &= ~IFF_OACTIVE;
                ifp->if_start(ifp);
        }
}

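/*
 * Map an mbuf chain into the Tx ring.  If the chain has too many
 * segments (EFBIG), it is defragmented once and the load is retried.
 * Returns 0 on success; on failure the mbuf has been freed and must
 * not be touched again by the caller.
 */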
static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
        struct nfe_dma_ctx ctx;
        bus_dma_segment_t segs[NFE_MAX_SCATTER];
        struct nfe_tx_data *data, *data_map;
        bus_dmamap_t map;
        struct nfe_desc64 *desc64 = NULL;
        struct nfe_desc32 *desc32 = NULL;
        uint16_t flags = 0;
        uint32_t vtag = 0;
        int error, i, j;

        data = &ring->data[ring->cur];
        map = data->map;
        data_map = data;        /* Remember who owns the DMA map */

        ctx.nsegs = NFE_MAX_SCATTER;
        ctx.segs = segs;
        error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
                                     nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
        if (error && error != EFBIG) {
                if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
                goto back;
        }

        if (error) {    /* error == EFBIG */
                struct mbuf *m_new;

                m_new = m_defrag(m0, MB_DONTWAIT);
                if (m_new == NULL) {
                        if_printf(&sc->arpcom.ac_if,
                                  "could not defrag TX mbuf\n");
                        error = ENOBUFS;
                        goto back;
                } else {
                        m0 = m_new;
                }

                ctx.nsegs = NFE_MAX_SCATTER;
                ctx.segs = segs;
                error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
                                             nfe_buf_dma_addr, &ctx,
                                             BUS_DMA_NOWAIT);
                if (error) {
                        if_printf(&sc->arpcom.ac_if,
                                  "could not map defragmented TX mbuf\n");
                        goto back;
                }
        }

        error = 0;

        if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
                bus_dmamap_unload(ring->data_tag, map);
                error = ENOBUFS;
                goto back;
        }

        /* setup h/w VLAN tagging */
        if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
            m0->m_pkthdr.rcvif != NULL &&
            m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
                struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;

                if (ifv != NULL)
                        vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
        }

#ifdef NFE_CSUM
        if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
                flags |= NFE_TX_IP_CSUM;
        if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
                flags |= NFE_TX_TCP_CSUM;
#endif

        /*
         * XXX urm. somebody is unaware of how hardware works.  You
         * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
         * the ring until the entire chain is actually *VALID*.  Otherwise
         * the hardware may encounter a partially initialized chain that
         * is marked as being ready to go when it in fact is not ready to
         * go.
         */

        for (i = 0; i < ctx.nsegs; i++) {
                j = (ring->cur + i) % NFE_TX_RING_COUNT;
                data = &ring->data[j];

                if (sc->sc_flags & NFE_40BIT_ADDR) {
                        desc64 = &ring->desc64[j];
#if defined(__LP64__)
                        desc64->physaddr[0] =
                            htole32(segs[i].ds_addr >> 32);
#endif
                        desc64->physaddr[1] =
                            htole32(segs[i].ds_addr & 0xffffffff);
                        desc64->length = htole16(segs[i].ds_len - 1);
                        desc64->vtag = htole32(vtag);
                        desc64->flags = htole16(flags);
                } else {
                        desc32 = &ring->desc32[j];
                        desc32->physaddr = htole32(segs[i].ds_addr);
                        desc32->length = htole16(segs[i].ds_len - 1);
                        desc32->flags = htole16(flags);
                }

                /* csum flags and vtag belong to the first fragment only */
                flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
                vtag = 0;

                ring->queued++;
                KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
        }

        /* the whole mbuf chain has been DMA mapped, fix last descriptor */
        if (sc->sc_flags & NFE_40BIT_ADDR) {
                desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
        } else {
                if (sc->sc_flags & NFE_JUMBO_SUP)
                        flags = NFE_TX_LASTFRAG_V2;
                else
                        flags = NFE_TX_LASTFRAG_V1;
                desc32->flags |= htole16(flags);
        }

        /*
         * Set NFE_TX_VALID backwards so the hardware doesn't see the
         * whole mess until the first descriptor in the map is flagged.
         */
        for (i = ctx.nsegs - 1; i >= 0; --i) {
                j = (ring->cur + i) % NFE_TX_RING_COUNT;
                if (sc->sc_flags & NFE_40BIT_ADDR) {
                        desc64 = &ring->desc64[j];
                        desc64->flags |= htole16(NFE_TX_VALID);
                } else {
                        desc32 = &ring->desc32[j];
                        desc32->flags |= htole16(NFE_TX_VALID);
                }
        }
        ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;

        /*
         * Exchange the DMA map: the map just loaded belongs to the first
         * slot of this chain, but the mbuf is attached to the last slot,
         * so swap the two maps to keep the loaded map and its mbuf
         * together for nfe_txeof().
         */
        data_map->map = data->map;
        data->map = map;
        data->m = m0;

        bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
back:
        if (error)
                m_freem(m0);
        return error;
}

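/*
 * if_start handler, called with the interface serializer held.
 * Packets are dequeued until the ring fills up; a failed nfe_encap()
 * sets IFF_OACTIVE (the offending packet is dropped).  If anything was
 * queued, the descriptor ring is synced and the Tx engine is kicked
 * through NFE_RXTX_KICKTX.
 */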
static void
nfe_start(struct ifnet *ifp)
{
        struct nfe_softc *sc = ifp->if_softc;
        struct nfe_tx_ring *ring = &sc->txq;
        int count = 0;
        struct mbuf *m0;

        if (ifp->if_flags & IFF_OACTIVE)
                return;

        if (ifq_is_empty(&ifp->if_snd))
                return;

        for (;;) {
                m0 = ifq_dequeue(&ifp->if_snd, NULL);
                if (m0 == NULL)
                        break;

                BPF_MTAP(ifp, m0);

                if (nfe_encap(sc, ring, m0) != 0) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }
                ++count;

                /*
                 * NOTE:
                 * `m0' may be freed in nfe_encap(), so
                 * it should not be touched any more.
                 */
        }
        if (count == 0) /* nothing sent */
                return;

        /* Sync TX descriptor ring */
        bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

        /* Kick Tx */
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

        /*
         * Set a timeout in case the chip goes out to lunch.
         */
        ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
        struct nfe_softc *sc = ifp->if_softc;

        if (ifp->if_flags & IFF_RUNNING) {
                if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
                nfe_txeof(sc);
                return;
        }

        if_printf(ifp, "watchdog timeout\n");

        nfe_init(ifp->if_softc);

        ifp->if_oerrors++;

        if (!ifq_is_empty(&ifp->if_snd))
                ifp->if_start(ifp);
}

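/*
 * Bring the chip up: (re)initialize both rings, select the descriptor
 * version magic for NFE_RXTX_CTL, program the station address and the
 * ring base addresses, wake up the MAC, configure interrupt
 * moderation, then enable the Rx and Tx engines.  Interrupts are left
 * masked while polling is active.
 */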
static void
nfe_init(void *xsc)
{
        struct nfe_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t tmp;
        int error;

        nfe_stop(sc);

        error = nfe_init_tx_ring(sc, &sc->txq);
        if (error) {
                nfe_stop(sc);
                return;
        }

        error = nfe_init_rx_ring(sc, &sc->rxq);
        if (error) {
                nfe_stop(sc);
                return;
        }

        NFE_WRITE(sc, NFE_TX_UNK, 0);
        NFE_WRITE(sc, NFE_STATUS, 0);

        sc->rxtxctl = NFE_RXTX_BIT2;
        if (sc->sc_flags & NFE_40BIT_ADDR)
                sc->rxtxctl |= NFE_RXTX_V3MAGIC;
        else if (sc->sc_flags & NFE_JUMBO_SUP)
                sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
        if (sc->sc_flags & NFE_HW_CSUM)
                sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif

        /*
         * Although the adapter is capable of stripping VLAN tags from received
         * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
         * purpose.  This will be done in software by our network stack.
         */
        if (sc->sc_flags & NFE_HW_VLAN)
                sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
        DELAY(10);
        NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

        if (sc->sc_flags & NFE_HW_VLAN)
                NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

        NFE_WRITE(sc, NFE_SETUP_R6, 0);

        /* set MAC address */
        nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

        /* tell MAC where rings are in memory */
#ifdef __LP64__
        NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
        NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
        NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
        NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

        NFE_WRITE(sc, NFE_RING_SIZE,
            (NFE_RX_RING_COUNT - 1) << 16 |
            (NFE_TX_RING_COUNT - 1));

        NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

        /* force MAC to wakeup */
        tmp = NFE_READ(sc, NFE_PWR_STATE);
        NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
        DELAY(10);
        tmp = NFE_READ(sc, NFE_PWR_STATE);
        NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
        /* configure interrupts coalescing/mitigation */
        NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
        /* no interrupt mitigation: one interrupt per packet */
        NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

        NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
        NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
        NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

        /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
        NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

        NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
        NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

        sc->rxtxctl &= ~NFE_RXTX_BIT2;
        NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
        DELAY(10);
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

        /* set Rx filter */
        nfe_setmulti(sc);

        nfe_ifmedia_upd(ifp);

        /* enable Rx */
        NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

        /* enable Tx */
        NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
        if ((ifp->if_flags & IFF_POLLING) == 0)
#endif
        /* enable interrupts */
        NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

        callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
}

static void
nfe_stop(struct nfe_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;

        callout_stop(&sc->sc_tick_ch);

        ifp->if_timer = 0;
        ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

        /* Abort Tx */
        NFE_WRITE(sc, NFE_TX_CTL, 0);

        /* Disable Rx */
        NFE_WRITE(sc, NFE_RX_CTL, 0);

        /* Disable interrupts */
        NFE_WRITE(sc, NFE_IRQ_MASK, 0);

        /* Reset Tx and Rx rings */
        nfe_reset_tx_ring(sc, &sc->txq);
        nfe_reset_rx_ring(sc, &sc->rxq);
}

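/*
 * Rx ring setup uses two DMA tags: 'tag' covers the descriptor array
 * (one contiguous allocation), while 'data_tag' provides one map per
 * receive slot plus a spare (data_tmpmap), presumably so the buffer
 * replacement path can load a fresh mbuf into the spare map before
 * giving up the currently loaded one.
 */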
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
        int i, j, error, descsize;
        void **desc;

        if (sc->sc_flags & NFE_40BIT_ADDR) {
                desc = (void **)&ring->desc64;
                descsize = sizeof(struct nfe_desc64);
        } else {
                desc = (void **)&ring->desc32;
                descsize = sizeof(struct nfe_desc32);
        }

        ring->bufsz = MCLBYTES;
        ring->cur = ring->next = 0;

        error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
                                   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   NFE_RX_RING_COUNT * descsize, 1,
                                   NFE_RX_RING_COUNT * descsize,
                                   0, &ring->tag);
        if (error) {
                if_printf(&sc->arpcom.ac_if,
                          "could not create desc RX DMA tag\n");
                return error;
        }

        error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
                                 &ring->map);
        if (error) {
                if_printf(&sc->arpcom.ac_if,
                          "could not allocate RX desc DMA memory\n");
                bus_dma_tag_destroy(ring->tag);
                ring->tag = NULL;
                return error;
        }

        error = bus_dmamap_load(ring->tag, ring->map, *desc,
                                NFE_RX_RING_COUNT * descsize,
                                nfe_ring_dma_addr, &ring->physaddr,
                                BUS_DMA_WAITOK);
        if (error) {
                if_printf(&sc->arpcom.ac_if,
                          "could not load RX desc DMA map\n");
                bus_dmamem_free(ring->tag, *desc, ring->map);
                bus_dma_tag_destroy(ring->tag);
                ring->tag = NULL;
                return error;
        }

        if (sc->sc_flags & NFE_USE_JUMBO) {
                ring->bufsz = NFE_JBYTES;

                error = nfe_jpool_alloc(sc, ring);
                if (error) {
                        if_printf(&sc->arpcom.ac_if,
                                  "could not allocate jumbo frames\n");
                        return error;
                }
        }

        error = bus_dma_tag_create(NULL, 1, 0,
                                   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
                                   NULL, NULL,
                                   MCLBYTES, 1, MCLBYTES,
                                   0, &ring->data_tag);
        if (error) {
                if_printf(&sc->arpcom.ac_if,
                          "could not create RX mbuf DMA tag\n");
                return error;
        }

        /* Create a spare RX mbuf DMA map */
        error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
        if (error) {
                if_printf(&sc->arpcom.ac_if,
                          "could not create spare RX mbuf DMA map\n");
                bus_dma_tag_destroy(ring->data_tag);
                ring->data_tag = NULL;
                return error;
        }

        for (i = 0; i < NFE_RX_RING_COUNT; i++) {
                error = bus_dmamap_create(ring->data_tag, 0,
                                          &ring->data[i].map);
                if (error) {
                        if_printf(&sc->arpcom.ac_if,
                                  "could not create %dth RX mbuf DMA map\n", i);
                        goto fail;
                }
        }
        return 0;
fail:
        for (j = 0; j < i; ++j)
                bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1428         bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1429         bus_dma_tag_destroy(ring->data_tag);
1430         ring->data_tag = NULL;
1431         return error;
1432 }
1433
1434 static void
1435 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1436 {
1437         int i;
1438
1439         for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1440                 struct nfe_rx_data *data = &ring->data[i];
1441
1442                 if (data->m != NULL) {
1443                         bus_dmamap_unload(ring->data_tag, data->map);
1444                         m_freem(data->m);
1445                         data->m = NULL;
1446                 }
1447         }
1448         bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1449
1450         ring->cur = ring->next = 0;
1451 }
1452
1453 static int
1454 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1455 {
1456         int i;
1457
1458         for (i = 0; i < NFE_RX_RING_COUNT; ++i) {
1459                 int error;
1460
1461                 /* XXX should use a function pointer */
1462                 if (sc->sc_flags & NFE_USE_JUMBO)
1463                         error = nfe_newbuf_jumbo(sc, ring, i, 1);
1464                 else
1465                         error = nfe_newbuf_std(sc, ring, i, 1);
1466                 if (error) {
1467                         if_printf(&sc->arpcom.ac_if,
1468                                   "could not allocate RX buffer\n");
1469                         return error;
1470                 }
1471
1472                 nfe_set_ready_rxdesc(sc, ring, i);
1473         }
1474         bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1475
1476         return 0;
1477 }
1478
1479 static void
1480 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1481 {
1482         if (ring->data_tag != NULL) {
1483                 struct nfe_rx_data *data;
1484                 int i;
1485
1486                 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1487                         data = &ring->data[i];
1488
1489                         if (data->m != NULL) {
1490                                 bus_dmamap_unload(ring->data_tag, data->map);
1491                                 m_freem(data->m);
1492                         }
1493                         bus_dmamap_destroy(ring->data_tag, data->map);
1494                 }
1495                 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1496                 bus_dma_tag_destroy(ring->data_tag);
1497         }
1498
1499         nfe_jpool_free(sc, ring);
1500
1501         if (ring->tag != NULL) {
1502                 void *desc;
1503
1504                 if (sc->sc_flags & NFE_40BIT_ADDR)
1505                         desc = ring->desc64;
1506                 else
1507                         desc = ring->desc32;
1508
1509                 bus_dmamap_unload(ring->tag, ring->map);
1510                 bus_dmamem_free(ring->tag, desc, ring->map);
1511                 bus_dma_tag_destroy(ring->tag);
1512         }
1513 }
1514
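/*
 * Pop a jumbo buffer off the free list.  The free list is also
 * manipulated by the mbuf external storage callbacks, so it is
 * protected by its own serializer.
 */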
1515 static struct nfe_jbuf *
1516 nfe_jalloc(struct nfe_softc *sc)
1517 {
1518         struct ifnet *ifp = &sc->arpcom.ac_if;
1519         struct nfe_jbuf *jbuf;
1520
1521         lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1522
1523         jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
1524         if (jbuf != NULL) {
1525                 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
1526                 jbuf->inuse = 1;
1527         } else {
1528                 if_printf(ifp, "no free jumbo buffer\n");
1529         }
1530
1531         lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1532
1533         return jbuf;
1534 }
1535
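/*
 * mbuf external storage free callback: drop one reference on the
 * jumbo buffer and return it to the free list once it is unused.
 */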
1536 static void
1537 nfe_jfree(void *arg)
1538 {
1539         struct nfe_jbuf *jbuf = arg;
1540         struct nfe_softc *sc = jbuf->sc;
1541         struct nfe_rx_ring *ring = jbuf->ring;
1542
1543         if (&ring->jbuf[jbuf->slot] != jbuf)
1544                 panic("%s: free wrong jumbo buffer\n", __func__);
1545         else if (jbuf->inuse == 0)
1546                 panic("%s: jumbo buffer already freed\n", __func__);
1547
1548         lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1549         atomic_subtract_int(&jbuf->inuse, 1);
1550         if (jbuf->inuse == 0)
1551                 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1552         lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1553 }
1554
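/*
 * mbuf external storage reference callback: grab an additional
 * reference on the jumbo buffer.
 */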
1555 static void
1556 nfe_jref(void *arg)
1557 {
1558         struct nfe_jbuf *jbuf = arg;
1559         struct nfe_rx_ring *ring = jbuf->ring;
1560
1561         if (&ring->jbuf[jbuf->slot] != jbuf)
1562                 panic("%s: ref wrong jumbo buffer\n", __func__);
1563         else if (jbuf->inuse == 0)
1564                 panic("%s: jumbo buffer already freed\n", __func__);
1565
1566         atomic_add_int(&jbuf->inuse, 1);
1567 }
1568
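/*
 * Allocate one physically contiguous chunk of DMA'able memory and
 * carve it into NFE_JPOOL_COUNT jumbo buffers linked on a free list.
 */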
1569 static int
1570 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1571 {
1572         struct nfe_jbuf *jbuf;
1573         bus_addr_t physaddr;
1574         caddr_t buf;
1575         int i, error;
1576
1577         /*
1578          * Allocate a big chunk of DMA'able memory.
1579          */
1580         error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1581                                    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1582                                    NULL, NULL,
1583                                    NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
1584                                    0, &ring->jtag);
1585         if (error) {
1586                 if_printf(&sc->arpcom.ac_if,
1587                           "could not create jumbo DMA tag\n");
1588                 return error;
1589         }
1590
1591         error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
1592                                  BUS_DMA_WAITOK, &ring->jmap);
1593         if (error) {
1594                 if_printf(&sc->arpcom.ac_if,
1595                           "could not allocate jumbo DMA memory\n");
1596                 bus_dma_tag_destroy(ring->jtag);
1597                 ring->jtag = NULL;
1598                 return error;
1599         }
1600
1601         error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
1602                                 NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
1603                                 BUS_DMA_WAITOK);
1604         if (error) {
1605                 if_printf(&sc->arpcom.ac_if,
1606                           "could not load jumbo DMA map\n");
1607                 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1608                 bus_dma_tag_destroy(ring->jtag);
1609                 ring->jtag = NULL;
1610                 return error;
1611         }
1612
1613         /* ...and split it into 9KB chunks */
1614         SLIST_INIT(&ring->jfreelist);
1615
1616         buf = ring->jpool;
1617         for (i = 0; i < NFE_JPOOL_COUNT; i++) {
1618                 jbuf = &ring->jbuf[i];
1619
1620                 jbuf->sc = sc;
1621                 jbuf->ring = ring;
1622                 jbuf->inuse = 0;
1623                 jbuf->slot = i;
1624                 jbuf->buf = buf;
1625                 jbuf->physaddr = physaddr;
1626
1627                 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1628
1629                 buf += NFE_JBYTES;
1630                 physaddr += NFE_JBYTES;
1631         }
1632
1633         return 0;
1634 }
1635
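/* Release the jumbo buffer pool allocated by nfe_jpool_alloc(). */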
1636 static void
1637 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1638 {
1639         if (ring->jtag != NULL) {
1640                 bus_dmamap_unload(ring->jtag, ring->jmap);
1641                 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1642                 bus_dma_tag_destroy(ring->jtag);
1643         }
1644 }
1645
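/*
 * Allocate the TX descriptor ring and one DMA map per TX slot.  The
 * descriptor size depends on whether the chip uses 40-bit addressing.
 */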
1646 static int
1647 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1648 {
1649         int i, j, error, descsize;
1650         void **desc;
1651
1652         if (sc->sc_flags & NFE_40BIT_ADDR) {
1653                 desc = (void **)&ring->desc64;
1654                 descsize = sizeof(struct nfe_desc64);
1655         } else {
1656                 desc = (void **)&ring->desc32;
1657                 descsize = sizeof(struct nfe_desc32);
1658         }
1659
1660         ring->queued = 0;
1661         ring->cur = ring->next = 0;
1662
1663         error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1664                                    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1665                                    NULL, NULL,
1666                                    NFE_TX_RING_COUNT * descsize, 1,
1667                                    NFE_TX_RING_COUNT * descsize,
1668                                    0, &ring->tag);
1669         if (error) {
1670                 if_printf(&sc->arpcom.ac_if,
1671                           "could not create TX desc DMA tag\n");
1672                 return error;
1673         }
1674
1675         error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
1676                                  &ring->map);
1677         if (error) {
1678                 if_printf(&sc->arpcom.ac_if,
1679                           "could not allocate TX desc DMA memory\n");
1680                 bus_dma_tag_destroy(ring->tag);
1681                 ring->tag = NULL;
1682                 return error;
1683         }
1684
1685         error = bus_dmamap_load(ring->tag, ring->map, *desc,
1686                                 NFE_TX_RING_COUNT * descsize,
1687                                 nfe_ring_dma_addr, &ring->physaddr,
1688                                 BUS_DMA_WAITOK);
1689         if (error) {
1690                 if_printf(&sc->arpcom.ac_if,
1691                           "could not load TX desc DMA map\n");
1692                 bus_dmamem_free(ring->tag, *desc, ring->map);
1693                 bus_dma_tag_destroy(ring->tag);
1694                 ring->tag = NULL;
1695                 return error;
1696         }
1697
1698         error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1699                                    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1700                                    NULL, NULL,
1701                                    NFE_JBYTES * NFE_MAX_SCATTER,
1702                                    NFE_MAX_SCATTER, NFE_JBYTES,
1703                                    0, &ring->data_tag);
1704         if (error) {
1705                 if_printf(&sc->arpcom.ac_if,
1706                           "could not create TX buf DMA tag\n");
1707                 return error;
1708         }
1709
1710         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1711                 error = bus_dmamap_create(ring->data_tag, 0,
1712                                           &ring->data[i].map);
1713                 if (error) {
1714                         if_printf(&sc->arpcom.ac_if,
1715                                   "could not create %dth TX buf DMA map\n", i);
1716                         goto fail;
1717                 }
1718         }
1719
1720         return 0;
1721 fail:
1722         for (j = 0; j < i; ++j)
1723                 bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1724         bus_dma_tag_destroy(ring->data_tag);
1725         ring->data_tag = NULL;
1726         return error;
1727 }
1728
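/*
 * Clear all TX descriptors, free any mbufs still queued and reset
 * the ring indices.
 */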
1729 static void
1730 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1731 {
1732         int i;
1733
1734         for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1735                 struct nfe_tx_data *data = &ring->data[i];
1736
1737                 if (sc->sc_flags & NFE_40BIT_ADDR)
1738                         ring->desc64[i].flags = 0;
1739                 else
1740                         ring->desc32[i].flags = 0;
1741
1742                 if (data->m != NULL) {
1743                         bus_dmamap_sync(ring->data_tag, data->map,
1744                                         BUS_DMASYNC_POSTWRITE);
1745                         bus_dmamap_unload(ring->data_tag, data->map);
1746                         m_freem(data->m);
1747                         data->m = NULL;
1748                 }
1749         }
1750         bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1751
1752         ring->queued = 0;
1753         ring->cur = ring->next = 0;
1754 }
1755
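/*
 * The TX ring needs no per-init setup beyond what
 * nfe_reset_tx_ring() already does.
 */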
1756 static int
1757 nfe_init_tx_ring(struct nfe_softc *sc __unused,
1758                  struct nfe_tx_ring *ring __unused)
1759 {
1760         return 0;
1761 }
1762
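/*
 * Release everything held by the TX ring: queued mbufs, per-slot
 * DMA maps and the descriptor memory itself.
 */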
1763 static void
1764 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1765 {
1766         if (ring->data_tag != NULL) {
1767                 struct nfe_tx_data *data;
1768                 int i;
1769
1770                 for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
1771                         data = &ring->data[i];
1772
1773                         if (data->m != NULL) {
1774                                 bus_dmamap_unload(ring->data_tag, data->map);
1775                                 m_freem(data->m);
1776                         }
1777                         bus_dmamap_destroy(ring->data_tag, data->map);
1778                 }
1779
1780                 bus_dma_tag_destroy(ring->data_tag);
1781         }
1782
1783         if (ring->tag != NULL) {
1784                 void *desc;
1785
1786                 if (sc->sc_flags & NFE_40BIT_ADDR)
1787                         desc = ring->desc64;
1788                 else
1789                         desc = ring->desc32;
1790
1791                 bus_dmamap_unload(ring->tag, ring->map);
1792                 bus_dmamem_free(ring->tag, desc, ring->map);
1793                 bus_dma_tag_destroy(ring->tag);
1794         }
1795 }
1796
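/*
 * ifmedia callback: reset the PHY(s) and program the newly selected
 * media.
 */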
1797 static int
1798 nfe_ifmedia_upd(struct ifnet *ifp)
1799 {
1800         struct nfe_softc *sc = ifp->if_softc;
1801         struct mii_data *mii = device_get_softc(sc->sc_miibus);
1802
1803         if (mii->mii_instance != 0) {
1804                 struct mii_softc *miisc;
1805
1806                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1807                         mii_phy_reset(miisc);
1808         }
1809         mii_mediachg(mii);
1810
1811         return 0;
1812 }
1813
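/* ifmedia callback: report the current link state and active media. */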
1814 static void
1815 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1816 {
1817         struct nfe_softc *sc = ifp->if_softc;
1818         struct mii_data *mii = device_get_softc(sc->sc_miibus);
1819
1820         mii_pollstat(mii);
1821         ifmr->ifm_status = mii->mii_media_status;
1822         ifmr->ifm_active = mii->mii_media_active;
1823 }
1824
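/*
 * Program the receive filter.  The hardware matches one address/mask
 * pair, so reduce the multicast list to the bits all addresses have
 * in common: addr keeps the common value, mask keeps only the bit
 * positions on which every address agrees.
 */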
1825 static void
1826 nfe_setmulti(struct nfe_softc *sc)
1827 {
1828         struct ifnet *ifp = &sc->arpcom.ac_if;
1829         struct ifmultiaddr *ifma;
1830         uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1831         uint32_t filter = NFE_RXFILTER_MAGIC;
1832         int i;
1833
1834         if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1835                 bzero(addr, ETHER_ADDR_LEN);
1836                 bzero(mask, ETHER_ADDR_LEN);
1837                 goto done;
1838         }
1839
1840         bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1841         bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1842
1843         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1844                 caddr_t maddr;
1845
1846                 if (ifma->ifma_addr->sa_family != AF_LINK)
1847                         continue;
1848
1849                 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1850                 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1851                         addr[i] &= maddr[i];
1852                         mask[i] &= ~maddr[i];
1853                 }
1854         }
1855
1856         for (i = 0; i < ETHER_ADDR_LEN; i++)
1857                 mask[i] |= addr[i];
1858
1859 done:
1860         addr[0] |= 0x01;        /* make sure multicast bit is set */
1861
1862         NFE_WRITE(sc, NFE_MULTIADDR_HI,
1863             addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1864         NFE_WRITE(sc, NFE_MULTIADDR_LO,
1865             addr[5] <<  8 | addr[4]);
1866         NFE_WRITE(sc, NFE_MULTIMASK_HI,
1867             mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1868         NFE_WRITE(sc, NFE_MULTIMASK_LO,
1869             mask[5] <<  8 | mask[4]);
1870
1871         filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1872         NFE_WRITE(sc, NFE_RXFILTER, filter);
1873 }
1874
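/*
 * Fetch the factory MAC address.  At power-up the chip holds the
 * address in byte-reversed order, hence the layout differs from the
 * one nfe_set_macaddr() programs.
 */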
1875 static void
1876 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1877 {
1878         uint32_t tmp;
1879
1880         tmp = NFE_READ(sc, NFE_MACADDR_LO);
1881         addr[0] = (tmp >> 8) & 0xff;
1882         addr[1] = (tmp & 0xff);
1883
1884         tmp = NFE_READ(sc, NFE_MACADDR_HI);
1885         addr[2] = (tmp >> 24) & 0xff;
1886         addr[3] = (tmp >> 16) & 0xff;
1887         addr[4] = (tmp >>  8) & 0xff;
1888         addr[5] = (tmp & 0xff);
1889 }
1890
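/* Program the current station address into the chip. */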
1891 static void
1892 nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1893 {
1894         NFE_WRITE(sc, NFE_MACADDR_LO,
1895             addr[5] <<  8 | addr[4]);
1896         NFE_WRITE(sc, NFE_MACADDR_HI,
1897             addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1898 }
1899
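/* Once-a-second callout: drive the MII state machine and reschedule. */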
1900 static void
1901 nfe_tick(void *arg)
1902 {
1903         struct nfe_softc *sc = arg;
1904         struct ifnet *ifp = &sc->arpcom.ac_if;
1905         struct mii_data *mii = device_get_softc(sc->sc_miibus);
1906
1907         lwkt_serialize_enter(ifp->if_serializer);
1908
1909         mii_tick(mii);
1910         callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1911
1912         lwkt_serialize_exit(ifp->if_serializer);
1913 }
1914
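/* busdma callback: record the bus address of a one-segment ring load. */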
1915 static void
1916 nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
1917 {
1918         if (error)
1919                 return;
1920
1921         KASSERT(nseg == 1, ("too many segments, should be 1\n"));
1922
1923         *((bus_addr_t *)arg) = seg->ds_addr;
1924 }
1925
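/*
 * busdma callback: copy the DMA segments of a loaded mbuf into the
 * caller-supplied context and record how many were actually used.
 */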
1926 static void
1927 nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
1928                  bus_size_t mapsz __unused, int error)
1929 {
1930         struct nfe_dma_ctx *ctx = arg;
1931         int i;
1932
1933         if (error)
1934                 return;
1935
1936         KASSERT(nsegs <= ctx->nsegs,
1937                 ("too many segments (%d), should be <= %d\n",
1938                  nsegs, ctx->nsegs));
1939
1940         ctx->nsegs = nsegs;
1941         for (i = 0; i < nsegs; ++i)
1942                 ctx->segs[i] = segs[i];
1943 }
1944
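/*
 * Attach a fresh standard cluster to an RX slot.  The new mbuf is
 * loaded into the spare DMA map first, so the slot keeps its old
 * mbuf mapped if the load fails; on success the two maps are swapped.
 */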
1945 static int
1946 nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
1947                int wait)
1948 {
1949         struct nfe_rx_data *data = &ring->data[idx];
1950         struct nfe_dma_ctx ctx;
1951         bus_dma_segment_t seg;
1952         bus_dmamap_t map;
1953         struct mbuf *m;
1954         int error;
1955
1956         m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
1957         if (m == NULL)
1958                 return ENOBUFS;
1959         m->m_len = m->m_pkthdr.len = MCLBYTES;
1960
1961         ctx.nsegs = 1;
1962         ctx.segs = &seg;
1963         error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
1964                                      m, nfe_buf_dma_addr, &ctx,
1965                                      wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
1966         if (error) {
1967                 m_freem(m);
1968                 if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n", error);
1969                 return error;
1970         }
1971
1972         /* Unload originally mapped mbuf */
1973         bus_dmamap_unload(ring->data_tag, data->map);
1974
1975         /* Swap this DMA map with tmp DMA map */
1976         map = data->map;
1977         data->map = ring->data_tmpmap;
1978         ring->data_tmpmap = map;
1979
1980         /* Caller is assumed to have collected the old mbuf */
1981         data->m = m;
1982
1983         nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
1984
1985         bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
1986         return 0;
1987 }
1988
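/*
 * Attach a jumbo buffer to an RX slot.  The jumbo pool was mapped as
 * a whole in nfe_jpool_alloc(), so no per-buffer DMA load is needed.
 */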
1989 static int
1990 nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
1991                  int wait)
1992 {
1993         struct nfe_rx_data *data = &ring->data[idx];
1994         struct nfe_jbuf *jbuf;
1995         struct mbuf *m;
1996
1997         MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
1998         if (m == NULL)
1999                 return ENOBUFS;
2000
2001         jbuf = nfe_jalloc(sc);
2002         if (jbuf == NULL) {
2003                 m_freem(m);
2004                 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
2005                     "-- packet dropped!\n");
2006                 return ENOBUFS;
2007         }
2008
2009         m->m_ext.ext_arg = jbuf;
2010         m->m_ext.ext_buf = jbuf->buf;
2011         m->m_ext.ext_free = nfe_jfree;
2012         m->m_ext.ext_ref = nfe_jref;
2013         m->m_ext.ext_size = NFE_JBYTES;
2014
2015         m->m_data = m->m_ext.ext_buf;
2016         m->m_flags |= M_EXT;
2017         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2018
2019         /* Caller is assumed to have collected the old mbuf */
2020         data->m = m;
2021
2022         nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
2023
2024         bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
2025         return 0;
2026 }
2027
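/*
 * Store the buffer's bus address in the RX descriptor, splitting it
 * across the two words of the 64-bit descriptor format.
 */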
2028 static void
2029 nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2030                      bus_addr_t physaddr)
2031 {
2032         if (sc->sc_flags & NFE_40BIT_ADDR) {
2033                 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2034
2035 #if defined(__LP64__)
2036                 desc64->physaddr[0] = htole32(physaddr >> 32);
2037 #endif
2038                 desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
2039         } else {
2040                 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2041
2042                 desc32->physaddr = htole32(physaddr);
2043         }
2044 }
2045
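/* Mark an RX descriptor ready, handing it back to the hardware. */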
2046 static void
2047 nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
2048 {
2049         if (sc->sc_flags & NFE_40BIT_ADDR) {
2050                 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2051
2052                 desc64->length = htole16(ring->bufsz);
2053                 desc64->flags = htole16(NFE_RX_READY);
2054         } else {
2055                 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2056
2057                 desc32->length = htole16(ring->bufsz);
2058                 desc32->flags = htole16(NFE_RX_READY);
2059         }
2060 }