 1/* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */
 2/* $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.6 2006/12/13 14:48:26 tgen Exp $ */
3
4/*
5 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
6 *
7 * This code is derived from software contributed to The DragonFly Project
8 * by Sepherosa Ziehau <sepherosa@gmail.com> and
9 * Matthew Dillon <dillon@apollo.backplane.com>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 * 3. Neither the name of The DragonFly Project nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific, prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 */
38
39/*
40 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
41 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
42 *
43 * Permission to use, copy, modify, and distribute this software for any
44 * purpose with or without fee is hereby granted, provided that the above
45 * copyright notice and this permission notice appear in all copies.
46 *
47 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
48 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
49 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
50 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
51 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
52 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
53 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
54 */
55
56/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
57
58#include "opt_polling.h"
59
60#include <sys/param.h>
61#include <sys/endian.h>
62#include <sys/kernel.h>
63#include <sys/bus.h>
64#include <sys/proc.h>
65#include <sys/rman.h>
66#include <sys/serialize.h>
67#include <sys/socket.h>
68#include <sys/sockio.h>
69#include <sys/sysctl.h>
70
71#include <net/ethernet.h>
72#include <net/if.h>
73#include <net/bpf.h>
74#include <net/if_arp.h>
75#include <net/if_dl.h>
76#include <net/if_media.h>
77#include <net/ifq_var.h>
78#include <net/if_types.h>
79#include <net/if_var.h>
80#include <net/vlan/if_vlan_var.h>
81
82#include <bus/pci/pcireg.h>
83#include <bus/pci/pcivar.h>
84#include <bus/pci/pcidevs.h>
85
86#include <dev/netif/mii_layer/mii.h>
87#include <dev/netif/mii_layer/miivar.h>
88
89#include "miibus_if.h"
90
91#include "if_nfereg.h"
92#include "if_nfevar.h"
93
94static int nfe_probe(device_t);
95static int nfe_attach(device_t);
96static int nfe_detach(device_t);
97static void nfe_shutdown(device_t);
98static int nfe_resume(device_t);
99static int nfe_suspend(device_t);
100
101static int nfe_miibus_readreg(device_t, int, int);
102static void nfe_miibus_writereg(device_t, int, int, int);
103static void nfe_miibus_statchg(device_t);
104
105#ifdef DEVICE_POLLING
106static void nfe_poll(struct ifnet *, enum poll_cmd, int);
107#endif
108static void nfe_intr(void *);
109static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
110static void nfe_rxeof(struct nfe_softc *);
111static void nfe_txeof(struct nfe_softc *);
112static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
113 struct mbuf *);
114static void nfe_start(struct ifnet *);
115static void nfe_watchdog(struct ifnet *);
116static void nfe_init(void *);
117static void nfe_stop(struct nfe_softc *);
118static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
119static void nfe_jfree(void *);
120static void nfe_jref(void *);
121static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
122static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
123static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
124static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
125static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
126static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
127static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
128static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
129static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
130static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
131static int nfe_ifmedia_upd(struct ifnet *);
132static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
133static void nfe_setmulti(struct nfe_softc *);
134static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
135static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
136static void nfe_tick(void *);
137static void nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int);
138static void nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
139 int);
140static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
141 int, bus_addr_t);
142static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
143 int);
144static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
145 int);
146static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
147 int);
148
149#define NFE_DEBUG
150#ifdef NFE_DEBUG
151
152static int nfe_debug = 0;
153
154SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters");
155SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0,
156 "control debugging printfs");
157
158#define DPRINTF(sc, fmt, ...) do { \
159 if (nfe_debug) { \
160 if_printf(&(sc)->arpcom.ac_if, \
161 fmt, __VA_ARGS__); \
162 } \
163} while (0)
164
165#define DPRINTFN(sc, lv, fmt, ...) do { \
166 if (nfe_debug >= (lv)) { \
167 if_printf(&(sc)->arpcom.ac_if, \
168 fmt, __VA_ARGS__); \
169 } \
170} while (0)
171
172#else /* !NFE_DEBUG */
173
174#define DPRINTF(sc, fmt, ...)
175#define DPRINTFN(sc, lv, fmt, ...)
176
177#endif /* NFE_DEBUG */
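/*
 * The DPRINTF/DPRINTFN macros above are gated by the hw.nfe.debug sysctl
 * declared just before them; assuming standard sysctl(8) usage, e.g.
 * `sysctl hw.nfe.debug=2' enables every DPRINTF plus all DPRINTFN
 * messages with a level of 2 or less.
 */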
178
179struct nfe_dma_ctx {
180 int nsegs;
181 bus_dma_segment_t *segs;
182};
183
184static const struct nfe_dev {
185 uint16_t vid;
186 uint16_t did;
187 const char *desc;
188} nfe_devices[] = {
189 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
87e3db44 190 "NVIDIA nForce Fast Ethernet" },
ae813fd8
SZ
191
192 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
87e3db44 193 "NVIDIA nForce2 Fast Ethernet" },
ae813fd8
SZ
194
195 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
196 "NVIDIA nForce3 Gigabit Ethernet" },
197
 198 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
 199 chipset, and possibly also the 400R; it might be that both nForce2- and
 200 nForce3-based boards can use the same MCPs (= southbridges) */
201 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
202 "NVIDIA nForce3 Gigabit Ethernet" },
203
204 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
205 "NVIDIA nForce3 Gigabit Ethernet" },
206
207 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
208 "NVIDIA nForce3 Gigabit Ethernet" },
209
210 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
211 "NVIDIA nForce3 Gigabit Ethernet" },
212
213 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
214 "NVIDIA CK804 Gigabit Ethernet" },
215
216 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
217 "NVIDIA CK804 Gigabit Ethernet" },
218
219 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
220 "NVIDIA MCP04 Gigabit Ethernet" },
221
222 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
223 "NVIDIA MCP04 Gigabit Ethernet" },
224
225 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
226 "NVIDIA MCP51 Gigabit Ethernet" },
227
228 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
229 "NVIDIA MCP51 Gigabit Ethernet" },
230
231 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
232 "NVIDIA MCP55 Gigabit Ethernet" },
233
234 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
235 "NVIDIA MCP55 Gigabit Ethernet" },
236
237 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
238 "NVIDIA MCP61 Gigabit Ethernet" },
239
240 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
241 "NVIDIA MCP61 Gigabit Ethernet" },
242
243 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
244 "NVIDIA MCP61 Gigabit Ethernet" },
245
246 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
247 "NVIDIA MCP61 Gigabit Ethernet" },
248
249 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
250 "NVIDIA MCP65 Gigabit Ethernet" },
251
252 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
253 "NVIDIA MCP65 Gigabit Ethernet" },
254
255 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
256 "NVIDIA MCP65 Gigabit Ethernet" },
257
258 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
259 "NVIDIA MCP65 Gigabit Ethernet" }
260};
261
262static device_method_t nfe_methods[] = {
263 /* Device interface */
264 DEVMETHOD(device_probe, nfe_probe),
265 DEVMETHOD(device_attach, nfe_attach),
266 DEVMETHOD(device_detach, nfe_detach),
267 DEVMETHOD(device_suspend, nfe_suspend),
268 DEVMETHOD(device_resume, nfe_resume),
269 DEVMETHOD(device_shutdown, nfe_shutdown),
270
271 /* Bus interface */
272 DEVMETHOD(bus_print_child, bus_generic_print_child),
273 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
274
275 /* MII interface */
276 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
277 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
278 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
279
280 { 0, 0 }
281};
282
283static driver_t nfe_driver = {
284 "nfe",
285 nfe_methods,
286 sizeof(struct nfe_softc)
287};
288
289static devclass_t nfe_devclass;
290
291DECLARE_DUMMY_MODULE(if_nfe);
292MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
293DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
294DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
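/*
 * The declarations above register the driver as "nfe" on the PCI bus with
 * miibus attached beneath it.  As a rough usage sketch (the exact knobs
 * follow the usual DragonFly conventions and are not defined in this
 * file): a kernel config would list `device miibus' and `device nfe', or
 * the module can be loaded with `if_nfe_load="YES"' in loader.conf.
 */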
295
296static int
297nfe_probe(device_t dev)
298{
299 const struct nfe_dev *n;
300 uint16_t vid, did;
301
302 vid = pci_get_vendor(dev);
303 did = pci_get_device(dev);
304 for (n = nfe_devices; n->desc != NULL; ++n) {
305 if (vid == n->vid && did == n->did) {
306 struct nfe_softc *sc = device_get_softc(dev);
307
308 switch (did) {
309 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
310 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
311 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
312 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
313 sc->sc_flags = NFE_JUMBO_SUP |
314 NFE_HW_CSUM;
315 break;
316 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
317 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
318 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
319 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
320 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
321 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
322 sc->sc_flags = NFE_40BIT_ADDR;
323 break;
324 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
325 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
326 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
327 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
328 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
329 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
330 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
331 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
332 sc->sc_flags = NFE_JUMBO_SUP |
333 NFE_40BIT_ADDR |
334 NFE_HW_CSUM;
335 break;
336 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
337 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
338 sc->sc_flags = NFE_JUMBO_SUP |
339 NFE_40BIT_ADDR |
340 NFE_HW_CSUM |
341 NFE_HW_VLAN;
342 break;
343 }
344
345 /* Enable jumbo frames for adapters that support it */
346 if (sc->sc_flags & NFE_JUMBO_SUP)
347 sc->sc_flags |= NFE_USE_JUMBO;
348
349 device_set_desc(dev, n->desc);
350 return 0;
351 }
352 }
353 return ENXIO;
354}
355
356static int
357nfe_attach(device_t dev)
358{
359 struct nfe_softc *sc = device_get_softc(dev);
360 struct ifnet *ifp = &sc->arpcom.ac_if;
361 uint8_t eaddr[ETHER_ADDR_LEN];
362 int error;
363
364 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
365 lwkt_serialize_init(&sc->sc_jbuf_serializer);
366
367 sc->sc_mem_rid = PCIR_BAR(0);
368
369#ifndef BURN_BRIDGES
370 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
371 uint32_t mem, irq;
372
373 mem = pci_read_config(dev, sc->sc_mem_rid, 4);
374 irq = pci_read_config(dev, PCIR_INTLINE, 4);
375
376 device_printf(dev, "chip is in D%d power mode "
377 "-- setting to D0\n", pci_get_powerstate(dev));
378
379 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
380
381 pci_write_config(dev, sc->sc_mem_rid, mem, 4);
382 pci_write_config(dev, PCIR_INTLINE, irq, 4);
383 }
 384#endif /* !BURN_BRIDGES */
385
386 /* Enable bus mastering */
387 pci_enable_busmaster(dev);
388
389 /* Allocate IO memory */
390 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
391 &sc->sc_mem_rid, RF_ACTIVE);
392 if (sc->sc_mem_res == NULL) {
 393 device_printf(dev, "could not allocate io memory\n");
394 return ENXIO;
395 }
396 sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
397 sc->sc_memt = rman_get_bustag(sc->sc_mem_res);
398
399 /* Allocate IRQ */
400 sc->sc_irq_rid = 0;
401 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
402 &sc->sc_irq_rid,
403 RF_SHAREABLE | RF_ACTIVE);
404 if (sc->sc_irq_res == NULL) {
405 device_printf(dev, "could not allocate irq\n");
406 error = ENXIO;
407 goto fail;
408 }
409
410 nfe_get_macaddr(sc, eaddr);
411
412 /*
413 * Allocate Tx and Rx rings.
414 */
415 error = nfe_alloc_tx_ring(sc, &sc->txq);
416 if (error) {
417 device_printf(dev, "could not allocate Tx ring\n");
418 goto fail;
419 }
420
421 error = nfe_alloc_rx_ring(sc, &sc->rxq);
422 if (error) {
423 device_printf(dev, "could not allocate Rx ring\n");
424 goto fail;
425 }
426
427 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
428 nfe_ifmedia_sts);
429 if (error) {
430 device_printf(dev, "MII without any phy\n");
431 goto fail;
432 }
433
434 ifp->if_softc = sc;
435 ifp->if_mtu = ETHERMTU;
436 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
437 ifp->if_ioctl = nfe_ioctl;
438 ifp->if_start = nfe_start;
439#ifdef DEVICE_POLLING
440 ifp->if_poll = nfe_poll;
441#endif
442 ifp->if_watchdog = nfe_watchdog;
443 ifp->if_init = nfe_init;
444 ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
445 ifq_set_ready(&ifp->if_snd);
446
447 ifp->if_capabilities = IFCAP_VLAN_MTU;
448
449#if 0
450 if (sc->sc_flags & NFE_USE_JUMBO)
451 ifp->if_hardmtu = NFE_JUMBO_MTU;
452#endif
453
454 if (sc->sc_flags & NFE_HW_VLAN)
455 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
456
457#ifdef NFE_CSUM
458 if (sc->sc_flags & NFE_HW_CSUM) {
459#if 0
460 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
461 IFCAP_CSUM_UDPv4;
462#else
463 ifp->if_capabilities = IFCAP_HWCSUM;
464 ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
465#endif
466 }
467#endif
468 ifp->if_capenable = ifp->if_capabilities;
469
470 callout_init(&sc->sc_tick_ch);
471
472 ether_ifattach(ifp, eaddr, NULL);
473
474 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
475 &sc->sc_ih, ifp->if_serializer);
476 if (error) {
477 device_printf(dev, "could not setup intr\n");
478 ether_ifdetach(ifp);
479 goto fail;
480 }
481
482 return 0;
483fail:
484 nfe_detach(dev);
485 return error;
486}
487
488static int
489nfe_detach(device_t dev)
490{
491 struct nfe_softc *sc = device_get_softc(dev);
492
493 if (device_is_attached(dev)) {
494 struct ifnet *ifp = &sc->arpcom.ac_if;
495
496 lwkt_serialize_enter(ifp->if_serializer);
497 nfe_stop(sc);
498 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
499 lwkt_serialize_exit(ifp->if_serializer);
500
501 ether_ifdetach(ifp);
502 }
503
504 if (sc->sc_miibus != NULL)
505 device_delete_child(dev, sc->sc_miibus);
506 bus_generic_detach(dev);
507
508 if (sc->sc_irq_res != NULL) {
509 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
510 sc->sc_irq_res);
511 }
512
513 if (sc->sc_mem_res != NULL) {
514 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
515 sc->sc_mem_res);
516 }
517
518 nfe_free_tx_ring(sc, &sc->txq);
519 nfe_free_rx_ring(sc, &sc->rxq);
520
521 return 0;
522}
523
524static void
525nfe_shutdown(device_t dev)
526{
527 struct nfe_softc *sc = device_get_softc(dev);
528 struct ifnet *ifp = &sc->arpcom.ac_if;
529
530 lwkt_serialize_enter(ifp->if_serializer);
531 nfe_stop(sc);
532 lwkt_serialize_exit(ifp->if_serializer);
533}
534
535static int
536nfe_suspend(device_t dev)
537{
538 struct nfe_softc *sc = device_get_softc(dev);
539 struct ifnet *ifp = &sc->arpcom.ac_if;
540
541 lwkt_serialize_enter(ifp->if_serializer);
542 nfe_stop(sc);
543 lwkt_serialize_exit(ifp->if_serializer);
544
545 return 0;
546}
547
548static int
549nfe_resume(device_t dev)
550{
551 struct nfe_softc *sc = device_get_softc(dev);
552 struct ifnet *ifp = &sc->arpcom.ac_if;
553
554 lwkt_serialize_enter(ifp->if_serializer);
555 if (ifp->if_flags & IFF_UP) {
 556 nfe_init(sc);
557 if (ifp->if_flags & IFF_RUNNING)
558 ifp->if_start(ifp);
559 }
560 lwkt_serialize_exit(ifp->if_serializer);
561
562 return 0;
563}
564
565static void
566nfe_miibus_statchg(device_t dev)
567{
568 struct nfe_softc *sc = device_get_softc(dev);
569 struct mii_data *mii = device_get_softc(sc->sc_miibus);
570 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
571
572 phy = NFE_READ(sc, NFE_PHY_IFACE);
573 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
574
575 seed = NFE_READ(sc, NFE_RNDSEED);
576 seed &= ~NFE_SEED_MASK;
577
578 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
579 phy |= NFE_PHY_HDX; /* half-duplex */
580 misc |= NFE_MISC1_HDX;
581 }
582
583 switch (IFM_SUBTYPE(mii->mii_media_active)) {
584 case IFM_1000_T: /* full-duplex only */
585 link |= NFE_MEDIA_1000T;
586 seed |= NFE_SEED_1000T;
587 phy |= NFE_PHY_1000T;
588 break;
589 case IFM_100_TX:
590 link |= NFE_MEDIA_100TX;
591 seed |= NFE_SEED_100TX;
592 phy |= NFE_PHY_100TX;
593 break;
594 case IFM_10_T:
595 link |= NFE_MEDIA_10T;
596 seed |= NFE_SEED_10T;
597 break;
598 }
599
600 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
601
602 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
603 NFE_WRITE(sc, NFE_MISC1, misc);
604 NFE_WRITE(sc, NFE_LINKSPEED, link);
605}
606
607static int
608nfe_miibus_readreg(device_t dev, int phy, int reg)
609{
610 struct nfe_softc *sc = device_get_softc(dev);
611 uint32_t val;
612 int ntries;
613
614 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
615
616 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
617 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
618 DELAY(100);
619 }
620
621 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
622
623 for (ntries = 0; ntries < 1000; ntries++) {
624 DELAY(100);
625 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
626 break;
627 }
628 if (ntries == 1000) {
629 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
630 return 0;
631 }
632
633 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
634 DPRINTFN(sc, 2, "could not read PHY %s\n", "");
635 return 0;
636 }
637
638 val = NFE_READ(sc, NFE_PHY_DATA);
639 if (val != 0xffffffff && val != 0)
640 sc->mii_phyaddr = phy;
641
642 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
643
644 return val;
645}
646
647static void
648nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
649{
650 struct nfe_softc *sc = device_get_softc(dev);
651 uint32_t ctl;
652 int ntries;
653
654 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
655
656 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
657 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
658 DELAY(100);
659 }
660
661 NFE_WRITE(sc, NFE_PHY_DATA, val);
662 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
663 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
664
665 for (ntries = 0; ntries < 1000; ntries++) {
666 DELAY(100);
667 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
668 break;
669 }
670
671#ifdef NFE_DEBUG
672 if (ntries == 1000)
673 DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
674#endif
675}
676
677#ifdef DEVICE_POLLING
678
679static void
680nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
681{
682 struct nfe_softc *sc = ifp->if_softc;
683
684 switch(cmd) {
685 case POLL_REGISTER:
686 /* Disable interrupts */
687 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
688 break;
689 case POLL_DEREGISTER:
690 /* enable interrupts */
691 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
692 break;
693 case POLL_AND_CHECK_STATUS:
694 /* fall through */
695 case POLL_ONLY:
696 if (ifp->if_flags & IFF_RUNNING) {
697 nfe_rxeof(sc);
698 nfe_txeof(sc);
699 }
700 break;
701 }
702}
703
704#endif
705
706static void
707nfe_intr(void *arg)
708{
709 struct nfe_softc *sc = arg;
710 struct ifnet *ifp = &sc->arpcom.ac_if;
711 uint32_t r;
712
713 r = NFE_READ(sc, NFE_IRQ_STATUS);
714 if (r == 0)
715 return; /* not for us */
716 NFE_WRITE(sc, NFE_IRQ_STATUS, r);
717
718 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);
719
720 if (r & NFE_IRQ_LINK) {
721 NFE_READ(sc, NFE_PHY_STATUS);
722 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
723 DPRINTF(sc, "link state changed %s\n", "");
724 }
725
726 if (ifp->if_flags & IFF_RUNNING) {
727 /* check Rx ring */
728 nfe_rxeof(sc);
729
730 /* check Tx ring */
731 nfe_txeof(sc);
732 }
733}
734
735static int
736nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
737{
738 struct nfe_softc *sc = ifp->if_softc;
739 struct ifreq *ifr = (struct ifreq *)data;
740 struct mii_data *mii;
741 int error = 0, mask;
742
743 switch (cmd) {
744 case SIOCSIFMTU:
745 /* XXX NFE_USE_JUMBO should be set here */
746 break;
747 case SIOCSIFFLAGS:
748 if (ifp->if_flags & IFF_UP) {
749 /*
750 * If only the PROMISC or ALLMULTI flag changes, then
751 * don't do a full re-init of the chip, just update
752 * the Rx filter.
753 */
754 if ((ifp->if_flags & IFF_RUNNING) &&
755 ((ifp->if_flags ^ sc->sc_if_flags) &
756 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
757 nfe_setmulti(sc);
758 } else {
759 if (!(ifp->if_flags & IFF_RUNNING))
760 nfe_init(sc);
761 }
762 } else {
763 if (ifp->if_flags & IFF_RUNNING)
764 nfe_stop(sc);
765 }
766 sc->sc_if_flags = ifp->if_flags;
767 break;
768 case SIOCADDMULTI:
769 case SIOCDELMULTI:
770 if (ifp->if_flags & IFF_RUNNING)
771 nfe_setmulti(sc);
772 break;
773 case SIOCSIFMEDIA:
774 case SIOCGIFMEDIA:
775 mii = device_get_softc(sc->sc_miibus);
776 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
777 break;
778 case SIOCSIFCAP:
779 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
780 if (mask & IFCAP_HWCSUM) {
781 if (IFCAP_HWCSUM & ifp->if_capenable)
782 ifp->if_capenable &= ~IFCAP_HWCSUM;
783 else
784 ifp->if_capenable |= IFCAP_HWCSUM;
785 }
786 break;
787 default:
788 error = ether_ioctl(ifp, cmd, data);
789 break;
790 }
791 return error;
792}
793
794static void
795nfe_rxeof(struct nfe_softc *sc)
796{
797 struct ifnet *ifp = &sc->arpcom.ac_if;
798 struct nfe_rx_ring *ring = &sc->rxq;
799 int reap;
800
801 reap = 0;
802 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
803
804 for (;;) {
805 struct nfe_rx_data *data = &ring->data[ring->cur];
806 struct mbuf *m;
807 uint16_t flags;
808 int len, error;
809
810 if (sc->sc_flags & NFE_40BIT_ADDR) {
811 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];
812
813 flags = le16toh(desc64->flags);
814 len = le16toh(desc64->length) & 0x3fff;
815 } else {
816 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];
817
818 flags = le16toh(desc32->flags);
819 len = le16toh(desc32->length) & 0x3fff;
820 }
821
822 if (flags & NFE_RX_READY)
823 break;
824
825 reap = 1;
826
827 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
828 if (!(flags & NFE_RX_VALID_V1))
829 goto skip;
830
831 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
832 flags &= ~NFE_RX_ERROR;
833 len--; /* fix buffer length */
834 }
835 } else {
836 if (!(flags & NFE_RX_VALID_V2))
837 goto skip;
838
839 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
840 flags &= ~NFE_RX_ERROR;
841 len--; /* fix buffer length */
842 }
843 }
844
845 if (flags & NFE_RX_ERROR) {
846 ifp->if_ierrors++;
847 goto skip;
848 }
849
850 m = data->m;
851
852 if (sc->sc_flags & NFE_USE_JUMBO)
853 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
854 else
855 error = nfe_newbuf_std(sc, ring, ring->cur, 0);
856 if (error) {
857 ifp->if_ierrors++;
858 goto skip;
859 }
860
861 /* finalize mbuf */
862 m->m_pkthdr.len = m->m_len = len;
863 m->m_pkthdr.rcvif = ifp;
864
865#ifdef notyet
866 if (sc->sc_flags & NFE_HW_CSUM) {
867 if (flags & NFE_RX_IP_CSUMOK)
868 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
869 if (flags & NFE_RX_UDP_CSUMOK)
870 m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
871 if (flags & NFE_RX_TCP_CSUMOK)
872 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
873 }
874#elif defined(NFE_CSUM)
875 if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
876 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
877#endif
878
879 ifp->if_ipackets++;
880 ifp->if_input(ifp, m);
881skip:
882 nfe_set_ready_rxdesc(sc, ring, ring->cur);
883 sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
884 }
885
886 if (reap)
887 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
888}
889
890static void
891nfe_txeof(struct nfe_softc *sc)
892{
893 struct ifnet *ifp = &sc->arpcom.ac_if;
894 struct nfe_tx_ring *ring = &sc->txq;
895 struct nfe_tx_data *data = NULL;
896
897 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD);
898 while (ring->next != ring->cur) {
899 uint16_t flags;
900
901 if (sc->sc_flags & NFE_40BIT_ADDR)
902 flags = le16toh(ring->desc64[ring->next].flags);
903 else
904 flags = le16toh(ring->desc32[ring->next].flags);
905
906 if (flags & NFE_TX_VALID)
907 break;
908
909 data = &ring->data[ring->next];
910
911 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
912 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
913 goto skip;
914
915 if ((flags & NFE_TX_ERROR_V1) != 0) {
916 if_printf(ifp, "tx v1 error 0x%4b\n", flags,
917 NFE_V1_TXERR);
918 ifp->if_oerrors++;
919 } else {
920 ifp->if_opackets++;
921 }
922 } else {
923 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
924 goto skip;
925
926 if ((flags & NFE_TX_ERROR_V2) != 0) {
927 if_printf(ifp, "tx v2 error 0x%4b\n", flags,
928 NFE_V2_TXERR);
929 ifp->if_oerrors++;
930 } else {
931 ifp->if_opackets++;
932 }
933 }
934
935 if (data->m == NULL) { /* should not get there */
936 if_printf(ifp,
937 "last fragment bit w/o associated mbuf!\n");
938 goto skip;
939 }
940
941 /* last fragment of the mbuf chain transmitted */
942 bus_dmamap_sync(ring->data_tag, data->map,
943 BUS_DMASYNC_POSTWRITE);
944 bus_dmamap_unload(ring->data_tag, data->map);
945 m_freem(data->m);
946 data->m = NULL;
947
948 ifp->if_timer = 0;
949skip:
950 ring->queued--;
951 KKASSERT(ring->queued >= 0);
952 ring->next = (ring->next + 1) % NFE_TX_RING_COUNT;
953 }
954
955 if (data != NULL) { /* at least one slot freed */
956 ifp->if_flags &= ~IFF_OACTIVE;
957 ifp->if_start(ifp);
958 }
959}
960
961static int
962nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
963{
964 struct nfe_dma_ctx ctx;
965 bus_dma_segment_t segs[NFE_MAX_SCATTER];
966 struct nfe_tx_data *data, *data_map;
967 bus_dmamap_t map;
968 struct nfe_desc64 *desc64 = NULL;
969 struct nfe_desc32 *desc32 = NULL;
970 uint16_t flags = 0;
971 uint32_t vtag = 0;
972 int error, i, j;
973
974 data = &ring->data[ring->cur];
975 map = data->map;
976 data_map = data; /* Remember who owns the DMA map */
977
978 ctx.nsegs = NFE_MAX_SCATTER;
979 ctx.segs = segs;
980 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
981 nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
982 if (error && error != EFBIG) {
983 if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n");
984 goto back;
985 }
986
987 if (error) { /* error == EFBIG */
988 struct mbuf *m_new;
989
990 m_new = m_defrag(m0, MB_DONTWAIT);
991 if (m_new == NULL) {
992 if_printf(&sc->arpcom.ac_if,
993 "could not defrag TX mbuf\n");
994 error = ENOBUFS;
995 goto back;
996 } else {
997 m0 = m_new;
998 }
999
1000 ctx.nsegs = NFE_MAX_SCATTER;
1001 ctx.segs = segs;
1002 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0,
1003 nfe_buf_dma_addr, &ctx,
1004 BUS_DMA_NOWAIT);
1005 if (error) {
1006 if_printf(&sc->arpcom.ac_if,
1007 "could not map defraged TX mbuf\n");
1008 goto back;
1009 }
1010 }
1011
1012 error = 0;
1013
1014 if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) {
1015 bus_dmamap_unload(ring->data_tag, map);
1016 error = ENOBUFS;
1017 goto back;
1018 }
1019
1020 /* setup h/w VLAN tagging */
1021 if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) &&
1022 m0->m_pkthdr.rcvif != NULL &&
1023 m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
1024 struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc;
1025
1026 if (ifv != NULL)
1027 vtag = NFE_TX_VTAG | htons(ifv->ifv_tag);
1028 }
1029
1030#ifdef NFE_CSUM
1031 if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1032 flags |= NFE_TX_IP_CSUM;
1033 if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
1034 flags |= NFE_TX_TCP_CSUM;
1035#endif
1036
1037 /*
1038 * XXX urm. somebody is unaware of how hardware works. You
1039 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
1040 * the ring until the entire chain is actually *VALID*. Otherwise
1041 * the hardware may encounter a partially initialized chain that
1042 * is marked as being ready to go when it in fact is not ready to
1043 * go.
1044 */
1045
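	/*
	 * Hence the two passes below: first fill in every descriptor of
	 * the chain without NFE_TX_VALID, then, once the last-fragment
	 * flag has been set, walk the chain backwards and flip
	 * NFE_TX_VALID so that the first descriptor becomes valid last.
	 */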
1046 for (i = 0; i < ctx.nsegs; i++) {
1047 j = (ring->cur + i) % NFE_TX_RING_COUNT;
1048 data = &ring->data[j];
1049
1050 if (sc->sc_flags & NFE_40BIT_ADDR) {
1051 desc64 = &ring->desc64[j];
1052#if defined(__LP64__)
1053 desc64->physaddr[0] =
1054 htole32(segs[i].ds_addr >> 32);
1055#endif
1056 desc64->physaddr[1] =
1057 htole32(segs[i].ds_addr & 0xffffffff);
1058 desc64->length = htole16(segs[i].ds_len - 1);
1059 desc64->vtag = htole32(vtag);
1060 desc64->flags = htole16(flags);
1061 } else {
1062 desc32 = &ring->desc32[j];
1063 desc32->physaddr = htole32(segs[i].ds_addr);
1064 desc32->length = htole16(segs[i].ds_len - 1);
1065 desc32->flags = htole16(flags);
1066 }
1067
1068 /* csum flags and vtag belong to the first fragment only */
1069 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1070 vtag = 0;
1071
1072 ring->queued++;
1073 KKASSERT(ring->queued <= NFE_TX_RING_COUNT);
1074 }
1075
1076 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1077 if (sc->sc_flags & NFE_40BIT_ADDR) {
1078 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
1079 } else {
1080 if (sc->sc_flags & NFE_JUMBO_SUP)
1081 flags = NFE_TX_LASTFRAG_V2;
1082 else
1083 flags = NFE_TX_LASTFRAG_V1;
1084 desc32->flags |= htole16(flags);
1085 }
1086
1087 /*
1088 * Set NFE_TX_VALID backwards so the hardware doesn't see the
1089 * whole mess until the first descriptor in the map is flagged.
1090 */
1091 for (i = ctx.nsegs - 1; i >= 0; --i) {
1092 j = (ring->cur + i) % NFE_TX_RING_COUNT;
1093 if (sc->sc_flags & NFE_40BIT_ADDR) {
1094 desc64 = &ring->desc64[j];
1095 desc64->flags |= htole16(NFE_TX_VALID);
1096 } else {
1097 desc32 = &ring->desc32[j];
1098 desc32->flags |= htole16(NFE_TX_VALID);
1099 }
1100 }
1101 ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT;
1102
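	/*
	 * The mbuf chain was DMA-loaded into the map owned by the first
	 * descriptor's slot, but nfe_txeof() unloads the map and frees
	 * the mbuf at the last fragment's slot, so hand the loaded map to
	 * that slot and give its old, unused map back to the first one.
	 */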
1103 /* Exchange DMA map */
1104 data_map->map = data->map;
1105 data->map = map;
1106 data->m = m0;
1107
1108 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
1109back:
1110 if (error)
1111 m_freem(m0);
1112 return error;
1113}
1114
1115static void
1116nfe_start(struct ifnet *ifp)
1117{
1118 struct nfe_softc *sc = ifp->if_softc;
1119 struct nfe_tx_ring *ring = &sc->txq;
1120 int count = 0;
1121 struct mbuf *m0;
1122
1123 if (ifp->if_flags & IFF_OACTIVE)
1124 return;
1125
1126 if (ifq_is_empty(&ifp->if_snd))
1127 return;
1128
1129 for (;;) {
1130 m0 = ifq_dequeue(&ifp->if_snd, NULL);
1131 if (m0 == NULL)
1132 break;
1133
1134 BPF_MTAP(ifp, m0);
1135
1136 if (nfe_encap(sc, ring, m0) != 0) {
1137 ifp->if_flags |= IFF_OACTIVE;
1138 break;
1139 }
1140 ++count;
1141
1142 /*
1143 * NOTE:
1144 * `m0' may be freed in nfe_encap(), so
1145 * it should not be touched any more.
1146 */
1147 }
1148 if (count == 0) /* nothing sent */
1149 return;
1150
1151 /* Sync TX descriptor ring */
1152 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1153
1154 /* Kick Tx */
1155 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1156
1157 /*
1158 * Set a timeout in case the chip goes out to lunch.
1159 */
1160 ifp->if_timer = 5;
1161}
1162
1163static void
1164nfe_watchdog(struct ifnet *ifp)
1165{
1166 struct nfe_softc *sc = ifp->if_softc;
1167
1168 if (ifp->if_flags & IFF_RUNNING) {
1169 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
1170 nfe_txeof(sc);
1171 return;
1172 }
1173
1174 if_printf(ifp, "watchdog timeout\n");
1175
1176 nfe_init(ifp->if_softc);
1177
1178 ifp->if_oerrors++;
1179
1180 if (!ifq_is_empty(&ifp->if_snd))
1181 ifp->if_start(ifp);
1182}
1183
1184static void
1185nfe_init(void *xsc)
1186{
1187 struct nfe_softc *sc = xsc;
1188 struct ifnet *ifp = &sc->arpcom.ac_if;
1189 uint32_t tmp;
1190 int error;
1191
1192 nfe_stop(sc);
1193
1194 error = nfe_init_tx_ring(sc, &sc->txq);
1195 if (error) {
1196 nfe_stop(sc);
1197 return;
1198 }
1199
1200 error = nfe_init_rx_ring(sc, &sc->rxq);
1201 if (error) {
1202 nfe_stop(sc);
1203 return;
1204 }
1205
1206 NFE_WRITE(sc, NFE_TX_UNK, 0);
1207 NFE_WRITE(sc, NFE_STATUS, 0);
1208
1209 sc->rxtxctl = NFE_RXTX_BIT2;
1210 if (sc->sc_flags & NFE_40BIT_ADDR)
1211 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1212 else if (sc->sc_flags & NFE_JUMBO_SUP)
1213 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1214#ifdef NFE_CSUM
1215 if (sc->sc_flags & NFE_HW_CSUM)
1216 sc->rxtxctl |= NFE_RXTX_RXCSUM;
1217#endif
1218
1219 /*
1220 * Although the adapter is capable of stripping VLAN tags from received
1221 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1222 * purpose. This will be done in software by our network stack.
1223 */
1224 if (sc->sc_flags & NFE_HW_VLAN)
1225 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1226
1227 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1228 DELAY(10);
1229 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1230
1231 if (sc->sc_flags & NFE_HW_VLAN)
1232 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1233
1234 NFE_WRITE(sc, NFE_SETUP_R6, 0);
1235
1236 /* set MAC address */
1237 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);
1238
1239 /* tell MAC where rings are in memory */
1240#ifdef __LP64__
1241 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1242#endif
1243 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1244#ifdef __LP64__
1245 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1246#endif
1247 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1248
1249 NFE_WRITE(sc, NFE_RING_SIZE,
1250 (NFE_RX_RING_COUNT - 1) << 16 |
1251 (NFE_TX_RING_COUNT - 1));
1252
1253 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1254
1255 /* force MAC to wakeup */
1256 tmp = NFE_READ(sc, NFE_PWR_STATE);
1257 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1258 DELAY(10);
1259 tmp = NFE_READ(sc, NFE_PWR_STATE);
1260 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1261
1262#if 1
1263 /* configure interrupts coalescing/mitigation */
1264 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1265#else
1266 /* no interrupt mitigation: one interrupt per packet */
1267 NFE_WRITE(sc, NFE_IMTIMER, 970);
1268#endif
1269
1270 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1271 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1272 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1273
1274 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1275 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1276
1277 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1278 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
1279
1280 sc->rxtxctl &= ~NFE_RXTX_BIT2;
1281 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1282 DELAY(10);
1283 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1284
1285 /* set Rx filter */
1286 nfe_setmulti(sc);
1287
1288 nfe_ifmedia_upd(ifp);
1289
1290 /* enable Rx */
1291 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1292
1293 /* enable Tx */
1294 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1295
1296 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1297
1298#ifdef DEVICE_POLLING
1299 if ((ifp->if_flags & IFF_POLLING) == 0)
1300#endif
1301 /* enable interrupts */
1302 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1303
1304 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1305
1306 ifp->if_flags |= IFF_RUNNING;
1307 ifp->if_flags &= ~IFF_OACTIVE;
1308}
1309
1310static void
1311nfe_stop(struct nfe_softc *sc)
1312{
1313 struct ifnet *ifp = &sc->arpcom.ac_if;
1314
1315 callout_stop(&sc->sc_tick_ch);
1316
1317 ifp->if_timer = 0;
1318 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1319
1320 /* Abort Tx */
1321 NFE_WRITE(sc, NFE_TX_CTL, 0);
1322
1323 /* Disable Rx */
1324 NFE_WRITE(sc, NFE_RX_CTL, 0);
1325
1326 /* Disable interrupts */
1327 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1328
1329 /* Reset Tx and Rx rings */
1330 nfe_reset_tx_ring(sc, &sc->txq);
1331 nfe_reset_rx_ring(sc, &sc->rxq);
1332}
1333
1334static int
1335nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1336{
1337 int i, j, error, descsize;
1338 void **desc;
1339
1340 if (sc->sc_flags & NFE_40BIT_ADDR) {
1341 desc = (void **)&ring->desc64;
1342 descsize = sizeof(struct nfe_desc64);
1343 } else {
1344 desc = (void **)&ring->desc32;
1345 descsize = sizeof(struct nfe_desc32);
1346 }
1347
1348 ring->bufsz = MCLBYTES;
1349 ring->cur = ring->next = 0;
1350
1351 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1352 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1353 NULL, NULL,
1354 NFE_RX_RING_COUNT * descsize, 1,
1355 NFE_RX_RING_COUNT * descsize,
1356 0, &ring->tag);
1357 if (error) {
1358 if_printf(&sc->arpcom.ac_if,
1359 "could not create desc RX DMA tag\n");
1360 return error;
1361 }
1362
1363 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
1364 &ring->map);
1365 if (error) {
1366 if_printf(&sc->arpcom.ac_if,
1367 "could not allocate RX desc DMA memory\n");
1368 bus_dma_tag_destroy(ring->tag);
1369 ring->tag = NULL;
1370 return error;
1371 }
1372
1373 error = bus_dmamap_load(ring->tag, ring->map, *desc,
1374 NFE_RX_RING_COUNT * descsize,
1375 nfe_ring_dma_addr, &ring->physaddr,
1376 BUS_DMA_WAITOK);
1377 if (error) {
1378 if_printf(&sc->arpcom.ac_if,
1379 "could not load RX desc DMA map\n");
1380 bus_dmamem_free(ring->tag, *desc, ring->map);
1381 bus_dma_tag_destroy(ring->tag);
1382 ring->tag = NULL;
1383 return error;
1384 }
1385
1386 if (sc->sc_flags & NFE_USE_JUMBO) {
1387 ring->bufsz = NFE_JBYTES;
1388
1389 error = nfe_jpool_alloc(sc, ring);
1390 if (error) {
1391 if_printf(&sc->arpcom.ac_if,
1392 "could not allocate jumbo frames\n");
1393 return error;
1394 }
1395 }
1396
1397 error = bus_dma_tag_create(NULL, 1, 0,
1398 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1399 NULL, NULL,
1400 MCLBYTES, 1, MCLBYTES,
1401 0, &ring->data_tag);
1402 if (error) {
1403 if_printf(&sc->arpcom.ac_if,
1404 "could not create RX mbuf DMA tag\n");
1405 return error;
1406 }
1407
1408 /* Create a spare RX mbuf DMA map */
1409 error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
1410 if (error) {
1411 if_printf(&sc->arpcom.ac_if,
1412 "could not create spare RX mbuf DMA map\n");
1413 bus_dma_tag_destroy(ring->data_tag);
1414 ring->data_tag = NULL;
1415 return error;
1416 }
1417
1418 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1419 error = bus_dmamap_create(ring->data_tag, 0,
1420 &ring->data[i].map);
1421 if (error) {
1422 if_printf(&sc->arpcom.ac_if,
1423 "could not create %dth RX mbuf DMA mapn", i);
1424 goto fail;
1425 }
1426 }
1427 return 0;
1428fail:
1429 for (j = 0; j < i; ++j)
 1430 bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1431 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1432 bus_dma_tag_destroy(ring->data_tag);
1433 ring->data_tag = NULL;
1434 return error;
1435}
1436
1437static void
1438nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1439{
1440 int i;
1441
1442 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1443 struct nfe_rx_data *data = &ring->data[i];
1444
1445 if (data->m != NULL) {
1446 bus_dmamap_unload(ring->data_tag, data->map);
1447 m_freem(data->m);
1448 data->m = NULL;
1449 }
1450 }
1451 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1452
1453 ring->cur = ring->next = 0;
1454}
1455
1456static int
1457nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1458{
1459 int i;
1460
1461 for (i = 0; i < NFE_RX_RING_COUNT; ++i) {
1462 int error;
1463
1464 /* XXX should use a function pointer */
1465 if (sc->sc_flags & NFE_USE_JUMBO)
1466 error = nfe_newbuf_jumbo(sc, ring, i, 1);
1467 else
1468 error = nfe_newbuf_std(sc, ring, i, 1);
1469 if (error) {
1470 if_printf(&sc->arpcom.ac_if,
1471 "could not allocate RX buffer\n");
1472 return error;
1473 }
1474
1475 nfe_set_ready_rxdesc(sc, ring, i);
1476 }
1477 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1478
1479 return 0;
1480}
1481
1482static void
1483nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1484{
1485 if (ring->data_tag != NULL) {
1486 struct nfe_rx_data *data;
1487 int i;
1488
1489 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1490 data = &ring->data[i];
1491
1492 if (data->m != NULL) {
1493 bus_dmamap_unload(ring->data_tag, data->map);
1494 m_freem(data->m);
1495 }
1496 bus_dmamap_destroy(ring->data_tag, data->map);
1497 }
1498 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1499 bus_dma_tag_destroy(ring->data_tag);
1500 }
1501
1502 nfe_jpool_free(sc, ring);
1503
1504 if (ring->tag != NULL) {
1505 void *desc;
1506
1507 if (sc->sc_flags & NFE_40BIT_ADDR)
1508 desc = ring->desc64;
1509 else
1510 desc = ring->desc32;
1511
1512 bus_dmamap_unload(ring->tag, ring->map);
1513 bus_dmamem_free(ring->tag, desc, ring->map);
1514 bus_dma_tag_destroy(ring->tag);
1515 }
1516}
1517
1518static struct nfe_jbuf *
1519nfe_jalloc(struct nfe_softc *sc)
1520{
1521 struct ifnet *ifp = &sc->arpcom.ac_if;
1522 struct nfe_jbuf *jbuf;
1523
1524 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1525
1526 jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
1527 if (jbuf != NULL) {
1528 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
1529 jbuf->inuse = 1;
1530 } else {
1531 if_printf(ifp, "no free jumbo buffer\n");
1532 }
1533
1534 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1535
1536 return jbuf;
1537}
1538
1539static void
1540nfe_jfree(void *arg)
1541{
1542 struct nfe_jbuf *jbuf = arg;
1543 struct nfe_softc *sc = jbuf->sc;
1544 struct nfe_rx_ring *ring = jbuf->ring;
1545
1546 if (&ring->jbuf[jbuf->slot] != jbuf)
1547 panic("%s: free wrong jumbo buffer\n", __func__);
1548 else if (jbuf->inuse == 0)
1549 panic("%s: jumbo buffer already freed\n", __func__);
1550
1551 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1552 atomic_subtract_int(&jbuf->inuse, 1);
1553 if (jbuf->inuse == 0)
1554 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1555 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1556}
1557
1558static void
1559nfe_jref(void *arg)
1560{
1561 struct nfe_jbuf *jbuf = arg;
1562 struct nfe_rx_ring *ring = jbuf->ring;
1563
1564 if (&ring->jbuf[jbuf->slot] != jbuf)
1565 panic("%s: ref wrong jumbo buffer\n", __func__);
1566 else if (jbuf->inuse == 0)
1567 panic("%s: jumbo buffer already freed\n", __func__);
1568
 1569 atomic_add_int(&jbuf->inuse, 1);
1570}
1571
1572static int
1573nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1574{
1575 struct nfe_jbuf *jbuf;
1576 bus_addr_t physaddr;
1577 caddr_t buf;
1578 int i, error;
1579
1580 /*
1581 * Allocate a big chunk of DMA'able memory.
1582 */
1583 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1584 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1585 NULL, NULL,
1586 NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE,
1587 0, &ring->jtag);
1588 if (error) {
1589 if_printf(&sc->arpcom.ac_if,
1590 "could not create jumbo DMA tag\n");
1591 return error;
1592 }
1593
1594 error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool,
1595 BUS_DMA_WAITOK, &ring->jmap);
1596 if (error) {
1597 if_printf(&sc->arpcom.ac_if,
1598 "could not allocate jumbo DMA memory\n");
1599 bus_dma_tag_destroy(ring->jtag);
1600 ring->jtag = NULL;
1601 return error;
1602 }
1603
1604 error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool,
1605 NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr,
1606 BUS_DMA_WAITOK);
1607 if (error) {
1608 if_printf(&sc->arpcom.ac_if,
1609 "could not load jumbo DMA map\n");
1610 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1611 bus_dma_tag_destroy(ring->jtag);
1612 ring->jtag = NULL;
1613 return error;
1614 }
1615
 1616 /* ... and split it into 9KB chunks */
1617 SLIST_INIT(&ring->jfreelist);
1618
1619 buf = ring->jpool;
1620 for (i = 0; i < NFE_JPOOL_COUNT; i++) {
1621 jbuf = &ring->jbuf[i];
1622
1623 jbuf->sc = sc;
1624 jbuf->ring = ring;
1625 jbuf->inuse = 0;
1626 jbuf->slot = i;
1627 jbuf->buf = buf;
1628 jbuf->physaddr = physaddr;
1629
1630 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1631
1632 buf += NFE_JBYTES;
1633 physaddr += NFE_JBYTES;
1634 }
1635
1636 return 0;
1637}
1638
1639static void
1640nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1641{
1642 if (ring->jtag != NULL) {
1643 bus_dmamap_unload(ring->jtag, ring->jmap);
1644 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1645 bus_dma_tag_destroy(ring->jtag);
1646 }
1647}
1648
1649static int
1650nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1651{
1652 int i, j, error, descsize;
1653 void **desc;
1654
1655 if (sc->sc_flags & NFE_40BIT_ADDR) {
1656 desc = (void **)&ring->desc64;
1657 descsize = sizeof(struct nfe_desc64);
1658 } else {
1659 desc = (void **)&ring->desc32;
1660 descsize = sizeof(struct nfe_desc32);
1661 }
1662
1663 ring->queued = 0;
1664 ring->cur = ring->next = 0;
1665
1666 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1667 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1668 NULL, NULL,
1669 NFE_TX_RING_COUNT * descsize, 1,
1670 NFE_TX_RING_COUNT * descsize,
1671 0, &ring->tag);
1672 if (error) {
1673 if_printf(&sc->arpcom.ac_if,
1674 "could not create TX desc DMA map\n");
1675 return error;
1676 }
1677
1678 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
1679 &ring->map);
1680 if (error) {
1681 if_printf(&sc->arpcom.ac_if,
1682 "could not allocate TX desc DMA memory\n");
1683 bus_dma_tag_destroy(ring->tag);
1684 ring->tag = NULL;
1685 return error;
1686 }
1687
1688 error = bus_dmamap_load(ring->tag, ring->map, *desc,
1689 NFE_TX_RING_COUNT * descsize,
1690 nfe_ring_dma_addr, &ring->physaddr,
1691 BUS_DMA_WAITOK);
1692 if (error) {
1693 if_printf(&sc->arpcom.ac_if,
1694 "could not load TX desc DMA map\n");
1695 bus_dmamem_free(ring->tag, *desc, ring->map);
1696 bus_dma_tag_destroy(ring->tag);
1697 ring->tag = NULL;
1698 return error;
1699 }
1700
1701 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
1702 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1703 NULL, NULL,
1704 NFE_JBYTES * NFE_MAX_SCATTER,
1705 NFE_MAX_SCATTER, NFE_JBYTES,
1706 0, &ring->data_tag);
1707 if (error) {
1708 if_printf(&sc->arpcom.ac_if,
1709 "could not create TX buf DMA tag\n");
1710 return error;
1711 }
1712
1713 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1714 error = bus_dmamap_create(ring->data_tag, 0,
1715 &ring->data[i].map);
1716 if (error) {
1717 if_printf(&sc->arpcom.ac_if,
1718 "could not create %dth TX buf DMA map\n", i);
1719 goto fail;
1720 }
1721 }
1722
1723 return 0;
1724fail:
1725 for (j = 0; j < i; ++j)
 1726 bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1727 bus_dma_tag_destroy(ring->data_tag);
1728 ring->data_tag = NULL;
1729 return error;
1730}
1731
1732static void
1733nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1734{
1735 int i;
1736
1737 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1738 struct nfe_tx_data *data = &ring->data[i];
1739
1740 if (sc->sc_flags & NFE_40BIT_ADDR)
1741 ring->desc64[i].flags = 0;
1742 else
1743 ring->desc32[i].flags = 0;
1744
1745 if (data->m != NULL) {
1746 bus_dmamap_sync(ring->data_tag, data->map,
1747 BUS_DMASYNC_POSTWRITE);
1748 bus_dmamap_unload(ring->data_tag, data->map);
1749 m_freem(data->m);
1750 data->m = NULL;
1751 }
1752 }
1753 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);
1754
1755 ring->queued = 0;
1756 ring->cur = ring->next = 0;
1757}
1758
1759static int
1760nfe_init_tx_ring(struct nfe_softc *sc __unused,
1761 struct nfe_tx_ring *ring __unused)
1762{
1763 return 0;
1764}
1765
1766static void
1767nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1768{
1769 if (ring->data_tag != NULL) {
1770 struct nfe_tx_data *data;
1771 int i;
1772
1773 for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
1774 data = &ring->data[i];
1775
1776 if (data->m != NULL) {
1777 bus_dmamap_unload(ring->data_tag, data->map);
1778 m_freem(data->m);
1779 }
1780 bus_dmamap_destroy(ring->data_tag, data->map);
1781 }
1782
1783 bus_dma_tag_destroy(ring->data_tag);
1784 }
1785
1786 if (ring->tag != NULL) {
1787 void *desc;
1788
1789 if (sc->sc_flags & NFE_40BIT_ADDR)
1790 desc = ring->desc64;
1791 else
1792 desc = ring->desc32;
1793
1794 bus_dmamap_unload(ring->tag, ring->map);
1795 bus_dmamem_free(ring->tag, desc, ring->map);
1796 bus_dma_tag_destroy(ring->tag);
1797 }
1798}
1799
1800static int
1801nfe_ifmedia_upd(struct ifnet *ifp)
1802{
1803 struct nfe_softc *sc = ifp->if_softc;
1804 struct mii_data *mii = device_get_softc(sc->sc_miibus);
1805
1806 if (mii->mii_instance != 0) {
1807 struct mii_softc *miisc;
1808
1809 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1810 mii_phy_reset(miisc);
1811 }
1812 mii_mediachg(mii);
1813
1814 return 0;
1815}
1816
1817static void
1818nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1819{
1820 struct nfe_softc *sc = ifp->if_softc;
1821 struct mii_data *mii = device_get_softc(sc->sc_miibus);
1822
1823 mii_pollstat(mii);
1824 ifmr->ifm_status = mii->mii_media_status;
1825 ifmr->ifm_active = mii->mii_media_active;
1826}
1827
1828static void
1829nfe_setmulti(struct nfe_softc *sc)
1830{
1831 struct ifnet *ifp = &sc->arpcom.ac_if;
1832 struct ifmultiaddr *ifma;
1833 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1834 uint32_t filter = NFE_RXFILTER_MAGIC;
1835 int i;
1836
1837 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1838 bzero(addr, ETHER_ADDR_LEN);
1839 bzero(mask, ETHER_ADDR_LEN);
1840 goto done;
1841 }
1842
1843 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1844 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1845
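	/*
	 * Fold all multicast addresses into one (addr, mask) pair for the
	 * hardware filter: addr collects the bits set in every address,
	 * and after the loop mask holds the bit positions on which all
	 * addresses agree, so the chip compares only those bits.  The
	 * filter is therefore approximate; unwanted multicast frames that
	 * slip through are dropped further up the stack.
	 */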
1846 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1847 caddr_t maddr;
1848
1849 if (ifma->ifma_addr->sa_family != AF_LINK)
1850 continue;
1851
1852 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
1853 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1854 addr[i] &= maddr[i];
1855 mask[i] &= ~maddr[i];
1856 }
1857 }
1858
1859 for (i = 0; i < ETHER_ADDR_LEN; i++)
1860 mask[i] |= addr[i];
1861
1862done:
1863 addr[0] |= 0x01; /* make sure multicast bit is set */
1864
1865 NFE_WRITE(sc, NFE_MULTIADDR_HI,
1866 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1867 NFE_WRITE(sc, NFE_MULTIADDR_LO,
1868 addr[5] << 8 | addr[4]);
1869 NFE_WRITE(sc, NFE_MULTIMASK_HI,
1870 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1871 NFE_WRITE(sc, NFE_MULTIMASK_LO,
1872 mask[5] << 8 | mask[4]);
1873
1874 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1875 NFE_WRITE(sc, NFE_RXFILTER, filter);
1876}
1877
1878static void
1879nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1880{
1881 uint32_t tmp;
1882
1883 tmp = NFE_READ(sc, NFE_MACADDR_LO);
1884 addr[0] = (tmp >> 8) & 0xff;
1885 addr[1] = (tmp & 0xff);
1886
1887 tmp = NFE_READ(sc, NFE_MACADDR_HI);
1888 addr[2] = (tmp >> 24) & 0xff;
1889 addr[3] = (tmp >> 16) & 0xff;
1890 addr[4] = (tmp >> 8) & 0xff;
1891 addr[5] = (tmp & 0xff);
1892}
1893
1894static void
1895nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1896{
1897 NFE_WRITE(sc, NFE_MACADDR_LO,
1898 addr[5] << 8 | addr[4]);
1899 NFE_WRITE(sc, NFE_MACADDR_HI,
1900 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1901}
1902
1903static void
1904nfe_tick(void *arg)
1905{
1906 struct nfe_softc *sc = arg;
1907 struct ifnet *ifp = &sc->arpcom.ac_if;
1908 struct mii_data *mii = device_get_softc(sc->sc_miibus);
1909
1910 lwkt_serialize_enter(ifp->if_serializer);
1911
1912 mii_tick(mii);
1913 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1914
1915 lwkt_serialize_exit(ifp->if_serializer);
1916}
1917
1918static void
1919nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
1920{
1921 if (error)
1922 return;
1923
1924 KASSERT(nseg == 1, ("too many segments, should be 1\n"));
1925
1926 *((uint32_t *)arg) = seg->ds_addr;
1927}
1928
1929static void
1930nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
1931 bus_size_t mapsz __unused, int error)
1932{
1933 struct nfe_dma_ctx *ctx = arg;
1934 int i;
1935
1936 if (error)
1937 return;
1938
1939 KASSERT(nsegs <= ctx->nsegs,
1940 ("too many segments(%d), should be <= %d\n",
1941 nsegs, ctx->nsegs));
1942
1943 ctx->nsegs = nsegs;
1944 for (i = 0; i < nsegs; ++i)
1945 ctx->segs[i] = segs[i];
1946}
1947
1948static int
1949nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
1950 int wait)
1951{
1952 struct nfe_rx_data *data = &ring->data[idx];
1953 struct nfe_dma_ctx ctx;
1954 bus_dma_segment_t seg;
1955 bus_dmamap_t map;
1956 struct mbuf *m;
1957 int error;
1958
1959 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
1960 if (m == NULL)
1961 return ENOBUFS;
1962 m->m_len = m->m_pkthdr.len = MCLBYTES;
1963
1964 ctx.nsegs = 1;
1965 ctx.segs = &seg;
1966 error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
1967 m, nfe_buf_dma_addr, &ctx,
1968 wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
1969 if (error) {
1970 m_freem(m);
 1971 if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n", error);
1972 return error;
1973 }
1974
1975 /* Unload originally mapped mbuf */
1976 bus_dmamap_unload(ring->data_tag, data->map);
1977
1978 /* Swap this DMA map with tmp DMA map */
1979 map = data->map;
1980 data->map = ring->data_tmpmap;
1981 ring->data_tmpmap = map;
1982
1983 /* Caller is assumed to have collected the old mbuf */
1984 data->m = m;
1985
1986 nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
1987
1988 bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
1989 return 0;
1990}
1991
1992static int
1993nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
1994 int wait)
1995{
1996 struct nfe_rx_data *data = &ring->data[idx];
1997 struct nfe_jbuf *jbuf;
1998 struct mbuf *m;
1999
2000 MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
2001 if (m == NULL)
2002 return ENOBUFS;
2003
2004 jbuf = nfe_jalloc(sc);
2005 if (jbuf == NULL) {
2006 m_freem(m);
2007 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
2008 "-- packet dropped!\n");
2009 return ENOBUFS;
2010 }
2011
2012 m->m_ext.ext_arg = jbuf;
2013 m->m_ext.ext_buf = jbuf->buf;
2014 m->m_ext.ext_free = nfe_jfree;
2015 m->m_ext.ext_ref = nfe_jref;
2016 m->m_ext.ext_size = NFE_JBYTES;
2017
2018 m->m_data = m->m_ext.ext_buf;
2019 m->m_flags |= M_EXT;
2020 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2021
2022 /* Caller is assumed to have collected the old mbuf */
2023 data->m = m;
2024
2025 nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
2026
2027 bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
2028 return 0;
2029}
2030
2031static void
2032nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2033 bus_addr_t physaddr)
2034{
2035 if (sc->sc_flags & NFE_40BIT_ADDR) {
2036 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2037
2038#if defined(__LP64__)
2039 desc64->physaddr[0] = htole32(physaddr >> 32);
2040#endif
2041 desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
2042 } else {
2043 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2044
2045 desc32->physaddr = htole32(physaddr);
2046 }
2047}
2048
2049static void
2050nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
2051{
2052 if (sc->sc_flags & NFE_40BIT_ADDR) {
2053 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2054
2055 desc64->length = htole16(ring->bufsz);
2056 desc64->flags = htole16(NFE_RX_READY);
2057 } else {
2058 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2059
2060 desc32->length = htole16(ring->bufsz);
2061 desc32->flags = htole16(NFE_RX_READY);
2062 }
2063}