NFE - Fix bug with imtimer transitions and improve performance
[dragonfly.git] / sys/dev/netif/nfe/if_nfe.c
1/* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */
2/* $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.46 2008/10/28 07:30:49 sephe Exp $ */
3
4/*
5 * Copyright (c) 2006 The DragonFly Project. All rights reserved.
6 *
7 * This code is derived from software contributed to The DragonFly Project
8 * by Sepherosa Ziehau <sepherosa@gmail.com> and
9 * Matthew Dillon <dillon@apollo.backplane.com>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 * 3. Neither the name of The DragonFly Project nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific, prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 */
38
39/*
40 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
41 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
42 *
43 * Permission to use, copy, modify, and distribute this software for any
44 * purpose with or without fee is hereby granted, provided that the above
45 * copyright notice and this permission notice appear in all copies.
46 *
47 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
48 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
49 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
50 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
51 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
52 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
53 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
54 */
55
56/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
57
58#include "opt_polling.h"
59
60#include <sys/param.h>
61#include <sys/endian.h>
62#include <sys/kernel.h>
63#include <sys/bus.h>
64#include <sys/interrupt.h>
65#include <sys/proc.h>
66#include <sys/rman.h>
67#include <sys/serialize.h>
68#include <sys/socket.h>
69#include <sys/sockio.h>
70#include <sys/sysctl.h>
71
72#include <net/ethernet.h>
73#include <net/if.h>
74#include <net/bpf.h>
75#include <net/if_arp.h>
76#include <net/if_dl.h>
77#include <net/if_media.h>
78#include <net/ifq_var.h>
79#include <net/if_types.h>
80#include <net/if_var.h>
81#include <net/vlan/if_vlan_var.h>
82#include <net/vlan/if_vlan_ether.h>
83
84#include <bus/pci/pcireg.h>
85#include <bus/pci/pcivar.h>
86#include <bus/pci/pcidevs.h>
87
88#include <dev/netif/mii_layer/mii.h>
89#include <dev/netif/mii_layer/miivar.h>
90
91#include "miibus_if.h"
92
93#include <dev/netif/nfe/if_nfereg.h>
94#include <dev/netif/nfe/if_nfevar.h>
95
96#define NFE_CSUM
97#define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
98
99static int nfe_probe(device_t);
100static int nfe_attach(device_t);
101static int nfe_detach(device_t);
102static void nfe_shutdown(device_t);
103static int nfe_resume(device_t);
104static int nfe_suspend(device_t);
105
106static int nfe_miibus_readreg(device_t, int, int);
107static void nfe_miibus_writereg(device_t, int, int, int);
108static void nfe_miibus_statchg(device_t);
109
110#ifdef DEVICE_POLLING
111static void nfe_poll(struct ifnet *, enum poll_cmd, int);
112#endif
113static void nfe_intr(void *);
114static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
115static int nfe_rxeof(struct nfe_softc *);
116static int nfe_txeof(struct nfe_softc *, int);
117static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
118 struct mbuf *);
119static void nfe_start(struct ifnet *);
120static void nfe_watchdog(struct ifnet *);
121static void nfe_init(void *);
122static void nfe_stop(struct nfe_softc *);
123static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
124static void nfe_jfree(void *);
125static void nfe_jref(void *);
126static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
127static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
128static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
129static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
130static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
131static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
132static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
133static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
134static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
135static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
136static int nfe_ifmedia_upd(struct ifnet *);
137static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
138static void nfe_setmulti(struct nfe_softc *);
139static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
140static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
141static void nfe_powerup(device_t);
142static void nfe_mac_reset(struct nfe_softc *);
143static void nfe_tick(void *);
144static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
145 int, bus_addr_t);
146static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
147 int);
148static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
149 int);
150static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
151 int);
152static void nfe_enable_intrs(struct nfe_softc *);
153static void nfe_disable_intrs(struct nfe_softc *);
154
155static int nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);
156
157#define NFE_DEBUG
158#ifdef NFE_DEBUG
159
160static int nfe_debug = 0;
161static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
162static int nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
163/*
164 * hw timer simulated interrupt moderation @2000Hz. A negative value
165 * selects dynamic moderation (timer disabled when no traffic is present).
166 *
167 * XXX 8000Hz might be better but if the interrupt is shared it can
168 * blow out the cpu.
169 */
170static int nfe_imtime = -500; /* uS */
171
172TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
173TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
174TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
175TUNABLE_INT("hw.nfe.debug", &nfe_debug);
176
177#define DPRINTF(sc, fmt, ...) do { \
178 if ((sc)->sc_debug) { \
179 if_printf(&(sc)->arpcom.ac_if, \
180 fmt, __VA_ARGS__); \
181 } \
182} while (0)
183
184#define DPRINTFN(sc, lv, fmt, ...) do { \
185 if ((sc)->sc_debug >= (lv)) { \
186 if_printf(&(sc)->arpcom.ac_if, \
187 fmt, __VA_ARGS__); \
188 } \
189} while (0)
190
191#else /* !NFE_DEBUG */
192
193#define DPRINTF(sc, fmt, ...)
194#define DPRINTFN(sc, lv, fmt, ...)
195
196#endif /* NFE_DEBUG */
197
198static const struct nfe_dev {
199 uint16_t vid;
200 uint16_t did;
201 const char *desc;
202} nfe_devices[] = {
203 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
204 "NVIDIA nForce Fast Ethernet" },
205
206 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
207 "NVIDIA nForce2 Fast Ethernet" },
208
209 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
210 "NVIDIA nForce3 Gigabit Ethernet" },
211
212 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb
213 chipset, and possibly also the 400R; it might be that both nForce2- and
214 nForce3-based boards can use the same MCPs (= southbridges) */
215 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
216 "NVIDIA nForce3 Gigabit Ethernet" },
217
218 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
219 "NVIDIA nForce3 Gigabit Ethernet" },
220
221 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
222 "NVIDIA nForce3 Gigabit Ethernet" },
223
224 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
225 "NVIDIA nForce3 Gigabit Ethernet" },
226
227 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
228 "NVIDIA CK804 Gigabit Ethernet" },
229
230 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
231 "NVIDIA CK804 Gigabit Ethernet" },
232
233 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
234 "NVIDIA MCP04 Gigabit Ethernet" },
235
236 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
237 "NVIDIA MCP04 Gigabit Ethernet" },
238
239 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
240 "NVIDIA MCP51 Gigabit Ethernet" },
241
242 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
243 "NVIDIA MCP51 Gigabit Ethernet" },
244
245 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
246 "NVIDIA MCP55 Gigabit Ethernet" },
247
248 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
249 "NVIDIA MCP55 Gigabit Ethernet" },
250
251 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
252 "NVIDIA MCP61 Gigabit Ethernet" },
253
254 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
255 "NVIDIA MCP61 Gigabit Ethernet" },
256
257 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
258 "NVIDIA MCP61 Gigabit Ethernet" },
259
260 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
261 "NVIDIA MCP61 Gigabit Ethernet" },
262
263 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
264 "NVIDIA MCP65 Gigabit Ethernet" },
265
266 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
267 "NVIDIA MCP65 Gigabit Ethernet" },
268
269 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
270 "NVIDIA MCP65 Gigabit Ethernet" },
271
272 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
273 "NVIDIA MCP65 Gigabit Ethernet" },
274
275 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
276 "NVIDIA MCP67 Gigabit Ethernet" },
277
278 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
279 "NVIDIA MCP67 Gigabit Ethernet" },
280
281 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
282 "NVIDIA MCP67 Gigabit Ethernet" },
283
284 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
285 "NVIDIA MCP67 Gigabit Ethernet" },
286
287 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
288 "NVIDIA MCP73 Gigabit Ethernet" },
289
290 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
291 "NVIDIA MCP73 Gigabit Ethernet" },
292
293 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
294 "NVIDIA MCP73 Gigabit Ethernet" },
295
296 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
297 "NVIDIA MCP73 Gigabit Ethernet" },
298
299 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
300 "NVIDIA MCP77 Gigabit Ethernet" },
301
302 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
303 "NVIDIA MCP77 Gigabit Ethernet" },
304
305 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
306 "NVIDIA MCP77 Gigabit Ethernet" },
307
308 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
309 "NVIDIA MCP77 Gigabit Ethernet" },
310
311 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
312 "NVIDIA MCP79 Gigabit Ethernet" },
313
314 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
315 "NVIDIA MCP79 Gigabit Ethernet" },
316
317 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
318 "NVIDIA MCP79 Gigabit Ethernet" },
319
320 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
321 "NVIDIA MCP79 Gigabit Ethernet" },
322
323 { 0, 0, NULL }
324};
325
326static device_method_t nfe_methods[] = {
327 /* Device interface */
328 DEVMETHOD(device_probe, nfe_probe),
329 DEVMETHOD(device_attach, nfe_attach),
330 DEVMETHOD(device_detach, nfe_detach),
331 DEVMETHOD(device_suspend, nfe_suspend),
332 DEVMETHOD(device_resume, nfe_resume),
333 DEVMETHOD(device_shutdown, nfe_shutdown),
334
335 /* Bus interface */
336 DEVMETHOD(bus_print_child, bus_generic_print_child),
337 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
338
339 /* MII interface */
340 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
341 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
342 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
343
344 { 0, 0 }
345};
346
347static driver_t nfe_driver = {
348 "nfe",
349 nfe_methods,
350 sizeof(struct nfe_softc)
351};
352
353static devclass_t nfe_devclass;
354
355DECLARE_DUMMY_MODULE(if_nfe);
356MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
357DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
358DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
359
360static int
361nfe_probe(device_t dev)
362{
363 const struct nfe_dev *n;
364 uint16_t vid, did;
365
366 vid = pci_get_vendor(dev);
367 did = pci_get_device(dev);
368 for (n = nfe_devices; n->desc != NULL; ++n) {
369 if (vid == n->vid && did == n->did) {
370 struct nfe_softc *sc = device_get_softc(dev);
371
372 switch (did) {
373 case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
374 case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
375 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
376 sc->sc_caps = NFE_NO_PWRCTL |
377 NFE_FIX_EADDR;
378 break;
379 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
380 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
381 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
382 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
383 sc->sc_caps = NFE_JUMBO_SUP |
384 NFE_HW_CSUM |
385 NFE_NO_PWRCTL |
386 NFE_FIX_EADDR;
387 break;
388 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
389 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
390 sc->sc_caps = NFE_FIX_EADDR;
391 /* FALL THROUGH */
392 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
393 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
394 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
395 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
396 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
397 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
398 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
399 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
400 case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
401 case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
402 case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
403 case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
404 sc->sc_caps |= NFE_40BIT_ADDR;
405 break;
406 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
407 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
408 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
409 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
410 sc->sc_caps = NFE_JUMBO_SUP |
411 NFE_40BIT_ADDR |
412 NFE_HW_CSUM |
413 NFE_NO_PWRCTL |
414 NFE_FIX_EADDR;
415 break;
416 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
417 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
418 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
419 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
420 sc->sc_caps = NFE_JUMBO_SUP |
421 NFE_40BIT_ADDR;
422 break;
423 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
424 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
425 sc->sc_caps = NFE_JUMBO_SUP |
426 NFE_40BIT_ADDR |
427 NFE_HW_CSUM |
428 NFE_HW_VLAN |
429 NFE_FIX_EADDR;
430 break;
431 case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
432 case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
433 case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
434 case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
435 case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
436 case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
437 case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
438 case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
439 sc->sc_caps = NFE_40BIT_ADDR |
440 NFE_HW_CSUM;
441 break;
442 }
443
444 device_set_desc(dev, n->desc);
445 device_set_async_attach(dev, TRUE);
446 return 0;
447 }
448 }
449 return ENXIO;
450}
451
452static int
453nfe_attach(device_t dev)
454{
455 struct nfe_softc *sc = device_get_softc(dev);
456 struct ifnet *ifp = &sc->arpcom.ac_if;
457 uint8_t eaddr[ETHER_ADDR_LEN];
458 bus_addr_t lowaddr;
459 int error;
460
461 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
462 lwkt_serialize_init(&sc->sc_jbuf_serializer);
463
464 /*
465 * Initialize sysctl variables
466 */
467 sc->sc_rx_ring_count = nfe_rx_ring_count;
468 sc->sc_tx_ring_count = nfe_tx_ring_count;
469 sc->sc_debug = nfe_debug;
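	/*
	 * A negative imtimer tunable selects dynamic interrupt
	 * moderation; its absolute value is used as the moderation
	 * period (in microseconds).
	 */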
470 if (nfe_imtime < 0) {
471 sc->sc_flags |= NFE_F_DYN_IM;
472 sc->sc_imtime = -nfe_imtime;
473 } else {
474 sc->sc_imtime = nfe_imtime;
475 }
476 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);
477
478 sc->sc_mem_rid = PCIR_BAR(0);
479
480 if (sc->sc_caps & NFE_40BIT_ADDR)
481 sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
482 else if (sc->sc_caps & NFE_JUMBO_SUP)
483 sc->rxtxctl_desc = NFE_RXTX_DESC_V2;
484
485#ifndef BURN_BRIDGES
486 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
487 uint32_t mem, irq;
488
489 mem = pci_read_config(dev, sc->sc_mem_rid, 4);
490 irq = pci_read_config(dev, PCIR_INTLINE, 4);
491
492 device_printf(dev, "chip is in D%d power mode "
493 "-- setting to D0\n", pci_get_powerstate(dev));
494
495 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
496
497 pci_write_config(dev, sc->sc_mem_rid, mem, 4);
498 pci_write_config(dev, PCIR_INTLINE, irq, 4);
499 }
500#endif /* !BURN_BRIDGES */
501
502 /* Enable bus mastering */
503 pci_enable_busmaster(dev);
504
505 /* Allocate IO memory */
506 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
507 &sc->sc_mem_rid, RF_ACTIVE);
508 if (sc->sc_mem_res == NULL) {
509 device_printf(dev, "could not allocate io memory\n");
510 return ENXIO;
511 }
512 sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
513 sc->sc_memt = rman_get_bustag(sc->sc_mem_res);
514
515 /* Allocate IRQ */
516 sc->sc_irq_rid = 0;
517 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
518 &sc->sc_irq_rid,
519 RF_SHAREABLE | RF_ACTIVE);
520 if (sc->sc_irq_res == NULL) {
521 device_printf(dev, "could not allocate irq\n");
522 error = ENXIO;
523 goto fail;
524 }
525
526 /* Disable WOL */
527 NFE_WRITE(sc, NFE_WOL_CTL, 0);
528
529 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
530 nfe_powerup(dev);
531
532 nfe_get_macaddr(sc, eaddr);
533
534 /*
535 * Allocate top level DMA tag
536 */
537 if (sc->sc_caps & NFE_40BIT_ADDR)
538 lowaddr = NFE_BUS_SPACE_MAXADDR;
539 else
540 lowaddr = BUS_SPACE_MAXADDR_32BIT;
541 error = bus_dma_tag_create(NULL, /* parent */
542 1, 0, /* alignment, boundary */
543 lowaddr, /* lowaddr */
544 BUS_SPACE_MAXADDR, /* highaddr */
545 NULL, NULL, /* filter, filterarg */
546 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
547 0, /* nsegments */
548 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
549 0, /* flags */
550 &sc->sc_dtag);
551 if (error) {
552 device_printf(dev, "could not allocate parent dma tag\n");
553 goto fail;
554 }
555
556 /*
557 * Allocate Tx and Rx rings.
558 */
559 error = nfe_alloc_tx_ring(sc, &sc->txq);
560 if (error) {
561 device_printf(dev, "could not allocate Tx ring\n");
562 goto fail;
563 }
564
565 error = nfe_alloc_rx_ring(sc, &sc->rxq);
566 if (error) {
567 device_printf(dev, "could not allocate Rx ring\n");
568 goto fail;
569 }
570
571 /*
572 * Create sysctl tree
573 */
574 sysctl_ctx_init(&sc->sc_sysctl_ctx);
575 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
576 SYSCTL_STATIC_CHILDREN(_hw),
577 OID_AUTO,
578 device_get_nameunit(dev),
579 CTLFLAG_RD, 0, "");
580 if (sc->sc_sysctl_tree == NULL) {
581 device_printf(dev, "can't add sysctl node\n");
582 error = ENXIO;
583 goto fail;
584 }
585 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
586 SYSCTL_CHILDREN(sc->sc_sysctl_tree),
587 OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
588 sc, 0, nfe_sysctl_imtime, "I",
589 "Interrupt moderation time (usec). "
590 "0 to disable interrupt moderation.");
591 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
592 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
593 "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
594 0, "RX ring count");
595 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
596 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
597 "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
598 0, "TX ring count");
599 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
600 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
601 "debug", CTLFLAG_RW, &sc->sc_debug,
602 0, "control debugging printfs");
603
604 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
605 nfe_ifmedia_sts);
606 if (error) {
607 device_printf(dev, "MII without any phy\n");
608 goto fail;
609 }
610
611 ifp->if_softc = sc;
612 ifp->if_mtu = ETHERMTU;
613 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
614 ifp->if_ioctl = nfe_ioctl;
615 ifp->if_start = nfe_start;
616#ifdef DEVICE_POLLING
617 ifp->if_poll = nfe_poll;
618#endif
619 ifp->if_watchdog = nfe_watchdog;
620 ifp->if_init = nfe_init;
621 ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
622 ifq_set_ready(&ifp->if_snd);
623
624 ifp->if_capabilities = IFCAP_VLAN_MTU;
625
626 if (sc->sc_caps & NFE_HW_VLAN)
627 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
628
629#ifdef NFE_CSUM
630 if (sc->sc_caps & NFE_HW_CSUM) {
631 ifp->if_capabilities |= IFCAP_HWCSUM;
632 ifp->if_hwassist = NFE_CSUM_FEATURES;
633 }
634#else
635 sc->sc_caps &= ~NFE_HW_CSUM;
636#endif
637 ifp->if_capenable = ifp->if_capabilities;
638
639 callout_init(&sc->sc_tick_ch);
640
641 ether_ifattach(ifp, eaddr, NULL);
642
643 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
644 &sc->sc_ih, ifp->if_serializer);
645 if (error) {
646 device_printf(dev, "could not setup intr\n");
647 ether_ifdetach(ifp);
648 goto fail;
649 }
650
651 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
652 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
653
654 return 0;
655fail:
656 nfe_detach(dev);
657 return error;
658}
659
660static int
661nfe_detach(device_t dev)
662{
663 struct nfe_softc *sc = device_get_softc(dev);
664
665 if (device_is_attached(dev)) {
666 struct ifnet *ifp = &sc->arpcom.ac_if;
667
668 lwkt_serialize_enter(ifp->if_serializer);
669 nfe_stop(sc);
670 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
671 lwkt_serialize_exit(ifp->if_serializer);
672
673 ether_ifdetach(ifp);
674 }
675
676 if (sc->sc_miibus != NULL)
677 device_delete_child(dev, sc->sc_miibus);
678 bus_generic_detach(dev);
679
680 if (sc->sc_sysctl_tree != NULL)
681 sysctl_ctx_free(&sc->sc_sysctl_ctx);
682
683 if (sc->sc_irq_res != NULL) {
684 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
685 sc->sc_irq_res);
686 }
687
688 if (sc->sc_mem_res != NULL) {
689 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
690 sc->sc_mem_res);
691 }
692
693 nfe_free_tx_ring(sc, &sc->txq);
694 nfe_free_rx_ring(sc, &sc->rxq);
695 if (sc->sc_dtag != NULL)
696 bus_dma_tag_destroy(sc->sc_dtag);
697
698 return 0;
699}
700
701static void
702nfe_shutdown(device_t dev)
703{
704 struct nfe_softc *sc = device_get_softc(dev);
705 struct ifnet *ifp = &sc->arpcom.ac_if;
706
707 lwkt_serialize_enter(ifp->if_serializer);
708 nfe_stop(sc);
709 lwkt_serialize_exit(ifp->if_serializer);
710}
711
712static int
713nfe_suspend(device_t dev)
714{
715 struct nfe_softc *sc = device_get_softc(dev);
716 struct ifnet *ifp = &sc->arpcom.ac_if;
717
718 lwkt_serialize_enter(ifp->if_serializer);
719 nfe_stop(sc);
720 lwkt_serialize_exit(ifp->if_serializer);
721
722 return 0;
723}
724
725static int
726nfe_resume(device_t dev)
727{
728 struct nfe_softc *sc = device_get_softc(dev);
729 struct ifnet *ifp = &sc->arpcom.ac_if;
730
731 lwkt_serialize_enter(ifp->if_serializer);
732 if (ifp->if_flags & IFF_UP)
733 nfe_init(sc);
734 lwkt_serialize_exit(ifp->if_serializer);
735
736 return 0;
737}
738
739static void
740nfe_miibus_statchg(device_t dev)
741{
742 struct nfe_softc *sc = device_get_softc(dev);
743 struct mii_data *mii = device_get_softc(sc->sc_miibus);
744 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
745
746 ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);
747
748 phy = NFE_READ(sc, NFE_PHY_IFACE);
749 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
750
751 seed = NFE_READ(sc, NFE_RNDSEED);
752 seed &= ~NFE_SEED_MASK;
753
754 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
755 phy |= NFE_PHY_HDX; /* half-duplex */
756 misc |= NFE_MISC1_HDX;
757 }
758
759 switch (IFM_SUBTYPE(mii->mii_media_active)) {
760 case IFM_1000_T: /* full-duplex only */
761 link |= NFE_MEDIA_1000T;
762 seed |= NFE_SEED_1000T;
763 phy |= NFE_PHY_1000T;
764 break;
765 case IFM_100_TX:
766 link |= NFE_MEDIA_100TX;
767 seed |= NFE_SEED_100TX;
768 phy |= NFE_PHY_100TX;
769 break;
770 case IFM_10_T:
771 link |= NFE_MEDIA_10T;
772 seed |= NFE_SEED_10T;
773 break;
774 }
775
776 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
777
778 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
779 NFE_WRITE(sc, NFE_MISC1, misc);
780 NFE_WRITE(sc, NFE_LINKSPEED, link);
781}
782
783static int
784nfe_miibus_readreg(device_t dev, int phy, int reg)
785{
786 struct nfe_softc *sc = device_get_softc(dev);
787 uint32_t val;
788 int ntries;
789
790 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
791
792 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
793 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
794 DELAY(100);
795 }
796
797 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
798
799 for (ntries = 0; ntries < 1000; ntries++) {
800 DELAY(100);
801 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
802 break;
803 }
804 if (ntries == 1000) {
805 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
806 return 0;
807 }
808
809 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
810 DPRINTFN(sc, 2, "could not read PHY %s\n", "");
811 return 0;
812 }
813
814 val = NFE_READ(sc, NFE_PHY_DATA);
815 if (val != 0xffffffff && val != 0)
816 sc->mii_phyaddr = phy;
817
818 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
819
820 return val;
821}
822
823static void
824nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
825{
826 struct nfe_softc *sc = device_get_softc(dev);
827 uint32_t ctl;
828 int ntries;
829
830 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
831
832 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
833 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
834 DELAY(100);
835 }
836
837 NFE_WRITE(sc, NFE_PHY_DATA, val);
838 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
839 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
840
841 for (ntries = 0; ntries < 1000; ntries++) {
842 DELAY(100);
843 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
844 break;
845 }
846
847#ifdef NFE_DEBUG
848 if (ntries == 1000)
849 DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
850#endif
851}
852
853#ifdef DEVICE_POLLING
854
855static void
856nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
857{
858 struct nfe_softc *sc = ifp->if_softc;
859
860 ASSERT_SERIALIZED(ifp->if_serializer);
861
862 switch(cmd) {
863 case POLL_REGISTER:
864 nfe_disable_intrs(sc);
865 break;
866
867 case POLL_DEREGISTER:
868 nfe_enable_intrs(sc);
869 break;
870
871 case POLL_AND_CHECK_STATUS:
872 /* fall through */
873 case POLL_ONLY:
874 if (ifp->if_flags & IFF_RUNNING) {
875 nfe_rxeof(sc);
876 nfe_txeof(sc, 1);
877 }
878 break;
879 }
880}
881
882#endif
883
884static void
885nfe_intr(void *arg)
886{
887 struct nfe_softc *sc = arg;
888 struct ifnet *ifp = &sc->arpcom.ac_if;
889 uint32_t r;
890
891 r = NFE_READ(sc, NFE_IRQ_STATUS);
892 if (r == 0)
893 return; /* not for us */
894 NFE_WRITE(sc, NFE_IRQ_STATUS, r);
895
896 if (sc->sc_rate_second != time_second) {
897 /*
898 * Calculate sc_rate_avg - interrupts per second.
899 */
900 sc->sc_rate_second = time_second;
901 if (sc->sc_rate_avg < sc->sc_rate_acc)
902 sc->sc_rate_avg = sc->sc_rate_acc;
903 else
904 sc->sc_rate_avg = (sc->sc_rate_avg * 3 +
905 sc->sc_rate_acc) / 4;
906 sc->sc_rate_acc = 0;
907 } else if (sc->sc_rate_avg < sc->sc_rate_acc) {
908 /*
909 * Don't wait for a tick to roll over if we are taking
910 * a lot of interrupts.
911 */
912 sc->sc_rate_avg = sc->sc_rate_acc;
913 }
914
915 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);
916
917 if (r & NFE_IRQ_LINK) {
918 NFE_READ(sc, NFE_PHY_STATUS);
919 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
920 DPRINTF(sc, "link state changed %s\n", "");
921 }
922
923 if (ifp->if_flags & IFF_RUNNING) {
924 int ret;
925 int rate;
926
927 /* check Rx ring */
928 ret = nfe_rxeof(sc);
929
930 /* check Tx ring */
931 ret |= nfe_txeof(sc, 1);
932
933 /* update the rate accumulator */
934 if (ret)
935 ++sc->sc_rate_acc;
936
937 if (sc->sc_flags & NFE_F_DYN_IM) {
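			/*
			 * Convert the moderation period (us) into an
			 * interrupt rate threshold, e.g. the default
			 * 500us period corresponds to 2000 ints/sec.
			 */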
938 rate = 1000000 / sc->sc_imtime;
939 if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 &&
940 sc->sc_rate_avg > rate) {
941 /*
942 * Use the hardware timer to reduce the
943 * interrupt rate if the discrete interrupt
944 * rate has exceeded our threshold.
945 */
946 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
947 sc->sc_flags |= NFE_F_IRQ_TIMER;
948 } else if ((sc->sc_flags & NFE_F_IRQ_TIMER) &&
949 sc->sc_rate_avg <= rate) {
950 /*
951 * Use discrete TX/RX interrupts if the rate
952 * has fallen below our threshold.
953 */
954 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
955 sc->sc_flags &= ~NFE_F_IRQ_TIMER;
956
957 /*
958 * Recollect, mainly to avoid the possible race
959 * introduced by changing interrupt masks.
960 */
961 nfe_rxeof(sc);
962 nfe_txeof(sc, 1);
963 }
964 }
965 }
966}
967
968static int
969nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
970{
971 struct nfe_softc *sc = ifp->if_softc;
972 struct ifreq *ifr = (struct ifreq *)data;
973 struct mii_data *mii;
974 int error = 0, mask, jumbo_cap;
975
976 ASSERT_SERIALIZED(ifp->if_serializer);
977
978 switch (cmd) {
979 case SIOCSIFMTU:
980 if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
981 jumbo_cap = 1;
982 else
983 jumbo_cap = 0;
984
985 if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
986 (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
987 return EINVAL;
988 } else if (ifp->if_mtu != ifr->ifr_mtu) {
989 ifp->if_mtu = ifr->ifr_mtu;
990 if (ifp->if_flags & IFF_RUNNING)
991 nfe_init(sc);
992 }
993 break;
994 case SIOCSIFFLAGS:
995 if (ifp->if_flags & IFF_UP) {
996 /*
997 * If only the PROMISC or ALLMULTI flag changes, then
998 * don't do a full re-init of the chip, just update
999 * the Rx filter.
1000 */
1001 if ((ifp->if_flags & IFF_RUNNING) &&
1002 ((ifp->if_flags ^ sc->sc_if_flags) &
1003 (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1004 nfe_setmulti(sc);
1005 } else {
1006 if (!(ifp->if_flags & IFF_RUNNING))
1007 nfe_init(sc);
1008 }
1009 } else {
1010 if (ifp->if_flags & IFF_RUNNING)
1011 nfe_stop(sc);
1012 }
1013 sc->sc_if_flags = ifp->if_flags;
1014 break;
1015 case SIOCADDMULTI:
1016 case SIOCDELMULTI:
1017 if (ifp->if_flags & IFF_RUNNING)
1018 nfe_setmulti(sc);
1019 break;
1020 case SIOCSIFMEDIA:
1021 case SIOCGIFMEDIA:
1022 mii = device_get_softc(sc->sc_miibus);
1023 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1024 break;
1025 case SIOCSIFCAP:
1026 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
1027 if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
1028 ifp->if_capenable ^= mask;
1029 if (IFCAP_TXCSUM & ifp->if_capenable)
1030 ifp->if_hwassist = NFE_CSUM_FEATURES;
1031 else
1032 ifp->if_hwassist = 0;
1033
1034 if (ifp->if_flags & IFF_RUNNING)
1035 nfe_init(sc);
1036 }
1037 break;
1038 default:
1039 error = ether_ioctl(ifp, cmd, data);
1040 break;
1041 }
1042 return error;
1043}
1044
1045static int
1046nfe_rxeof(struct nfe_softc *sc)
1047{
1048 struct ifnet *ifp = &sc->arpcom.ac_if;
1049 struct nfe_rx_ring *ring = &sc->rxq;
1050 int reap;
1051 struct mbuf_chain chain[MAXCPU];
1052
1053 reap = 0;
1054 ether_input_chain_init(chain);
1055
1056 for (;;) {
1057 struct nfe_rx_data *data = &ring->data[ring->cur];
1058 struct mbuf *m;
1059 uint16_t flags;
1060 int len, error;
1061
1062 if (sc->sc_caps & NFE_40BIT_ADDR) {
1063 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];
1064
1065 flags = le16toh(desc64->flags);
1066 len = le16toh(desc64->length) & 0x3fff;
1067 } else {
1068 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];
1069
1070 flags = le16toh(desc32->flags);
1071 len = le16toh(desc32->length) & 0x3fff;
1072 }
1073
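		/*
		 * NFE_RX_READY is still set: the chip has not filled
		 * this descriptor yet, so stop reaping here.
		 */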
1074 if (flags & NFE_RX_READY)
1075 break;
1076
1077 reap = 1;
1078
1079 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1080 if (!(flags & NFE_RX_VALID_V1))
1081 goto skip;
1082
1083 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
1084 flags &= ~NFE_RX_ERROR;
1085 len--; /* fix buffer length */
1086 }
1087 } else {
1088 if (!(flags & NFE_RX_VALID_V2))
1089 goto skip;
1090
1091 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
1092 flags &= ~NFE_RX_ERROR;
1093 len--; /* fix buffer length */
1094 }
1095 }
1096
1097 if (flags & NFE_RX_ERROR) {
1098 ifp->if_ierrors++;
1099 goto skip;
1100 }
1101
1102 m = data->m;
1103
1104 if (sc->sc_flags & NFE_F_USE_JUMBO)
1105 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
1106 else
1107 error = nfe_newbuf_std(sc, ring, ring->cur, 0);
1108 if (error) {
1109 ifp->if_ierrors++;
1110 goto skip;
1111 }
1112
1113 /* finalize mbuf */
1114 m->m_pkthdr.len = m->m_len = len;
1115 m->m_pkthdr.rcvif = ifp;
1116
1117 if ((ifp->if_capenable & IFCAP_RXCSUM) &&
1118 (flags & NFE_RX_CSUMOK)) {
1119 if (flags & NFE_RX_IP_CSUMOK_V2) {
1120 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
1121 CSUM_IP_VALID;
1122 }
1123
1124 if (flags &
1125 (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
1126 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
1127 CSUM_PSEUDO_HDR |
1128 CSUM_FRAG_NOT_CHECKED;
1129 m->m_pkthdr.csum_data = 0xffff;
1130 }
1131 }
1132
1133 ifp->if_ipackets++;
1134 ether_input_chain(ifp, m, NULL, chain);
1135skip:
1136 nfe_set_ready_rxdesc(sc, ring, ring->cur);
1137 sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
1138 }
1139
1140 if (reap)
1141 ether_input_dispatch(chain);
1142 return reap;
1143}
1144
1145static int
1146nfe_txeof(struct nfe_softc *sc, int start)
1147{
1148 struct ifnet *ifp = &sc->arpcom.ac_if;
1149 struct nfe_tx_ring *ring = &sc->txq;
1150 struct nfe_tx_data *data = NULL;
1151
1152 while (ring->next != ring->cur) {
1153 uint16_t flags;
1154
1155 if (sc->sc_caps & NFE_40BIT_ADDR)
1156 flags = le16toh(ring->desc64[ring->next].flags);
1157 else
1158 flags = le16toh(ring->desc32[ring->next].flags);
1159
1160 if (flags & NFE_TX_VALID)
1161 break;
1162
1163 data = &ring->data[ring->next];
1164
1165 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1166 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1167 goto skip;
1168
1169 if ((flags & NFE_TX_ERROR_V1) != 0) {
1170 if_printf(ifp, "tx v1 error 0x%4b\n", flags,
1171 NFE_V1_TXERR);
1172 ifp->if_oerrors++;
1173 } else {
1174 ifp->if_opackets++;
1175 }
1176 } else {
1177 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1178 goto skip;
1179
1180 if ((flags & NFE_TX_ERROR_V2) != 0) {
1181 if_printf(ifp, "tx v2 error 0x%4b\n", flags,
1182 NFE_V2_TXERR);
1183 ifp->if_oerrors++;
1184 } else {
1185 ifp->if_opackets++;
1186 }
1187 }
1188
1189		if (data->m == NULL) {	/* should not get here */
1190 if_printf(ifp,
1191 "last fragment bit w/o associated mbuf!\n");
1192 goto skip;
1193 }
1194
1195 /* last fragment of the mbuf chain transmitted */
1196 bus_dmamap_unload(ring->data_tag, data->map);
1197 m_freem(data->m);
1198 data->m = NULL;
1199skip:
1200 ring->queued--;
1201 KKASSERT(ring->queued >= 0);
1202 ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
1203 }
1204
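	/*
	 * Clear OACTIVE once enough descriptors have been reclaimed to
	 * hold another frame (spare segments plus the reserved slots).
	 */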
1205 if (sc->sc_tx_ring_count - ring->queued >=
1206 sc->sc_tx_spare + NFE_NSEG_RSVD)
1207 ifp->if_flags &= ~IFF_OACTIVE;
1208
1209 if (ring->queued == 0)
1210 ifp->if_timer = 0;
1211
1212 if (start && !ifq_is_empty(&ifp->if_snd))
1213 if_devstart(ifp);
1214
1215 if (data != NULL)
1216 return 1;
1217 else
1218 return 0;
1219}
1220
1221static int
1222nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
1223{
1224 bus_dma_segment_t segs[NFE_MAX_SCATTER];
1225 struct nfe_tx_data *data, *data_map;
1226 bus_dmamap_t map;
1227 struct nfe_desc64 *desc64 = NULL;
1228 struct nfe_desc32 *desc32 = NULL;
1229 uint16_t flags = 0;
1230 uint32_t vtag = 0;
1231 int error, i, j, maxsegs, nsegs;
1232
1233 data = &ring->data[ring->cur];
1234 map = data->map;
1235 data_map = data; /* Remember who owns the DMA map */
1236
1237 maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
1238 if (maxsegs > NFE_MAX_SCATTER)
1239 maxsegs = NFE_MAX_SCATTER;
1240 KASSERT(maxsegs >= sc->sc_tx_spare,
1241 ("no enough segments %d,%d\n", maxsegs, sc->sc_tx_spare));
1242
1243 error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0,
1244 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1245 if (error)
1246 goto back;
1247 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);
1248
1249 error = 0;
1250
1251 /* setup h/w VLAN tagging */
1252 if (m0->m_flags & M_VLANTAG)
1253 vtag = m0->m_pkthdr.ether_vlantag;
1254
1255 if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
1256 if (m0->m_pkthdr.csum_flags & CSUM_IP)
1257 flags |= NFE_TX_IP_CSUM;
1258 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
1259 flags |= NFE_TX_TCP_CSUM;
1260 }
1261
1262 /*
1263 * XXX urm. somebody is unaware of how hardware works. You
1264 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
1265 * the ring until the entire chain is actually *VALID*. Otherwise
1266 * the hardware may encounter a partially initialized chain that
1267 * is marked as being ready to go when it in fact is not ready to
1268 * go.
1269 */
1270
1271 for (i = 0; i < nsegs; i++) {
1272 j = (ring->cur + i) % sc->sc_tx_ring_count;
1273 data = &ring->data[j];
1274
1275 if (sc->sc_caps & NFE_40BIT_ADDR) {
1276 desc64 = &ring->desc64[j];
1277 desc64->physaddr[0] =
1278 htole32(NFE_ADDR_HI(segs[i].ds_addr));
1279 desc64->physaddr[1] =
1280 htole32(NFE_ADDR_LO(segs[i].ds_addr));
1281 desc64->length = htole16(segs[i].ds_len - 1);
1282 desc64->vtag = htole32(vtag);
1283 desc64->flags = htole16(flags);
1284 } else {
1285 desc32 = &ring->desc32[j];
1286 desc32->physaddr = htole32(segs[i].ds_addr);
1287 desc32->length = htole16(segs[i].ds_len - 1);
1288 desc32->flags = htole16(flags);
1289 }
1290
1291 /* csum flags and vtag belong to the first fragment only */
1292 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1293 vtag = 0;
1294
1295 ring->queued++;
1296 KKASSERT(ring->queued <= sc->sc_tx_ring_count);
1297 }
1298
1299 /* the whole mbuf chain has been DMA mapped, fix last descriptor */
1300 if (sc->sc_caps & NFE_40BIT_ADDR) {
1301 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
1302 } else {
1303 if (sc->sc_caps & NFE_JUMBO_SUP)
1304 flags = NFE_TX_LASTFRAG_V2;
1305 else
1306 flags = NFE_TX_LASTFRAG_V1;
1307 desc32->flags |= htole16(flags);
1308 }
1309
1310 /*
1311 * Set NFE_TX_VALID backwards so the hardware doesn't see the
1312 * whole mess until the first descriptor in the map is flagged.
1313 */
1314 for (i = nsegs - 1; i >= 0; --i) {
1315 j = (ring->cur + i) % sc->sc_tx_ring_count;
1316 if (sc->sc_caps & NFE_40BIT_ADDR) {
1317 desc64 = &ring->desc64[j];
1318 desc64->flags |= htole16(NFE_TX_VALID);
1319 } else {
1320 desc32 = &ring->desc32[j];
1321 desc32->flags |= htole16(NFE_TX_VALID);
1322 }
1323 }
1324 ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count;
1325
1326 /* Exchange DMA map */
1327 data_map->map = data->map;
1328 data->map = map;
1329 data->m = m0;
1330back:
1331 if (error)
1332 m_freem(m0);
1333 return error;
1334}
1335
1336static void
1337nfe_start(struct ifnet *ifp)
1338{
1339 struct nfe_softc *sc = ifp->if_softc;
1340 struct nfe_tx_ring *ring = &sc->txq;
1341 int count = 0, oactive = 0;
1342 struct mbuf *m0;
1343
1344 ASSERT_SERIALIZED(ifp->if_serializer);
1345
1346 if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
1347 return;
1348
1349 for (;;) {
1350 int error;
1351
1352 if (sc->sc_tx_ring_count - ring->queued <
1353 sc->sc_tx_spare + NFE_NSEG_RSVD) {
1354 if (oactive) {
1355 ifp->if_flags |= IFF_OACTIVE;
1356 break;
1357 }
1358
1359 nfe_txeof(sc, 0);
1360 oactive = 1;
1361 continue;
1362 }
1363
1364 m0 = ifq_dequeue(&ifp->if_snd, NULL);
1365 if (m0 == NULL)
1366 break;
1367
1368 ETHER_BPF_MTAP(ifp, m0);
1369
1370 error = nfe_encap(sc, ring, m0);
1371 if (error) {
1372 ifp->if_oerrors++;
1373 if (error == EFBIG) {
1374 if (oactive) {
1375 ifp->if_flags |= IFF_OACTIVE;
1376 break;
1377 }
1378 nfe_txeof(sc, 0);
1379 oactive = 1;
1380 }
1381 continue;
1382 } else {
1383 oactive = 0;
1384 }
1385 ++count;
1386
1387 /*
1388 * NOTE:
1389 * `m0' may be freed in nfe_encap(), so
1390 * it should not be touched any more.
1391 */
1392 }
1393
1394 if (count == 0) /* nothing sent */
1395 return;
1396
1397 /* Kick Tx */
1398 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1399
1400 /*
1401 * Set a timeout in case the chip goes out to lunch.
1402 */
1403 ifp->if_timer = 5;
1404}
1405
1406static void
1407nfe_watchdog(struct ifnet *ifp)
1408{
1409 struct nfe_softc *sc = ifp->if_softc;
1410
1411 ASSERT_SERIALIZED(ifp->if_serializer);
1412
1413 if (ifp->if_flags & IFF_RUNNING) {
1414 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
1415 nfe_txeof(sc, 1);
1416 return;
1417 }
1418
1419 if_printf(ifp, "watchdog timeout\n");
1420
1421 nfe_init(ifp->if_softc);
1422
1423 ifp->if_oerrors++;
1424}
1425
1426static void
1427nfe_init(void *xsc)
1428{
1429 struct nfe_softc *sc = xsc;
1430 struct ifnet *ifp = &sc->arpcom.ac_if;
1431 uint32_t tmp;
1432 int error;
1433
1434 ASSERT_SERIALIZED(ifp->if_serializer);
1435
1436 nfe_stop(sc);
1437
1438 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
1439 nfe_mac_reset(sc);
1440
1441 /*
1442 * NOTE:
1443 * Switching between jumbo frames and normal frames should
1444 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
1445 */
1446 if (ifp->if_mtu > ETHERMTU) {
1447 sc->sc_flags |= NFE_F_USE_JUMBO;
1448 sc->rxq.bufsz = NFE_JBYTES;
1449 sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
1450 if (bootverbose)
1451 if_printf(ifp, "use jumbo frames\n");
1452 } else {
1453 sc->sc_flags &= ~NFE_F_USE_JUMBO;
1454 sc->rxq.bufsz = MCLBYTES;
1455 sc->sc_tx_spare = NFE_NSEG_SPARE;
1456 if (bootverbose)
1457 if_printf(ifp, "use non-jumbo frames\n");
1458 }
1459
1460 error = nfe_init_tx_ring(sc, &sc->txq);
1461 if (error) {
1462 nfe_stop(sc);
1463 return;
1464 }
1465
1466 error = nfe_init_rx_ring(sc, &sc->rxq);
1467 if (error) {
1468 nfe_stop(sc);
1469 return;
1470 }
1471
1472 NFE_WRITE(sc, NFE_TX_POLL, 0);
1473 NFE_WRITE(sc, NFE_STATUS, 0);
1474
1475 sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;
1476
1477 if (ifp->if_capenable & IFCAP_RXCSUM)
1478 sc->rxtxctl |= NFE_RXTX_RXCSUM;
1479
1480 /*
1481 * Although the adapter is capable of stripping VLAN tags from received
1482 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1483 * purpose. This will be done in software by our network stack.
1484 */
1485 if (sc->sc_caps & NFE_HW_VLAN)
1486 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1487
1488 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1489 DELAY(10);
1490 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1491
1492 if (sc->sc_caps & NFE_HW_VLAN)
1493 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1494
1495 NFE_WRITE(sc, NFE_SETUP_R6, 0);
1496
1497 /* set MAC address */
1498 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);
1499
1500 /* tell MAC where rings are in memory */
1501 if (sc->sc_caps & NFE_40BIT_ADDR) {
1502 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
1503 NFE_ADDR_HI(sc->rxq.physaddr));
1504 }
1505 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr));
1506
1507 if (sc->sc_caps & NFE_40BIT_ADDR) {
1508 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI,
1509 NFE_ADDR_HI(sc->txq.physaddr));
1510 }
1511 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
1512
1513 NFE_WRITE(sc, NFE_RING_SIZE,
1514 (sc->sc_rx_ring_count - 1) << 16 |
1515 (sc->sc_tx_ring_count - 1));
1516
1517 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1518
1519 /* force MAC to wakeup */
1520 tmp = NFE_READ(sc, NFE_PWR_STATE);
1521 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1522 DELAY(10);
1523 tmp = NFE_READ(sc, NFE_PWR_STATE);
1524 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1525
1526 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1527 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1528 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1529
1530 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1531 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1532
1533 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1534
1535 sc->rxtxctl &= ~NFE_RXTX_BIT2;
1536 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1537 DELAY(10);
1538 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1539
1540 /* set Rx filter */
1541 nfe_setmulti(sc);
1542
1543 nfe_ifmedia_upd(ifp);
1544
1545 /* enable Rx */
1546 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1547
1548 /* enable Tx */
1549 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1550
1551 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1552
1553#ifdef DEVICE_POLLING
1554 if ((ifp->if_flags & IFF_POLLING))
1555 nfe_disable_intrs(sc);
1556 else
1557#endif
1558 nfe_enable_intrs(sc);
1559
1560 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1561
1562 ifp->if_flags |= IFF_RUNNING;
1563 ifp->if_flags &= ~IFF_OACTIVE;
1564
1565 /*
1566	 * If there was stuff in the tx ring before, it has all been cleaned
1567	 * out now, so we are not going to get an interrupt; jump-start any
1568	 * pending output.
1569 */
1570 if (!ifq_is_empty(&ifp->if_snd))
1571 if_devstart(ifp);
1572}
1573
1574static void
1575nfe_stop(struct nfe_softc *sc)
1576{
1577 struct ifnet *ifp = &sc->arpcom.ac_if;
1578 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
1579 int i;
1580
1581 ASSERT_SERIALIZED(ifp->if_serializer);
1582
1583 callout_stop(&sc->sc_tick_ch);
1584
1585 ifp->if_timer = 0;
1586 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1587 sc->sc_flags &= ~NFE_F_IRQ_TIMER;
1588
1589#define WAITMAX 50000
1590
1591 /*
1592 * Abort Tx
1593 */
1594 NFE_WRITE(sc, NFE_TX_CTL, 0);
1595 for (i = 0; i < WAITMAX; ++i) {
1596 DELAY(100);
1597 if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
1598 break;
1599 }
1600 if (i == WAITMAX)
1601 if_printf(ifp, "can't stop TX\n");
1602 DELAY(100);
1603
1604 /*
1605 * Disable Rx
1606 */
1607 NFE_WRITE(sc, NFE_RX_CTL, 0);
1608 for (i = 0; i < WAITMAX; ++i) {
1609 DELAY(100);
1610 if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
1611 break;
1612 }
1613 if (i == WAITMAX)
1614 if_printf(ifp, "can't stop RX\n");
1615 DELAY(100);
1616
1617#undef WAITMAX
1618
1619 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
1620 DELAY(10);
1621 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
1622
1623 /* Disable interrupts */
1624 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1625
1626 /* Reset Tx and Rx rings */
1627 nfe_reset_tx_ring(sc, &sc->txq);
1628 nfe_reset_rx_ring(sc, &sc->rxq);
1629}
1630
1631static int
1632nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1633{
1634 int i, j, error, descsize;
1635 bus_dmamem_t dmem;
1636 void **desc;
1637
1638 if (sc->sc_caps & NFE_40BIT_ADDR) {
1639 desc = (void **)&ring->desc64;
1640 descsize = sizeof(struct nfe_desc64);
1641 } else {
1642 desc = (void **)&ring->desc32;
1643 descsize = sizeof(struct nfe_desc32);
1644 }
1645
1646 ring->bufsz = MCLBYTES;
1647 ring->cur = ring->next = 0;
1648
1649 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
1650 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1651 sc->sc_rx_ring_count * descsize,
1652 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1653 if (error) {
1654 if_printf(&sc->arpcom.ac_if,
1655 "could not create RX desc ring\n");
1656 return error;
1657 }
1658 ring->tag = dmem.dmem_tag;
1659 ring->map = dmem.dmem_map;
1660 *desc = dmem.dmem_addr;
1661 ring->physaddr = dmem.dmem_busaddr;
1662
1663 if (sc->sc_caps & NFE_JUMBO_SUP) {
1664 ring->jbuf =
1665 kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
1666 M_DEVBUF, M_WAITOK | M_ZERO);
1667
1668 error = nfe_jpool_alloc(sc, ring);
1669 if (error) {
1670 if_printf(&sc->arpcom.ac_if,
1671 "could not allocate jumbo frames\n");
1672 kfree(ring->jbuf, M_DEVBUF);
1673 ring->jbuf = NULL;
1674 /* Allow jumbo frame allocation to fail */
1675 }
1676 }
1677
1678 ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
1679 M_DEVBUF, M_WAITOK | M_ZERO);
1680
1681 error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
1682 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1683 NULL, NULL,
1684 MCLBYTES, 1, MCLBYTES,
1685 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
1686 &ring->data_tag);
1687 if (error) {
1688 if_printf(&sc->arpcom.ac_if,
1689 "could not create RX mbuf DMA tag\n");
1690 return error;
1691 }
1692
1693 /* Create a spare RX mbuf DMA map */
1694 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
1695 &ring->data_tmpmap);
1696 if (error) {
1697 if_printf(&sc->arpcom.ac_if,
1698 "could not create spare RX mbuf DMA map\n");
1699 bus_dma_tag_destroy(ring->data_tag);
1700 ring->data_tag = NULL;
1701 return error;
1702 }
1703
1704 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1705 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
1706 &ring->data[i].map);
1707 if (error) {
1708 if_printf(&sc->arpcom.ac_if,
1709 "could not create %dth RX mbuf DMA mapn", i);
1710 goto fail;
1711 }
1712 }
1713 return 0;
1714fail:
1715 for (j = 0; j < i; ++j)
1716		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1717 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1718 bus_dma_tag_destroy(ring->data_tag);
1719 ring->data_tag = NULL;
1720 return error;
1721}
1722
1723static void
1724nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1725{
1726 int i;
1727
1728 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1729 struct nfe_rx_data *data = &ring->data[i];
1730
1731 if (data->m != NULL) {
1732 if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
1733 bus_dmamap_unload(ring->data_tag, data->map);
1734 m_freem(data->m);
1735 data->m = NULL;
1736 }
1737 }
1738
1739 ring->cur = ring->next = 0;
1740}
1741
1742static int
1743nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1744{
1745 int i;
1746
1747 for (i = 0; i < sc->sc_rx_ring_count; ++i) {
1748 int error;
1749
1750 /* XXX should use a function pointer */
1751 if (sc->sc_flags & NFE_F_USE_JUMBO)
1752 error = nfe_newbuf_jumbo(sc, ring, i, 1);
1753 else
1754 error = nfe_newbuf_std(sc, ring, i, 1);
1755 if (error) {
1756 if_printf(&sc->arpcom.ac_if,
1757 "could not allocate RX buffer\n");
1758 return error;
1759 }
1760 nfe_set_ready_rxdesc(sc, ring, i);
1761 }
1762 return 0;
1763}
1764
1765static void
1766nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1767{
1768 if (ring->data_tag != NULL) {
1769 struct nfe_rx_data *data;
1770 int i;
1771
1772 for (i = 0; i < sc->sc_rx_ring_count; i++) {
1773 data = &ring->data[i];
1774
1775 if (data->m != NULL) {
1776 bus_dmamap_unload(ring->data_tag, data->map);
1777 m_freem(data->m);
1778 }
1779 bus_dmamap_destroy(ring->data_tag, data->map);
1780 }
1781 bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
1782 bus_dma_tag_destroy(ring->data_tag);
1783 }
1784
1785 nfe_jpool_free(sc, ring);
1786
1787 if (ring->jbuf != NULL)
1788 kfree(ring->jbuf, M_DEVBUF);
1789 if (ring->data != NULL)
1790 kfree(ring->data, M_DEVBUF);
1791
1792 if (ring->tag != NULL) {
1793 void *desc;
1794
1795 if (sc->sc_caps & NFE_40BIT_ADDR)
1796 desc = ring->desc64;
1797 else
1798 desc = ring->desc32;
1799
1800 bus_dmamap_unload(ring->tag, ring->map);
1801 bus_dmamem_free(ring->tag, desc, ring->map);
1802 bus_dma_tag_destroy(ring->tag);
1803 }
1804}
1805
1806static struct nfe_jbuf *
1807nfe_jalloc(struct nfe_softc *sc)
1808{
1809 struct ifnet *ifp = &sc->arpcom.ac_if;
1810 struct nfe_jbuf *jbuf;
1811
1812 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1813
1814 jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
1815 if (jbuf != NULL) {
1816 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
1817 jbuf->inuse = 1;
1818 } else {
1819 if_printf(ifp, "no free jumbo buffer\n");
1820 }
1821
1822 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1823
1824 return jbuf;
1825}
1826
1827static void
1828nfe_jfree(void *arg)
1829{
1830 struct nfe_jbuf *jbuf = arg;
1831 struct nfe_softc *sc = jbuf->sc;
1832 struct nfe_rx_ring *ring = jbuf->ring;
1833
1834 if (&ring->jbuf[jbuf->slot] != jbuf)
1835 panic("%s: free wrong jumbo buffer\n", __func__);
1836 else if (jbuf->inuse == 0)
1837 panic("%s: jumbo buffer already freed\n", __func__);
1838
1839 lwkt_serialize_enter(&sc->sc_jbuf_serializer);
1840 atomic_subtract_int(&jbuf->inuse, 1);
1841 if (jbuf->inuse == 0)
1842 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1843 lwkt_serialize_exit(&sc->sc_jbuf_serializer);
1844}
1845
1846static void
1847nfe_jref(void *arg)
1848{
1849 struct nfe_jbuf *jbuf = arg;
1850 struct nfe_rx_ring *ring = jbuf->ring;
1851
1852 if (&ring->jbuf[jbuf->slot] != jbuf)
1853 panic("%s: ref wrong jumbo buffer\n", __func__);
1854 else if (jbuf->inuse == 0)
1855 panic("%s: jumbo buffer already freed\n", __func__);
1856
1857 atomic_add_int(&jbuf->inuse, 1);
1858}
1859
1860static int
1861nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1862{
1863 struct nfe_jbuf *jbuf;
1864 bus_dmamem_t dmem;
1865 bus_addr_t physaddr;
1866 caddr_t buf;
1867 int i, error;
1868
1869 /*
1870 * Allocate a big chunk of DMA'able memory.
1871 */
1872 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
1873 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1874 NFE_JPOOL_SIZE(sc),
1875 BUS_DMA_WAITOK, &dmem);
1876 if (error) {
1877 if_printf(&sc->arpcom.ac_if,
1878 "could not create jumbo buffer\n");
1879 return error;
1880 }
1881 ring->jtag = dmem.dmem_tag;
1882 ring->jmap = dmem.dmem_map;
1883 ring->jpool = dmem.dmem_addr;
1884 physaddr = dmem.dmem_busaddr;
1885
1886	/* ... and split it into 9KB chunks */
1887 SLIST_INIT(&ring->jfreelist);
1888
1889 buf = ring->jpool;
1890 for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
1891 jbuf = &ring->jbuf[i];
1892
1893 jbuf->sc = sc;
1894 jbuf->ring = ring;
1895 jbuf->inuse = 0;
1896 jbuf->slot = i;
1897 jbuf->buf = buf;
1898 jbuf->physaddr = physaddr;
1899
1900 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
1901
1902 buf += NFE_JBYTES;
1903 physaddr += NFE_JBYTES;
1904 }
1905
1906 return 0;
1907}
1908
1909static void
1910nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1911{
1912 if (ring->jtag != NULL) {
1913 bus_dmamap_unload(ring->jtag, ring->jmap);
1914 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
1915 bus_dma_tag_destroy(ring->jtag);
1916 }
1917}
1918
1919static int
1920nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1921{
1922 int i, j, error, descsize;
1923 bus_dmamem_t dmem;
1924 void **desc;
1925
1926 if (sc->sc_caps & NFE_40BIT_ADDR) {
1927 desc = (void **)&ring->desc64;
1928 descsize = sizeof(struct nfe_desc64);
1929 } else {
1930 desc = (void **)&ring->desc32;
1931 descsize = sizeof(struct nfe_desc32);
1932 }
1933
1934 ring->queued = 0;
1935 ring->cur = ring->next = 0;
1936
1937 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
1938 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1939 sc->sc_tx_ring_count * descsize,
1940 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
1941 if (error) {
1942 if_printf(&sc->arpcom.ac_if,
1943 "could not create TX desc ring\n");
1944 return error;
1945 }
1946 ring->tag = dmem.dmem_tag;
1947 ring->map = dmem.dmem_map;
1948 *desc = dmem.dmem_addr;
1949 ring->physaddr = dmem.dmem_busaddr;
1950
1951 ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
1952 M_DEVBUF, M_WAITOK | M_ZERO);
1953
1954 error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
1955 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1956 NULL, NULL,
1957 NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES,
1958 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1959 &ring->data_tag);
1960 if (error) {
1961 if_printf(&sc->arpcom.ac_if,
1962 "could not create TX buf DMA tag\n");
1963 return error;
1964 }
1965
1966 for (i = 0; i < sc->sc_tx_ring_count; i++) {
1967 error = bus_dmamap_create(ring->data_tag,
1968 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1969 &ring->data[i].map);
1970 if (error) {
1971 if_printf(&sc->arpcom.ac_if,
1972 "could not create %dth TX buf DMA map\n", i);
1973 goto fail;
1974 }
1975 }
1976
1977 return 0;
1978fail:
1979 for (j = 0; j < i; ++j)
1980		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
1981 bus_dma_tag_destroy(ring->data_tag);
1982 ring->data_tag = NULL;
1983 return error;
1984}
1985
1986static void
1987nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1988{
1989 int i;
1990
1991 for (i = 0; i < sc->sc_tx_ring_count; i++) {
1992 struct nfe_tx_data *data = &ring->data[i];
1993
1994 if (sc->sc_caps & NFE_40BIT_ADDR)
1995 ring->desc64[i].flags = 0;
1996 else
1997 ring->desc32[i].flags = 0;
1998
1999 if (data->m != NULL) {
2000 bus_dmamap_unload(ring->data_tag, data->map);
2001 m_freem(data->m);
2002 data->m = NULL;
2003 }
2004 }
2005
2006 ring->queued = 0;
2007 ring->cur = ring->next = 0;
2008}
2009
2010static int
2011nfe_init_tx_ring(struct nfe_softc *sc __unused,
2012 struct nfe_tx_ring *ring __unused)
2013{
2014 return 0;
2015}
2016
2017static void
2018nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
2019{
2020 if (ring->data_tag != NULL) {
2021 struct nfe_tx_data *data;
2022 int i;
2023
2024 for (i = 0; i < sc->sc_tx_ring_count; ++i) {
2025 data = &ring->data[i];
2026
2027 if (data->m != NULL) {
2028 bus_dmamap_unload(ring->data_tag, data->map);
2029 m_freem(data->m);
2030 }
2031 bus_dmamap_destroy(ring->data_tag, data->map);
2032 }
2033
2034 bus_dma_tag_destroy(ring->data_tag);
2035 }
2036
2037 if (ring->data != NULL)
2038 kfree(ring->data, M_DEVBUF);
2039
2040 if (ring->tag != NULL) {
2041 void *desc;
2042
2043 if (sc->sc_caps & NFE_40BIT_ADDR)
2044 desc = ring->desc64;
2045 else
2046 desc = ring->desc32;
2047
2048 bus_dmamap_unload(ring->tag, ring->map);
2049 bus_dmamem_free(ring->tag, desc, ring->map);
2050 bus_dma_tag_destroy(ring->tag);
2051 }
2052}
2053
2054static int
2055nfe_ifmedia_upd(struct ifnet *ifp)
2056{
2057 struct nfe_softc *sc = ifp->if_softc;
2058 struct mii_data *mii = device_get_softc(sc->sc_miibus);
2059
2060 ASSERT_SERIALIZED(ifp->if_serializer);
2061
2062 if (mii->mii_instance != 0) {
2063 struct mii_softc *miisc;
2064
2065 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2066 mii_phy_reset(miisc);
2067 }
2068 mii_mediachg(mii);
2069
2070 return 0;
2071}
2072
2073static void
2074nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2075{
2076 struct nfe_softc *sc = ifp->if_softc;
2077 struct mii_data *mii = device_get_softc(sc->sc_miibus);
2078
2079 ASSERT_SERIALIZED(ifp->if_serializer);
2080
2081 mii_pollstat(mii);
2082 ifmr->ifm_status = mii->mii_media_status;
2083 ifmr->ifm_active = mii->mii_media_active;
2084}
2085
2086static void
2087nfe_setmulti(struct nfe_softc *sc)
2088{
2089 struct ifnet *ifp = &sc->arpcom.ac_if;
2090 struct ifmultiaddr *ifma;
2091 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2092 uint32_t filter = NFE_RXFILTER_MAGIC;
2093 int i;
2094
2095 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2096 bzero(addr, ETHER_ADDR_LEN);
2097 bzero(mask, ETHER_ADDR_LEN);
2098 goto done;
2099 }
2100
2101 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2102 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2103
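	/*
	 * Fold all multicast addresses into a single address/mask pair:
	 * addr keeps the bit values the addresses have in common, and
	 * mask ends up set only in the bit positions where every address
	 * agrees, so the hardware matches on just those bits.
	 */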
2104 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2105 caddr_t maddr;
2106
2107 if (ifma->ifma_addr->sa_family != AF_LINK)
2108 continue;
2109
2110 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2111 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2112 addr[i] &= maddr[i];
2113 mask[i] &= ~maddr[i];
2114 }
2115 }
2116
2117 for (i = 0; i < ETHER_ADDR_LEN; i++)
2118 mask[i] |= addr[i];
2119
2120done:
2121 addr[0] |= 0x01; /* make sure multicast bit is set */
2122
2123 NFE_WRITE(sc, NFE_MULTIADDR_HI,
2124 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2125 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2126 addr[5] << 8 | addr[4]);
2127 NFE_WRITE(sc, NFE_MULTIMASK_HI,
2128 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2129 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2130 mask[5] << 8 | mask[4]);
2131
2132 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
2133 NFE_WRITE(sc, NFE_RXFILTER, filter);
2134}
2135
2136static void
2137nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2138{
2139 uint32_t lo, hi;
2140
2141 lo = NFE_READ(sc, NFE_MACADDR_LO);
2142 hi = NFE_READ(sc, NFE_MACADDR_HI);
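	/*
	 * The MAC address registers hold the address in one of two byte
	 * orders; NFE_FIX_EADDR tells us which one this chip uses.
	 */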
2143 if (sc->sc_caps & NFE_FIX_EADDR) {
2144 addr[0] = (lo >> 8) & 0xff;
2145 addr[1] = (lo & 0xff);
2146
2147 addr[2] = (hi >> 24) & 0xff;
2148 addr[3] = (hi >> 16) & 0xff;
2149 addr[4] = (hi >> 8) & 0xff;
2150 addr[5] = (hi & 0xff);
2151 } else {
2152 addr[0] = (hi & 0xff);
2153 addr[1] = (hi >> 8) & 0xff;
2154 addr[2] = (hi >> 16) & 0xff;
2155 addr[3] = (hi >> 24) & 0xff;
2156
2157 addr[4] = (lo & 0xff);
2158 addr[5] = (lo >> 8) & 0xff;
2159 }
2160}
2161
2162static void
2163nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
2164{
2165 NFE_WRITE(sc, NFE_MACADDR_LO,
2166 addr[5] << 8 | addr[4]);
2167 NFE_WRITE(sc, NFE_MACADDR_HI,
2168 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2169}
2170
2171static void
2172nfe_tick(void *arg)
2173{
2174 struct nfe_softc *sc = arg;
2175 struct ifnet *ifp = &sc->arpcom.ac_if;
2176 struct mii_data *mii = device_get_softc(sc->sc_miibus);
2177
2178 lwkt_serialize_enter(ifp->if_serializer);
2179
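	/* Drive the PHY state machine and reschedule the tick in one second */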
2180 mii_tick(mii);
2181 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
2182
2183 lwkt_serialize_exit(ifp->if_serializer);
2184}
2185
2186static int
2187nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2188 int wait)
2189{
2190 struct nfe_rx_data *data = &ring->data[idx];
2191 bus_dma_segment_t seg;
2192 bus_dmamap_t map;
2193 struct mbuf *m;
2194 int nsegs, error;
2195
2196 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2197 if (m == NULL)
2198 return ENOBUFS;
2199 m->m_len = m->m_pkthdr.len = MCLBYTES;
2200
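	/*
	 * Load the new mbuf through the spare DMA map first; if the load
	 * fails, the currently mapped mbuf (if any) is left untouched.
	 */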
2201 error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap,
2202 m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
2203 if (error) {
2204 m_freem(m);
2205 if (wait) {
2206 if_printf(&sc->arpcom.ac_if,
2207			    "could not map RX mbuf %d\n", error);
2208 }
2209 return error;
2210 }
2211
2212 if (data->m != NULL) {
2213 /* Sync and unload originally mapped mbuf */
2214 bus_dmamap_sync(ring->data_tag, data->map,
2215 BUS_DMASYNC_POSTREAD);
2216 bus_dmamap_unload(ring->data_tag, data->map);
2217 }
2218
2219 /* Swap this DMA map with tmp DMA map */
2220 map = data->map;
2221 data->map = ring->data_tmpmap;
2222 ring->data_tmpmap = map;
2223
2224 /* Caller is assumed to have collected the old mbuf */
2225 data->m = m;
2226
2227 nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
2228 return 0;
2229}
2230
2231static int
2232nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2233 int wait)
2234{
2235 struct nfe_rx_data *data = &ring->data[idx];
2236 struct nfe_jbuf *jbuf;
2237 struct mbuf *m;
2238
2239 MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
2240 if (m == NULL)
2241 return ENOBUFS;
2242
2243 jbuf = nfe_jalloc(sc);
2244 if (jbuf == NULL) {
2245 m_freem(m);
2246 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
2247 "-- packet dropped!\n");
2248 return ENOBUFS;
2249 }
2250
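	/*
	 * Attach the preallocated jumbo buffer to the mbuf as external
	 * storage; nfe_jfree/nfe_jref handle its reference counting.
	 */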
2251 m->m_ext.ext_arg = jbuf;
2252 m->m_ext.ext_buf = jbuf->buf;
2253 m->m_ext.ext_free = nfe_jfree;
2254 m->m_ext.ext_ref = nfe_jref;
2255 m->m_ext.ext_size = NFE_JBYTES;
2256
2257 m->m_data = m->m_ext.ext_buf;
2258 m->m_flags |= M_EXT;
2259 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2260
2261 /* Caller is assumed to have collected the old mbuf */
2262 data->m = m;
2263
2264 nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
2265 return 0;
2266}
2267
2268static void
2269nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
2270 bus_addr_t physaddr)
2271{
2272 if (sc->sc_caps & NFE_40BIT_ADDR) {
2273 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2274
2275 desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
2276 desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
2277 } else {
2278 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2279
2280 desc32->physaddr = htole32(physaddr);
2281 }
2282}
2283
2284static void
2285nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
2286{
2287 if (sc->sc_caps & NFE_40BIT_ADDR) {
2288 struct nfe_desc64 *desc64 = &ring->desc64[idx];
2289
2290 desc64->length = htole16(ring->bufsz);
2291 desc64->flags = htole16(NFE_RX_READY);
2292 } else {
2293 struct nfe_desc32 *desc32 = &ring->desc32[idx];
2294
2295 desc32->length = htole16(ring->bufsz);
2296 desc32->flags = htole16(NFE_RX_READY);
2297 }
2298}
2299
2300static int
2301nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
2302{
2303 struct nfe_softc *sc = arg1;
2304 struct ifnet *ifp = &sc->arpcom.ac_if;
2305 uint32_t flags;
2306 int error, v;
2307
2308 lwkt_serialize_enter(ifp->if_serializer);
2309
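	/*
	 * Export sc_imtime as a negative number while dynamic interrupt
	 * moderation is enabled; writing a negative value turns it on.
	 */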
2310 flags = sc->sc_flags & ~NFE_F_DYN_IM;
2311 v = sc->sc_imtime;
2312 if (sc->sc_flags & NFE_F_DYN_IM)
2313 v = -v;
2314
2315 error = sysctl_handle_int(oidp, &v, 0, req);
2316 if (error || req->newptr == NULL)
2317 goto back;
2318
2319 if (v < 0) {
2320 flags |= NFE_F_DYN_IM;
2321 v = -v;
2322 }
2323
2324 if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
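		/*
		 * A value too small for the moderation timer to represent
		 * is treated as 0, i.e. use the default interval.
		 */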
2325 if (NFE_IMTIME(v) == 0)
2326 v = 0;
2327 sc->sc_imtime = v;
2328 sc->sc_flags = flags;
2329 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);
2330
2331 if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING))
2332 == IFF_RUNNING) {
2333 nfe_enable_intrs(sc);
2334 }
2335 }
2336back:
2337 lwkt_serialize_exit(ifp->if_serializer);
2338 return error;
2339}
2340
2341static void
2342nfe_powerup(device_t dev)
2343{
2344 struct nfe_softc *sc = device_get_softc(dev);
2345 uint32_t pwr_state;
2346 uint16_t did;
2347
2348 /*
2349 * Bring MAC and PHY out of low power state
2350 */
2351
2352 pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;
2353
2354 did = pci_get_device(dev);
2355 if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
2356 did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
2357 pci_get_revid(dev) >= 0xa3)
2358 pwr_state |= NFE_PWRUP_REV_A3;
2359
2360 NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
2361}
2362
2363static void
2364nfe_mac_reset(struct nfe_softc *sc)
2365{
2366 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
2367 uint32_t macaddr_hi, macaddr_lo, tx_poll;
2368
2369 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
2370
2371 /* Save several registers for later restoration */
2372 macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
2373 macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
2374 tx_poll = NFE_READ(sc, NFE_TX_POLL);
2375
2376 NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
2377 DELAY(100);
2378
2379 NFE_WRITE(sc, NFE_MAC_RESET, 0);
2380 DELAY(100);
2381
2382 /* Restore saved registers */
2383 NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
2384 NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
2385 NFE_WRITE(sc, NFE_TX_POLL, tx_poll);
2386
2387 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
2388}
2389
2390static void
2391nfe_enable_intrs(struct nfe_softc *sc)
2392{
2393 /*
2394 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
2395 * It is unclear how wide the timer is. Base programming does
2396	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
2397 * we don't get any interrupt moderation. TX moderation is
2398 * possible by using the timer interrupt instead of TX_DONE.
2399 *
2400 * It is unclear whether there are other bits that can be
2401 * set to make the NFE device actually do interrupt moderation
2402 * on the RX side.
2403 *
2404	 * For now set a 128us interval as a placeholder, but don't use
2405 * the timer.
2406 */
2407 if (sc->sc_imtime == 0)
2408 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
2409 else
2410 NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));
2411
2412 /* Enable interrupts */
2413 NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);
2414
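	/* Remember whether the periodic timer interrupt is among the enabled set */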
2415 if (sc->sc_irq_enable & NFE_IRQ_TIMER)
2416 sc->sc_flags |= NFE_F_IRQ_TIMER;
2417 else
2418 sc->sc_flags &= ~NFE_F_IRQ_TIMER;
2419}
2420
2421static void
2422nfe_disable_intrs(struct nfe_softc *sc)
2423{
2424 /* Disable interrupts */
2425 NFE_WRITE(sc, NFE_IRQ_MASK, 0);
2426 sc->sc_flags &= ~NFE_F_IRQ_TIMER;
2427}