/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/
/*	$DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.46 2008/10/28 07:30:49 sephe Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include "miibus_if.h"

#include <dev/netif/nfe/if_nfereg.h>
#include <dev/netif/nfe/if_nfevar.h>

#define NFE_CSUM
#define NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int	nfe_probe(device_t);
static int	nfe_attach(device_t);
static int	nfe_detach(device_t);
static void	nfe_shutdown(device_t);
static int	nfe_resume(device_t);
static int	nfe_suspend(device_t);

static int	nfe_miibus_readreg(device_t, int, int);
static void	nfe_miibus_writereg(device_t, int, int, int);
static void	nfe_miibus_statchg(device_t);

#ifdef DEVICE_POLLING
static void	nfe_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	nfe_intr(void *);
static int	nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static int	nfe_rxeof(struct nfe_softc *);
static int	nfe_txeof(struct nfe_softc *, int);
static int	nfe_encap(struct nfe_softc *, struct nfe_tx_ring *,
			  struct mbuf *);
static void	nfe_start(struct ifnet *);
static void	nfe_watchdog(struct ifnet *);
static void	nfe_init(void *);
static void	nfe_stop(struct nfe_softc *);
static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
static void	nfe_jfree(void *);
static void	nfe_jref(void *);
static int	nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int	nfe_ifmedia_upd(struct ifnet *);
static void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	nfe_setmulti(struct nfe_softc *);
static void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
static void	nfe_powerup(device_t);
static void	nfe_mac_reset(struct nfe_softc *);
static void	nfe_tick(void *);
static void	nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int, bus_addr_t);
static void	nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *,
				     int);
static int	nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int,
			       int);
static int	nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int,
				 int);
static void	nfe_enable_intrs(struct nfe_softc *);
static void	nfe_disable_intrs(struct nfe_softc *);

static int	nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS);

#define NFE_DEBUG
#ifdef NFE_DEBUG

static int	nfe_debug = 0;
static int	nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT;
static int	nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT;
/*
 * hw timer simulated interrupt moderation @4000Hz.  Negative values
 * disable the timer when the discrete interrupt rate falls below
 * the moderation rate.
 *
 * XXX 8000Hz might be better but if the interrupt is shared it can
 * blow out the cpu.
 */
static int	nfe_imtime = -250;	/* uS */

TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count);
TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count);
TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime);
TUNABLE_INT("hw.nfe.debug", &nfe_debug);

#define DPRINTF(sc, fmt, ...) do {			\
	if ((sc)->sc_debug) {				\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#define DPRINTFN(sc, lv, fmt, ...) do {			\
	if ((sc)->sc_debug >= (lv)) {			\
		if_printf(&(sc)->arpcom.ac_if,		\
			  fmt, __VA_ARGS__);		\
	}						\
} while (0)

#else	/* !NFE_DEBUG */

#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, lv, fmt, ...)

#endif	/* NFE_DEBUG */

static const struct nfe_dev {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	  "NVIDIA nForce Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	  "NVIDIA nForce2 Fast Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	/* XXX TGEN: the next chip can also be found in the nForce2 Ultra 400Gb
	   chipset, and possibly also in the 400R; it may be that both
	   nForce2- and nForce3-based boards can use the same MCPs
	   (= southbridges). */
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5,
	  "NVIDIA nForce3 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2,
	  "NVIDIA CK804 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	  "NVIDIA MCP04 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2,
	  "NVIDIA MCP51 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	  "NVIDIA MCP55 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	  "NVIDIA MCP61 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	  "NVIDIA MCP65 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	  "NVIDIA MCP67 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	  "NVIDIA MCP73 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	  "NVIDIA MCP77 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	  "NVIDIA MCP79 Gigabit Ethernet" },

	{ 0, 0, NULL }
};

static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ 0, 0 }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t	nfe_devclass;

DECLARE_DUMMY_MODULE(if_nfe);
MODULE_DEPEND(if_nfe, miibus, 1, 1, 1);
DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);

/*
 * NOTE: NFE_WORDALIGN support is guesswork right now.
 */
static int
nfe_probe(device_t dev)
{
	const struct nfe_dev *n;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	for (n = nfe_devices; n->desc != NULL; ++n) {
		if (vid == n->vid && did == n->did) {
			struct nfe_softc *sc = device_get_softc(dev);

			switch (did) {
			case PCI_PRODUCT_NVIDIA_NFORCE_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE2_LAN:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1:
				sc->sc_caps = NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
			case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
				sc->sc_caps = NFE_FIX_EADDR;
				/* FALL THROUGH */
			case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
				sc->sc_caps |= NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_CK804_LAN1:
			case PCI_PRODUCT_NVIDIA_CK804_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_NO_PWRCTL |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
				sc->sc_caps = NFE_JUMBO_SUP |
					      NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_HW_VLAN |
					      NFE_FIX_EADDR;
				break;
			case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
			case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
				sc->sc_caps = NFE_40BIT_ADDR |
					      NFE_HW_CSUM |
					      NFE_WORDALIGN;
				break;
			}

			device_set_desc(dev, n->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t lowaddr;
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->sc_rx_ring_count = nfe_rx_ring_count;
	sc->sc_tx_ring_count = nfe_tx_ring_count;
	sc->sc_debug = nfe_debug;
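
	/*
	 * A negative hw.nfe.imtimer value selects dynamic interrupt
	 * moderation; its absolute value is still the timer period
	 * used whenever the timer is engaged (see nfe_intr()).
	 */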
	if (nfe_imtime < 0) {
		sc->sc_flags |= NFE_F_DYN_IM;
		sc->sc_imtime = -nfe_imtime;
	} else {
		sc->sc_imtime = nfe_imtime;
	}
	sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

	sc->sc_mem_rid = PCIR_BAR(0);

	if (sc->sc_caps & NFE_40BIT_ADDR)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V3;
	else if (sc->sc_caps & NFE_JUMBO_SUP)
		sc->rxtxctl_desc = NFE_RXTX_DESC_V2;

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate top level DMA tag
	 */
	if (sc->sc_caps & NFE_40BIT_ADDR)
		lowaddr = NFE_BUS_SPACE_MAXADDR;
	else
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
	error = bus_dma_tag_create(NULL,	/* parent */
			1, 0,			/* alignment, boundary */
			lowaddr,		/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			&sc->sc_dtag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		goto fail;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec).  "
			"0 to disable interrupt moderation.");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count,
		       0, "RX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count,
		       0, "TX ring count");
	SYSCTL_ADD_INT(&sc->sc_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &sc->sc_debug,
		       0, "control debugging printfs");

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_caps & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_caps & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = NFE_CSUM_FEATURES;
	}
#else
	sc->sc_caps &= ~NFE_HW_CSUM;
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sc_sysctl_ctx);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
				     sc->sc_irq_res);
	}

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
				     sc->sc_mem_res);
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);

	return 0;
}

static void
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	nfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (ifp->if_flags & IFF_UP)
		nfe_init(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return 0;
}

static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

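	/* Poll for completion; at most 1000 * 100uS = 100mS. */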
	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", "");
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY %s\n", "");
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return val;
}

static void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}

#ifdef NFE_DEBUG
	if (ntries == 1000)
		DPRINTFN(sc, 2, "could not write to PHY %s\n", "");
#endif
}

#ifdef DEVICE_POLLING

static void
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		nfe_disable_intrs(sc);
		break;

	case POLL_DEREGISTER:
		nfe_enable_intrs(sc);
		break;

	case POLL_AND_CHECK_STATUS:
		/* fall through */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			nfe_rxeof(sc);
			nfe_txeof(sc, 1);
		}
		break;
	}
}

#endif

static void
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t r;

	r = NFE_READ(sc, NFE_IRQ_STATUS);
	if (r == 0)
		return;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	if (sc->sc_rate_second != time_second) {
		/*
		 * Calculate sc_rate_avg - interrupts per second.
		 */
		sc->sc_rate_second = time_second;
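		/*
		 * The 3:1 blend below is a simple exponential moving
		 * average: avg = (3 * avg + acc) / 4.  The average ramps
		 * up immediately but decays by only 25% per second.
		 */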
		if (sc->sc_rate_avg < sc->sc_rate_acc)
			sc->sc_rate_avg = sc->sc_rate_acc;
		else
			sc->sc_rate_avg = (sc->sc_rate_avg * 3 +
					   sc->sc_rate_acc) / 4;
		sc->sc_rate_acc = 0;
	} else if (sc->sc_rate_avg < sc->sc_rate_acc) {
		/*
		 * Don't wait for a tick to roll over if we are taking
		 * a lot of interrupts.
		 */
		sc->sc_rate_avg = sc->sc_rate_acc;
	}

	DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r);

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed %s\n", "");
	}

	if (ifp->if_flags & IFF_RUNNING) {
		int ret;
		int rate;

		/* check Rx ring */
		ret = nfe_rxeof(sc);

		/* check Tx ring */
		ret |= nfe_txeof(sc, 1);

		/* update the rate accumulator */
		if (ret)
			++sc->sc_rate_acc;

		if (sc->sc_flags & NFE_F_DYN_IM) {
			rate = 1000000 / sc->sc_imtime;
			if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 &&
			    sc->sc_rate_avg > rate) {
				/*
				 * Use the hardware timer to reduce the
				 * interrupt rate if the discrete interrupt
				 * rate has exceeded our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER);
				sc->sc_flags |= NFE_F_IRQ_TIMER;
			} else if ((sc->sc_flags & NFE_F_IRQ_TIMER) &&
				   sc->sc_rate_avg <= rate) {
				/*
				 * Use discrete TX/RX interrupts if the rate
				 * has fallen below our threshold.
				 */
				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER);
				sc->sc_flags &= ~NFE_F_IRQ_TIMER;

				/*
				 * Recollect, mainly to avoid the possible race
				 * introduced by changing interrupt masks.
				 */
				nfe_rxeof(sc);
				nfe_txeof(sc, 1);
			}
		}
	}
}

static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask, jumbo_cap;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case SIOCSIFMTU:
		if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL)
			jumbo_cap = 1;
		else
			jumbo_cap = 0;

		if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) ||
		    (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) {
			return EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					nfe_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM;
		if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) {
			ifp->if_capenable ^= mask;
			if (IFCAP_TXCSUM & ifp->if_capenable)
				ifp->if_hwassist = NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;

			if (ifp->if_flags & IFF_RUNNING)
				nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return error;
}

static int
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_rx_ring *ring = &sc->rxq;
	int reap;
	struct mbuf_chain chain[MAXCPU];

	reap = 0;
	ether_input_chain_init(chain);
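	/*
	 * Received packets are accumulated on the mbuf chains and handed
	 * to the stack in a single batch via ether_input_dispatch() below.
	 */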

	for (;;) {
		struct nfe_rx_data *data = &ring->data[ring->cur];
		struct mbuf *m;
		uint16_t flags;
		int len, error;

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			struct nfe_desc64 *desc64 = &ring->desc64[ring->cur];

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			struct nfe_desc32 *desc32 = &ring->desc32[ring->cur];

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		reap = 1;

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		m = data->m;

		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0);
		else
			error = nfe_newbuf_std(sc, ring, ring->cur, 0);
		if (error) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) &&
		    (flags & NFE_RX_CSUMOK)) {
			if (flags & NFE_RX_IP_CSUMOK_V2) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
							  CSUM_IP_VALID;
			}

			if (flags &
			    (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
							  CSUM_PSEUDO_HDR |
							  CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		ifp->if_ipackets++;
		ether_input_chain(ifp, m, NULL, chain);
skip:
		nfe_set_ready_rxdesc(sc, ring, ring->cur);
		sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count;
	}

	if (reap)
		ether_input_dispatch(chain);
	return reap;
}

static int
nfe_txeof(struct nfe_softc *sc, int start)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_tx_ring *ring = &sc->txq;
	struct nfe_tx_data *data = NULL;

	while (ring->next != ring->cur) {
		uint16_t flags;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			flags = le16toh(ring->desc64[ring->next].flags);
		else
			flags = le16toh(ring->desc32[ring->next].flags);

		if (flags & NFE_TX_VALID)
			break;

		data = &ring->data[ring->next];

		if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				if_printf(ifp, "tx v1 error 0x%4b\n", flags,
					  NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				if_printf(ifp, "tx v2 error 0x%4b\n", flags,
					  NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else {
				ifp->if_opackets++;
			}
		}

		if (data->m == NULL) {	/* should not get here */
			if_printf(ifp,
				  "last fragment bit w/o associated mbuf!\n");
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_unload(ring->data_tag, data->map);
		m_freem(data->m);
		data->m = NULL;
skip:
		ring->queued--;
		KKASSERT(ring->queued >= 0);
		ring->next = (ring->next + 1) % sc->sc_tx_ring_count;
	}

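	/*
	 * Clear OACTIVE only when enough descriptors are free for a
	 * worst-case fragmented packet plus the reserved slots.
	 */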
	if (sc->sc_tx_ring_count - ring->queued >=
	    sc->sc_tx_spare + NFE_NSEG_RSVD)
		ifp->if_flags &= ~IFF_OACTIVE;

	if (ring->queued == 0)
		ifp->if_timer = 0;

	if (start && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);

	if (data != NULL)
		return 1;
	else
		return 0;
}

static int
nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0)
{
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	struct nfe_tx_data *data, *data_map;
	bus_dmamap_t map;
	struct nfe_desc64 *desc64 = NULL;
	struct nfe_desc32 *desc32 = NULL;
	uint16_t flags = 0;
	uint32_t vtag = 0;
	int error, i, j, maxsegs, nsegs;

	data = &ring->data[ring->cur];
	map = data->map;
	data_map = data;	/* Remember who owns the DMA map */

	maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD;
	if (maxsegs > NFE_MAX_SCATTER)
		maxsegs = NFE_MAX_SCATTER;
	KASSERT(maxsegs >= sc->sc_tx_spare,
		("not enough segments %d,%d\n", maxsegs, sc->sc_tx_spare));

	error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto back;
	bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE);

	error = 0;

	/* setup h/w VLAN tagging */
	if (m0->m_flags & M_VLANTAG)
		vtag = m0->m_pkthdr.ether_vlantag;

	if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) {
		if (m0->m_pkthdr.csum_flags & CSUM_IP)
			flags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			flags |= NFE_TX_TCP_CSUM;
	}

	/*
	 * XXX urm. somebody is unaware of how hardware works.  You
	 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in
	 * the ring until the entire chain is actually *VALID*.  Otherwise
	 * the hardware may encounter a partially initialized chain that
	 * is marked as being ready to go when it in fact is not ready to
	 * go.
	 */

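	/* NOTE: descriptor length fields are programmed as (length - 1). */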
	for (i = 0; i < nsegs; i++) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		data = &ring->data[j];

		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->vtag = htole32(vtag);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &ring->desc32[j];
			desc32->physaddr = htole32(segs[i].ds_addr);
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
		vtag = 0;

		ring->queued++;
		KKASSERT(ring->queued <= sc->sc_tx_ring_count);
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
	} else {
		if (sc->sc_caps & NFE_JUMBO_SUP)
			flags = NFE_TX_LASTFRAG_V2;
		else
			flags = NFE_TX_LASTFRAG_V1;
		desc32->flags |= htole16(flags);
	}

	/*
	 * Set NFE_TX_VALID backwards so the hardware doesn't see the
	 * whole mess until the first descriptor in the map is flagged.
	 */
	for (i = nsegs - 1; i >= 0; --i) {
		j = (ring->cur + i) % sc->sc_tx_ring_count;
		if (sc->sc_caps & NFE_40BIT_ADDR) {
			desc64 = &ring->desc64[j];
			desc64->flags |= htole16(NFE_TX_VALID);
		} else {
			desc32 = &ring->desc32[j];
			desc32->flags |= htole16(NFE_TX_VALID);
		}
	}
	ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count;

	/* Exchange DMA map */
	data_map->map = data->map;
	data->map = map;
	data->m = m0;

back:
	if (error)
		m_freem(m0);
	return error;
}

static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_tx_ring *ring = &sc->txq;
	int count = 0, oactive = 0;
	struct mbuf *m0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	for (;;) {
		int error;

		if (sc->sc_tx_ring_count - ring->queued <
		    sc->sc_tx_spare + NFE_NSEG_RSVD) {
			if (oactive) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}

			nfe_txeof(sc, 0);
			oactive = 1;
			continue;
		}

		m0 = ifq_dequeue(&ifp->if_snd, NULL);
		if (m0 == NULL)
			break;

		ETHER_BPF_MTAP(ifp, m0);

		error = nfe_encap(sc, ring, m0);
		if (error) {
			ifp->if_oerrors++;
			if (error == EFBIG) {
				if (oactive) {
					ifp->if_flags |= IFF_OACTIVE;
					break;
				}
				nfe_txeof(sc, 0);
				oactive = 1;
			}
			continue;
		} else {
			oactive = 0;
		}
		++count;

		/*
		 * NOTE:
		 * `m0' may be freed in nfe_encap(), so
		 * it should not be touched any more.
		 */
	}

	if (count == 0)	/* nothing sent */
		return;

	/* Kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING) {
		if_printf(ifp, "watchdog timeout - lost interrupt recovered\n");
		nfe_txeof(sc, 1);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");

	nfe_init(ifp->if_softc);

	ifp->if_oerrors++;
}

static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t tmp;
	int error;

	ASSERT_SERIALIZED(ifp->if_serializer);

	nfe_stop(sc);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_mac_reset(sc);

	/*
	 * NOTE:
	 * Switching between jumbo frames and normal frames should
	 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring().
	 */
	if (ifp->if_mtu > ETHERMTU) {
		sc->sc_flags |= NFE_F_USE_JUMBO;
		sc->rxq.bufsz = NFE_JBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO;
		if (bootverbose)
			if_printf(ifp, "use jumbo frames\n");
	} else {
		sc->sc_flags &= ~NFE_F_USE_JUMBO;
		sc->rxq.bufsz = MCLBYTES;
		sc->sc_tx_spare = NFE_NSEG_SPARE;
		if (bootverbose)
			if_printf(ifp, "use non-jumbo frames\n");
	}

	error = nfe_init_tx_ring(sc, &sc->txq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error) {
		nfe_stop(sc);
		return;
	}

	NFE_WRITE(sc, NFE_TX_POLL, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;

	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_caps & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_caps & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr));

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		NFE_WRITE(sc, NFE_TX_RING_ADDR_HI,
			  NFE_ADDR_HI(sc->txq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

ec9403d0 1519 (sc->sc_rx_ring_count - 1) << 16 |
b4633098 1520 (sc->sc_tx_ring_count - 1));
ae813fd8
SZ
1521
1522 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1523
1524 /* force MAC to wakeup */
1525 tmp = NFE_READ(sc, NFE_PWR_STATE);
1526 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1527 DELAY(10);
1528 tmp = NFE_READ(sc, NFE_PWR_STATE);
1529 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1530
ae813fd8
SZ
1531 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1532 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1533 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1534
1535 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1536 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1537
1538 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
ae813fd8
SZ
1539
1540 sc->rxtxctl &= ~NFE_RXTX_BIT2;
1541 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1542 DELAY(10);
1543 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1544
1545 /* set Rx filter */
1546 nfe_setmulti(sc);
1547
1548 nfe_ifmedia_upd(ifp);
1549
1550 /* enable Rx */
1551 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1552
1553 /* enable Tx */
1554 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1555
1556 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1557
1558#ifdef DEVICE_POLLING
04b9ef8d
SZ
1559 if ((ifp->if_flags & IFF_POLLING))
1560 nfe_disable_intrs(sc);
1561 else
ae813fd8 1562#endif
04b9ef8d 1563 nfe_enable_intrs(sc);
ae813fd8
SZ
1564
1565 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);
1566
1567 ifp->if_flags |= IFF_RUNNING;
1568 ifp->if_flags &= ~IFF_OACTIVE;
751890ab
MD
1569
1570 /*
1571 * If we had stuff in the tx ring before its all cleaned out now
1572 * so we are not going to get an interrupt, jump-start any pending
1573 * output.
1574 */
d378110e
SZ
1575 if (!ifq_is_empty(&ifp->if_snd))
1576 if_devstart(ifp);
ae813fd8
SZ
1577}
1578
1579static void
1580nfe_stop(struct nfe_softc *sc)
1581{
1582 struct ifnet *ifp = &sc->arpcom.ac_if;
53f1d017
SZ
1583 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
1584 int i;
ae813fd8 1585
c0dcc88e
SZ
1586 ASSERT_SERIALIZED(ifp->if_serializer);
1587
ae813fd8
SZ
1588 callout_stop(&sc->sc_tick_ch);
1589
1590 ifp->if_timer = 0;
1591 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
04b9ef8d 1592 sc->sc_flags &= ~NFE_F_IRQ_TIMER;
ae813fd8 1593
53f1d017
SZ
1594#define WAITMAX 50000
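	/* Each poll iteration below delays 100uS; WAITMAX bounds the wait at 5 seconds. */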

	/*
	 * Abort Tx
	 */
	NFE_WRITE(sc, NFE_TX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop TX\n");
	DELAY(100);

	/*
	 * Disable Rx
	 */
	NFE_WRITE(sc, NFE_RX_CTL, 0);
	for (i = 0; i < WAITMAX; ++i) {
		DELAY(100);
		if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0)
			break;
	}
	if (i == WAITMAX)
		if_printf(ifp, "can't stop RX\n");
	DELAY(100);

#undef WAITMAX

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);

	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* Reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->bufsz = MCLBYTES;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_rx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	if (sc->sc_caps & NFE_JUMBO_SUP) {
		ring->jbuf =
		    kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc),
			    M_DEVBUF, M_WAITOK | M_ZERO);

		error = nfe_jpool_alloc(sc, ring);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			kfree(ring->jbuf, M_DEVBUF);
			ring->jbuf = NULL;
			/* Allow jumbo frame allocation to fail */
		}
	}

	ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
				  &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}
		nfe_set_ready_rxdesc(sc, ring, i);
	}
	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

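/*
 * nfe_jfree()/nfe_jref() drop and add references on a jumbo buffer.
 * The buffer is returned to the free list only when its last reference
 * goes away; the free list is protected by sc_jbuf_serializer.
 */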
static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
	atomic_subtract_int(&jbuf->inuse, 1);
	if (jbuf->inuse == 0)
		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
	lwkt_serialize_exit(&sc->sc_jbuf_serializer);
}

static void
nfe_jref(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: ref wrong jumbo buffer\n", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed\n", __func__);

	atomic_add_int(&jbuf->inuse, 1);
}

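/*
 * The jumbo pool is a single coherent DMA allocation of
 * NFE_JPOOL_SIZE(sc) bytes, carved into NFE_JBYTES-sized slices.
 * Each slice remembers its own bus address, so an RX descriptor can
 * point at it directly without a per-buffer dmamap load.
 */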
static int
nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_jbuf *jbuf;
	bus_dmamem_t dmem;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    NFE_JPOOL_SIZE(sc),
				    BUS_DMA_WAITOK, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create jumbo buffer\n");
		return error;
	}
	ring->jtag = dmem.dmem_tag;
	ring->jmap = dmem.dmem_map;
	ring->jpool = dmem.dmem_addr;
	physaddr = dmem.dmem_busaddr;

	/* ... and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) {
		jbuf = &ring->jbuf[i];

		jbuf->sc = sc;
		jbuf->ring = ring;
		jbuf->inuse = 0;
		jbuf->slot = i;
		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;
}

static void
nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->jtag != NULL) {
		bus_dmamap_unload(ring->jtag, ring->jmap);
		bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap);
		bus_dma_tag_destroy(ring->jtag);
	}
}

static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, j, error, descsize;
	bus_dmamem_t dmem;
	void **desc;

	if (sc->sc_caps & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    sc->sc_tx_ring_count * descsize,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc ring\n");
		return error;
	}
	ring->tag = dmem.dmem_tag;
	ring->map = dmem.dmem_map;
	*desc = dmem.dmem_addr;
	ring->physaddr = dmem.dmem_busaddr;

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
				   &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	/* Destroy only the DMA maps created so far, i.e. data[j] */
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

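	/*
	 * Fold all multicast addresses into one address/mask pair:
	 * addr accumulates the bits common to every address, and mask
	 * keeps a bit set only where all addresses agree, so that the
	 * chip presumably accepts frames agreeing with addr on every
	 * bit set in mask.  For example, 01:00:5e:00:00:01 and
	 * 01:00:5e:00:00:02 fold into addr 01:00:5e:00:00:00 with
	 * mask ff:ff:ff:ff:ff:fc, which matches both (and, as with
	 * any imperfect filter, a few extra addresses).
	 */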
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
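	/*
	 * Chip generations disagree on the byte order in which the MAC
	 * address is kept in these two registers; NFE_FIX_EADDR selects
	 * which of the two layouts below applies.
	 */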
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int nsegs, error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

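	/*
	 * Load the new mbuf into the spare map first: if the load
	 * fails, the old mbuf is still mapped by data->map and the
	 * ring entry stays usable.  The two maps are swapped only
	 * after the load succeeds.
	 */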
	error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap,
					     m, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	if (data->m != NULL) {
		/* Sync and unload originally mapped mbuf */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_tag, data->map);
	}

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

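	/*
	 * Attach the jumbo buffer to the mbuf as external storage.
	 * The ext_free/ext_ref callbacks keep the pool slot's
	 * reference count in step with the mbuf, so the slot goes
	 * back to the jfreelist once the last reference is dropped.
	 */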
	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

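		/*
		 * The 40-bit bus address is split into separate high
		 * and low 32-bit words.
		 */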
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

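/*
 * Sysctl handler for the interrupt moderation time, in microseconds.
 * Writing a negative value selects dynamic moderation (NFE_F_DYN_IM)
 * with the absolute value as the base interval; a value that would
 * program a zero hardware interval is clamped to 0, which restores
 * the default behaviour.
 */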
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		if (NFE_IMTIME(v) == 0)
			v = 0;
		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			nfe_enable_intrs(sc);
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */

	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}

static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128us interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}

static void
nfe_disable_intrs(struct nfe_softc *sc)
{
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}