network code: Convert if_multiaddrs from LIST to TAILQ.
[dragonfly.git] / sys / dev / netif / fxp / if_fxp.c
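The commit subject above refers to switching the ifnet multicast address list (if_multiaddrs) from the queue(3) LIST macros to TAILQ. As a rough sketch only — not the actual DragonFly patch, and with the struct and member names (ifmultiaddr_ex, ifma_link) used purely as hypothetical stand-ins for the real kernel structures — such a <sys/queue.h> conversion typically looks like this:

/*
 * Illustrative LIST -> TAILQ conversion using <sys/queue.h>.
 * All names below (ifmultiaddr_ex, ifnet_ex, if_multiaddrs, ifma_link)
 * are hypothetical stand-ins, not the real kernel definitions.
 */
#include <sys/queue.h>

struct ifmultiaddr_ex {
	TAILQ_ENTRY(ifmultiaddr_ex) ifma_link;	/* was: LIST_ENTRY(ifmultiaddr_ex) */
	/* ... multicast address payload ... */
};

struct ifnet_ex {
	TAILQ_HEAD(, ifmultiaddr_ex) if_multiaddrs; /* was: LIST_HEAD(, ifmultiaddr_ex) */
};

static void
ifnet_ex_init(struct ifnet_ex *ifp)
{
	TAILQ_INIT(&ifp->if_multiaddrs);	/* was: LIST_INIT(&ifp->if_multiaddrs) */
}

static void
ifnet_ex_walk(struct ifnet_ex *ifp)
{
	struct ifmultiaddr_ex *ifma;

	/* was: LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		/* e.g. program each address into a hardware multicast filter */
	}
}

A TAILQ keeps a tail pointer, so entries can be appended in O(1) with TAILQ_INSERT_TAIL and iteration preserves insertion order, which is the usual motivation for this kind of conversion.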
1/*-
2 * Copyright (c) 1995, David Greenman
3 * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: src/sys/dev/fxp/if_fxp.c,v 1.110.2.30 2003/06/12 16:47:05 mux Exp $
29 * $DragonFly: src/sys/dev/netif/fxp/if_fxp.c,v 1.61 2008/09/17 08:51:29 sephe Exp $
30 */
31
32/*
33 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
34 */
35
36#include "opt_polling.h"
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/mbuf.h>
41#include <sys/malloc.h>
42#include <sys/kernel.h>
43#include <sys/interrupt.h>
44#include <sys/socket.h>
45#include <sys/sysctl.h>
46#include <sys/thread2.h>
47
48#include <net/if.h>
49#include <net/ifq_var.h>
50#include <net/if_dl.h>
51#include <net/if_media.h>
52
53#ifdef NS
54#include <netns/ns.h>
55#include <netns/ns_if.h>
56#endif
57
58#include <net/bpf.h>
59#include <sys/sockio.h>
60#include <sys/bus.h>
61#include <sys/rman.h>
62
63#include <net/ethernet.h>
64#include <net/if_arp.h>
65
66#include <vm/vm.h> /* for vtophys */
67#include <vm/pmap.h> /* for vtophys */
68
69#include <net/if_types.h>
70#include <net/vlan/if_vlan_var.h>
71
72#include <bus/pci/pcivar.h>
73#include <bus/pci/pcireg.h> /* for PCIM_CMD_xxx */
74
75#include "../mii_layer/mii.h"
76#include "../mii_layer/miivar.h"
77
78#include "if_fxpreg.h"
79#include "if_fxpvar.h"
80#include "rcvbundl.h"
81
82#include "miibus_if.h"
83
84/*
85 * NOTE! On the Alpha, we have an alignment constraint. The
86 * card DMAs the packet immediately following the RFA. However,
87 * the first thing in the packet is a 14-byte Ethernet header.
88 * This means that the packet is misaligned. To compensate,
89 * we actually offset the RFA 2 bytes into the cluster. This
90 * aligns the packet after the Ethernet header at a 32-bit
91 * boundary. HOWEVER! This means that the RFA is misaligned!
92 */
93#define RFA_ALIGNMENT_FUDGE 2
94
95/*
96 * Set initial transmit threshold at 64 (512 bytes). This is
97 * increased by 64 (512 bytes) at a time, to a maximum of 192
98 * (1536 bytes), if an underrun occurs.
99 */
100static int tx_threshold = 64;
101
102/*
103 * The configuration byte map has several undefined fields which
104 * must be one or must be zero. Set up a template for these bits
105 * only, (assuming an 82557 chip) leaving the actual configuration
106 * to fxp_init.
107 *
108 * See struct fxp_cb_config for the bit definitions.
109 */
110static u_char fxp_cb_config_template[] = {
111 0x0, 0x0, /* cb_status */
112 0x0, 0x0, /* cb_command */
113 0x0, 0x0, 0x0, 0x0, /* link_addr */
114 0x0, /* 0 */
115 0x0, /* 1 */
116 0x0, /* 2 */
117 0x0, /* 3 */
118 0x0, /* 4 */
119 0x0, /* 5 */
120 0x32, /* 6 */
121 0x0, /* 7 */
122 0x0, /* 8 */
123 0x0, /* 9 */
124 0x6, /* 10 */
125 0x0, /* 11 */
126 0x0, /* 12 */
127 0x0, /* 13 */
128 0xf2, /* 14 */
129 0x48, /* 15 */
130 0x0, /* 16 */
131 0x40, /* 17 */
132 0xf0, /* 18 */
133 0x0, /* 19 */
134 0x3f, /* 20 */
135 0x5 /* 21 */
136};
137
138struct fxp_ident {
139 u_int16_t devid;
140 int16_t revid; /* -1 matches anything */
141 char *name;
142};
143
144/*
145 * Claim various Intel PCI device identifiers for this driver. The
146 * sub-vendor and sub-device fields are extensively used to identify
147 * particular variants, but we don't currently differentiate between
148 * them.
149 */
150static struct fxp_ident fxp_ident_table[] = {
151 { 0x1029, -1, "Intel 82559 PCI/CardBus Pro/100" },
152 { 0x1030, -1, "Intel 82559 Pro/100 Ethernet" },
153 { 0x1031, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
154 { 0x1032, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
155 { 0x1033, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
156 { 0x1034, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
157 { 0x1035, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
158 { 0x1036, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
159 { 0x1037, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
160 { 0x1038, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
161 { 0x1039, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
162 { 0x103A, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
163 { 0x103B, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
164 { 0x103C, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
165 { 0x103D, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
166 { 0x103E, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
167 { 0x1050, -1, "Intel 82801BA (D865) Pro/100 VE Ethernet" },
168 { 0x1051, -1, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
169 { 0x1059, -1, "Intel 82551QM Pro/100 M Mobile Connection" },
170 { 0x1064, -1, "Intel 82562ET/EZ/GT/GZ (ICH6/ICH6R) Pro/100 VE Ethernet" },
171 { 0x1065, -1, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" },
172 { 0x1068, -1, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
173 { 0x1069, -1, "Intel 82562EM/EX/GX Pro/100 Ethernet" },
174 { 0x1091, -1, "Intel 82562GX Pro/100 Ethernet" },
175 { 0x1092, -1, "Intel Pro/100 VE Network Connection" },
176 { 0x1093, -1, "Intel Pro/100 VM Network Connection" },
177 { 0x1094, -1, "Intel Pro/100 946GZ (ICH7) Network Connection" },
178 { 0x1209, -1, "Intel 82559ER Embedded 10/100 Ethernet" },
179 { 0x1229, 0x01, "Intel 82557 Pro/100 Ethernet" },
180 { 0x1229, 0x02, "Intel 82557 Pro/100 Ethernet" },
181 { 0x1229, 0x03, "Intel 82557 Pro/100 Ethernet" },
182 { 0x1229, 0x04, "Intel 82558 Pro/100 Ethernet" },
183 { 0x1229, 0x05, "Intel 82558 Pro/100 Ethernet" },
184 { 0x1229, 0x06, "Intel 82559 Pro/100 Ethernet" },
185 { 0x1229, 0x07, "Intel 82559 Pro/100 Ethernet" },
186 { 0x1229, 0x08, "Intel 82559 Pro/100 Ethernet" },
187 { 0x1229, 0x09, "Intel 82559ER Pro/100 Ethernet" },
188 { 0x1229, 0x0c, "Intel 82550 Pro/100 Ethernet" },
189 { 0x1229, 0x0d, "Intel 82550 Pro/100 Ethernet" },
190 { 0x1229, 0x0e, "Intel 82550 Pro/100 Ethernet" },
191 { 0x1229, 0x0f, "Intel 82551 Pro/100 Ethernet" },
192 { 0x1229, 0x10, "Intel 82551 Pro/100 Ethernet" },
193 { 0x1229, -1, "Intel 82557/8/9 Pro/100 Ethernet" },
194 { 0x2449, -1, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
195 { 0x27dc, -1, "Intel 82801GB (ICH7) 10/100 Ethernet" },
196 { 0, -1, NULL },
197};
198
199static int fxp_probe(device_t dev);
200static int fxp_attach(device_t dev);
201static int fxp_detach(device_t dev);
202static int fxp_shutdown(device_t dev);
203static int fxp_suspend(device_t dev);
204static int fxp_resume(device_t dev);
205
206static void fxp_intr(void *xsc);
207static void fxp_intr_body(struct fxp_softc *sc,
208 u_int8_t statack, int count);
209
210static void fxp_init(void *xsc);
211static void fxp_tick(void *xsc);
212static void fxp_powerstate_d0(device_t dev);
213static void fxp_start(struct ifnet *ifp);
214static void fxp_stop(struct fxp_softc *sc);
215static void fxp_release(device_t dev);
216static int fxp_ioctl(struct ifnet *ifp, u_long command,
217 caddr_t data, struct ucred *);
218static void fxp_watchdog(struct ifnet *ifp);
219static int fxp_add_rfabuf(struct fxp_softc *sc, struct mbuf *oldm);
220static int fxp_mc_addrs(struct fxp_softc *sc);
221static void fxp_mc_setup(struct fxp_softc *sc);
222static u_int16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset,
223 int autosize);
224static void fxp_eeprom_putword(struct fxp_softc *sc, int offset,
225 u_int16_t data);
226static void fxp_autosize_eeprom(struct fxp_softc *sc);
227static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
228 int offset, int words);
229static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
230 int offset, int words);
231static int fxp_ifmedia_upd(struct ifnet *ifp);
232static void fxp_ifmedia_sts(struct ifnet *ifp,
233 struct ifmediareq *ifmr);
234static int fxp_serial_ifmedia_upd(struct ifnet *ifp);
235static void fxp_serial_ifmedia_sts(struct ifnet *ifp,
236 struct ifmediareq *ifmr);
237static int fxp_miibus_readreg(device_t dev, int phy, int reg);
238static void fxp_miibus_writereg(device_t dev, int phy, int reg,
239 int value);
240static void fxp_load_ucode(struct fxp_softc *sc);
241static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
242static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
243#ifdef DEVICE_POLLING
244static poll_handler_t fxp_poll;
245#endif
246
247static void fxp_lwcopy(volatile u_int32_t *src,
248 volatile u_int32_t *dst);
249static void fxp_scb_wait(struct fxp_softc *sc);
250static void fxp_scb_cmd(struct fxp_softc *sc, int cmd);
251static void fxp_dma_wait(volatile u_int16_t *status,
252 struct fxp_softc *sc);
253
254static device_method_t fxp_methods[] = {
255 /* Device interface */
256 DEVMETHOD(device_probe, fxp_probe),
257 DEVMETHOD(device_attach, fxp_attach),
258 DEVMETHOD(device_detach, fxp_detach),
259 DEVMETHOD(device_shutdown, fxp_shutdown),
260 DEVMETHOD(device_suspend, fxp_suspend),
261 DEVMETHOD(device_resume, fxp_resume),
262
263 /* MII interface */
264 DEVMETHOD(miibus_readreg, fxp_miibus_readreg),
265 DEVMETHOD(miibus_writereg, fxp_miibus_writereg),
266
267 { 0, 0 }
268};
269
270static driver_t fxp_driver = {
271 "fxp",
272 fxp_methods,
273 sizeof(struct fxp_softc),
274};
275
276static devclass_t fxp_devclass;
277
278DECLARE_DUMMY_MODULE(if_fxp);
279MODULE_DEPEND(if_fxp, miibus, 1, 1, 1);
280DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0);
281DRIVER_MODULE(if_fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
282DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);
283
284static int fxp_rnr;
285SYSCTL_INT(_hw, OID_AUTO, fxp_rnr, CTLFLAG_RW, &fxp_rnr, 0, "fxp rnr events");
286
287/*
288 * Copy a 16-bit aligned 32-bit quantity.
289 */
290static void
291fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
292{
293#ifdef __i386__
294 *dst = *src;
295#else
296 volatile u_int16_t *a = (volatile u_int16_t *)src;
297 volatile u_int16_t *b = (volatile u_int16_t *)dst;
298
299 b[0] = a[0];
300 b[1] = a[1];
301#endif
302}
303
304/*
305 * Wait for the previous command to be accepted (but not necessarily
306 * completed).
307 */
308static void
309fxp_scb_wait(struct fxp_softc *sc)
310{
311 int i = 10000;
312
313 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
314 DELAY(2);
315 if (i == 0) {
316 if_printf(&sc->arpcom.ac_if,
317 "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
318 CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
319 CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
320 CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS),
321 CSR_READ_2(sc, FXP_CSR_FLOWCONTROL));
322 }
323}
324
325static void
326fxp_scb_cmd(struct fxp_softc *sc, int cmd)
327{
328
329 if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
330 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
331 fxp_scb_wait(sc);
332 }
333 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
334}
335
336static void
337fxp_dma_wait(volatile u_int16_t *status, struct fxp_softc *sc)
338{
339 int i = 10000;
340
341 while (!(*status & FXP_CB_STATUS_C) && --i)
342 DELAY(2);
343 if (i == 0)
344 if_printf(&sc->arpcom.ac_if, "DMA timeout\n");
345}
346
347/*
348 * Return identification string if this device is ours.
349 */
350static int
351fxp_probe(device_t dev)
352{
353 u_int16_t devid;
354 u_int8_t revid;
355 struct fxp_ident *ident;
356
357 if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
358 devid = pci_get_device(dev);
359 revid = pci_get_revid(dev);
360 for (ident = fxp_ident_table; ident->name != NULL; ident++) {
361 if (ident->devid == devid &&
362 (ident->revid == revid || ident->revid == -1)) {
363 device_set_desc(dev, ident->name);
364 return (0);
365 }
366 }
367 }
368 return (ENXIO);
369}
370
371static void
372fxp_powerstate_d0(device_t dev)
373{
374 u_int32_t iobase, membase, irq;
375
376 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
377 /* Save important PCI config data. */
378 iobase = pci_read_config(dev, FXP_PCI_IOBA, 4);
379 membase = pci_read_config(dev, FXP_PCI_MMBA, 4);
380 irq = pci_read_config(dev, PCIR_INTLINE, 4);
381
382 /* Reset the power state. */
383 device_printf(dev, "chip is in D%d power mode "
384 "-- setting to D0\n", pci_get_powerstate(dev));
385
386 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
387
388 /* Restore PCI config data. */
389 pci_write_config(dev, FXP_PCI_IOBA, iobase, 4);
390 pci_write_config(dev, FXP_PCI_MMBA, membase, 4);
391 pci_write_config(dev, PCIR_INTLINE, irq, 4);
392 }
393}
394
395static int
396fxp_attach(device_t dev)
397{
398 int error = 0;
399 struct fxp_softc *sc = device_get_softc(dev);
400 struct ifnet *ifp;
401 u_int32_t val;
402 u_int16_t data;
403 int i, rid, m1, m2, prefer_iomap;
404
405 callout_init(&sc->fxp_stat_timer);
406 sysctl_ctx_init(&sc->sysctl_ctx);
407
408 /*
409 * Enable bus mastering. Enable memory space too, in case
410 * BIOS/Prom forgot about it.
411 */
412 pci_enable_busmaster(dev);
413 pci_enable_io(dev, SYS_RES_MEMORY);
414 val = pci_read_config(dev, PCIR_COMMAND, 2);
415
416 fxp_powerstate_d0(dev);
417
418 /*
419 * Figure out which we should try first - memory mapping or i/o mapping?
420 * We default to memory mapping. Then we accept an override from the
421 * command line. Then we check to see which one is enabled.
422 */
423 m1 = PCIM_CMD_MEMEN;
424 m2 = PCIM_CMD_PORTEN;
425 prefer_iomap = 0;
426 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
427 "prefer_iomap", &prefer_iomap) == 0 && prefer_iomap != 0) {
428 m1 = PCIM_CMD_PORTEN;
429 m2 = PCIM_CMD_MEMEN;
430 }
431
432 if (val & m1) {
433 sc->rtp =
434 (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
435 sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
436 sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd,
437 RF_ACTIVE);
438 }
439 if (sc->mem == NULL && (val & m2)) {
440 sc->rtp =
441 (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
442 sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
443 sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd,
444 RF_ACTIVE);
445 }
446
447 if (!sc->mem) {
448 device_printf(dev, "could not map device registers\n");
449 error = ENXIO;
450 goto fail;
451 }
452 if (bootverbose) {
453 device_printf(dev, "using %s space register mapping\n",
454 sc->rtp == SYS_RES_MEMORY? "memory" : "I/O");
455 }
456
457 sc->sc_st = rman_get_bustag(sc->mem);
458 sc->sc_sh = rman_get_bushandle(sc->mem);
459
460 /*
461 * Allocate our interrupt.
462 */
463 rid = 0;
464 sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
465 RF_SHAREABLE | RF_ACTIVE);
466 if (sc->irq == NULL) {
467 device_printf(dev, "could not map interrupt\n");
468 error = ENXIO;
469 goto fail;
470 }
471
472 /*
473 * Reset to a stable state.
474 */
475 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
476 DELAY(10);
477
478 sc->cbl_base = kmalloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB,
479 M_DEVBUF, M_WAITOK | M_ZERO);
480
481 sc->fxp_stats = kmalloc(sizeof(struct fxp_stats), M_DEVBUF,
482 M_WAITOK | M_ZERO);
483
484 sc->mcsp = kmalloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_WAITOK);
485
486 /*
487 * Pre-allocate our receive buffers.
488 */
489 for (i = 0; i < FXP_NRFABUFS; i++) {
490 if (fxp_add_rfabuf(sc, NULL) != 0) {
491 goto failmem;
492 }
493 }
494
495 /*
496 * Find out how large an SEEPROM we have.
497 */
498 fxp_autosize_eeprom(sc);
499
500 /*
501 * Determine whether we must use the 503 serial interface.
502 */
503 fxp_read_eeprom(sc, &data, 6, 1);
504 if ((data & FXP_PHY_DEVICE_MASK) != 0 &&
505 (data & FXP_PHY_SERIAL_ONLY))
506 sc->flags |= FXP_FLAG_SERIAL_MEDIA;
507
508 /*
509 * Create the sysctl tree
510 */
511 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
512 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
513 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
514 if (sc->sysctl_tree == NULL)
515 goto fail;
516 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
517 OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_PRISON,
518 &sc->tunable_int_delay, 0, &sysctl_hw_fxp_int_delay, "I",
519 "FXP driver receive interrupt microcode bundling delay");
520 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
521 OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_PRISON,
522 &sc->tunable_bundle_max, 0, &sysctl_hw_fxp_bundle_max, "I",
523 "FXP driver receive interrupt microcode bundle size limit");
524
525 /*
526 * Pull in device tunables.
527 */
528 sc->tunable_int_delay = TUNABLE_INT_DELAY;
529 sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
530 resource_int_value(device_get_name(dev), device_get_unit(dev),
531 "int_delay", &sc->tunable_int_delay);
532 resource_int_value(device_get_name(dev), device_get_unit(dev),
533 "bundle_max", &sc->tunable_bundle_max);
534
535 /*
536 * Find out the chip revision; lump all 82557 revs together.
537 */
538 fxp_read_eeprom(sc, &data, 5, 1);
539 if ((data >> 8) == 1)
540 sc->revision = FXP_REV_82557;
541 else
542 sc->revision = pci_get_revid(dev);
543
544 /*
545 * Enable workarounds for certain chip revision deficiencies.
546 *
547 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
548 * some systems based on a normal 82559 design, have a defect where
549 * the chip can cause a PCI protocol violation if it receives
550 * a CU_RESUME command when it is entering the IDLE state. The
551 * workaround is to disable Dynamic Standby Mode, so the chip never
552 * deasserts CLKRUN#, and always remains in an active state.
553 *
554 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
555 */
556 i = pci_get_device(dev);
557 if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
558 sc->revision >= FXP_REV_82559_A0) {
559 fxp_read_eeprom(sc, &data, 10, 1);
560 if (data & 0x02) { /* STB enable */
561 u_int16_t cksum;
562 int i;
563
564 device_printf(dev,
565 "Disabling dynamic standby mode in EEPROM\n");
566 data &= ~0x02;
567 fxp_write_eeprom(sc, &data, 10, 1);
568 device_printf(dev, "New EEPROM ID: 0x%x\n", data);
569 cksum = 0;
570 for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
571 fxp_read_eeprom(sc, &data, i, 1);
572 cksum += data;
573 }
574 i = (1 << sc->eeprom_size) - 1;
575 cksum = 0xBABA - cksum;
576 fxp_read_eeprom(sc, &data, i, 1);
577 fxp_write_eeprom(sc, &cksum, i, 1);
578 device_printf(dev,
579 "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
580 i, data, cksum);
581#if 1
582 /*
583 * If the user elects to continue, try the software
584 * workaround, as it is better than nothing.
585 */
586 sc->flags |= FXP_FLAG_CU_RESUME_BUG;
587#endif
588 }
589 }
590
591 /*
592 * If we are not a 82557 chip, we can enable extended features.
593 */
594 if (sc->revision != FXP_REV_82557) {
595 /*
596 * If MWI is enabled in the PCI configuration, and there
597 * is a valid cacheline size (8 or 16 dwords), then tell
598 * the board to turn on MWI.
599 */
600 if (val & PCIM_CMD_MWRICEN &&
601 pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
602 sc->flags |= FXP_FLAG_MWI_ENABLE;
603
604 /* turn on the extended TxCB feature */
605 sc->flags |= FXP_FLAG_EXT_TXCB;
606
607 /* enable reception of long frames for VLAN */
608 sc->flags |= FXP_FLAG_LONG_PKT_EN;
609 }
610
611 /*
612 * Read MAC address.
613 */
614 fxp_read_eeprom(sc, (u_int16_t *)sc->arpcom.ac_enaddr, 0, 3);
615 if (sc->flags & FXP_FLAG_SERIAL_MEDIA)
616 device_printf(dev, "10Mbps\n");
617 if (bootverbose) {
618 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
619 pci_get_vendor(dev), pci_get_device(dev),
620 pci_get_subvendor(dev), pci_get_subdevice(dev),
621 pci_get_revid(dev));
622 fxp_read_eeprom(sc, &data, 10, 1);
623 device_printf(dev, "Dynamic Standby mode is %s\n",
624 data & 0x02 ? "enabled" : "disabled");
625 }
626
627 /*
628 * If this is only a 10Mbps device, then there is no MII, and
629 * the PHY will use a serial interface instead.
630 *
631 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
632 * doesn't have a programming interface of any sort. The
633 * media is sensed automatically based on how the link partner
634 * is configured. This is, in essence, manual configuration.
635 */
636 if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
637 ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
638 fxp_serial_ifmedia_sts);
639 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
640 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
641 } else {
642 if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
643 fxp_ifmedia_sts)) {
644 device_printf(dev, "MII without any PHY!\n");
645 error = ENXIO;
646 goto fail;
647 }
648 }
649
650 ifp = &sc->arpcom.ac_if;
651 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
652 ifp->if_baudrate = 100000000;
653 ifp->if_init = fxp_init;
654 ifp->if_softc = sc;
655 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
656 ifp->if_ioctl = fxp_ioctl;
657 ifp->if_start = fxp_start;
658#ifdef DEVICE_POLLING
659 ifp->if_poll = fxp_poll;
660#endif
661 ifp->if_watchdog = fxp_watchdog;
662
663 /*
664 * Attach the interface.
665 */
666 ether_ifattach(ifp, sc->arpcom.ac_enaddr, NULL);
667
668 /*
669 * Tell the upper layer(s) we support long frames.
670 */
671 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
672
673 /*
674 * Let the system queue as many packets as we have available
675 * TX descriptors.
676 */
677 ifq_set_maxlen(&ifp->if_snd, FXP_USABLE_TXCB);
678 ifq_set_ready(&ifp->if_snd);
679
680 error = bus_setup_intr(dev, sc->irq, INTR_MPSAFE,
681 fxp_intr, sc, &sc->ih,
682 ifp->if_serializer);
683 if (error) {
684 ether_ifdetach(ifp);
685 if (sc->flags & FXP_FLAG_SERIAL_MEDIA)
686 ifmedia_removeall(&sc->sc_media);
687 device_printf(dev, "could not setup irq\n");
688 goto fail;
689 }
690
691 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->irq));
692 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
693
694 return (0);
695
696failmem:
697 device_printf(dev, "Failed to malloc memory\n");
698 error = ENOMEM;
699fail:
700 fxp_release(dev);
701 return (error);
702}
703
704/*
705 * release all resources
706 */
707static void
708fxp_release(device_t dev)
709{
710 struct fxp_softc *sc = device_get_softc(dev);
711
712 if (sc->miibus)
713 device_delete_child(dev, sc->miibus);
714 bus_generic_detach(dev);
715
716 if (sc->cbl_base)
717 kfree(sc->cbl_base, M_DEVBUF);
718 if (sc->fxp_stats)
719 kfree(sc->fxp_stats, M_DEVBUF);
720 if (sc->mcsp)
721 kfree(sc->mcsp, M_DEVBUF);
722 if (sc->rfa_headm)
723 m_freem(sc->rfa_headm);
724
725 if (sc->irq)
726 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
727 if (sc->mem)
728 bus_release_resource(dev, sc->rtp, sc->rgd, sc->mem);
729
730 sysctl_ctx_free(&sc->sysctl_ctx);
731}
732
733/*
734 * Detach interface.
735 */
736static int
737fxp_detach(device_t dev)
738{
739 struct fxp_softc *sc = device_get_softc(dev);
740
741 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
742
743 /*
744 * Stop DMA and drop transmit queue.
745 */
746 fxp_stop(sc);
747
748 /*
749 * Disable interrupts.
750 *
751 * NOTE: This should be done after fxp_stop(), because software
752 * resetting in fxp_stop() may leave interrupts turned on.
753 */
754 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
755
756 /*
757 * Free all media structures.
758 */
759 if (sc->flags & FXP_FLAG_SERIAL_MEDIA)
760 ifmedia_removeall(&sc->sc_media);
761
762 if (sc->ih)
763 bus_teardown_intr(dev, sc->irq, sc->ih);
764
765 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
766
767 /*
768 * Close down routes etc.
769 */
770 ether_ifdetach(&sc->arpcom.ac_if);
771
772 /* Release our allocated resources. */
773 fxp_release(dev);
774
775 return (0);
776}
777
778/*
779 * Device shutdown routine. Called at system shutdown after sync. The
780 * main purpose of this routine is to shut off receiver DMA so that
781 * kernel memory doesn't get clobbered during warmboot.
782 */
783static int
784fxp_shutdown(device_t dev)
785{
786 struct fxp_softc *sc = device_get_softc(dev);
787 struct ifnet *ifp = &sc->arpcom.ac_if;
788
789 lwkt_serialize_enter(ifp->if_serializer);
790 /*
791 * Make sure that DMA is disabled prior to reboot. Not doing
792 * so could allow DMA to corrupt kernel memory during the
793 * reboot before the driver initializes.
794 */
795 fxp_stop(sc);
796 lwkt_serialize_exit(ifp->if_serializer);
797 return (0);
798}
799
800/*
801 * Device suspend routine. Stop the interface and save some PCI
802 * settings in case the BIOS doesn't restore them properly on
803 * resume.
804 */
805static int
806fxp_suspend(device_t dev)
807{
808 struct fxp_softc *sc = device_get_softc(dev);
809 int i;
810
811 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
812
813 fxp_stop(sc);
814
815 for (i = 0; i < 5; i++)
816 sc->saved_maps[i] = pci_read_config(dev, PCIR_BAR(i), 4);
817 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
818 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
819 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
820 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
821
822 sc->suspended = 1;
823
824 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
825 return (0);
826}
827
828/*
829 * Device resume routine. Restore some PCI settings in case the BIOS
830 * doesn't, re-enable busmastering, and restart the interface if
831 * appropriate.
832 */
833static int
834fxp_resume(device_t dev)
835{
836 struct fxp_softc *sc = device_get_softc(dev);
837 struct ifnet *ifp = &sc->arpcom.ac_if;
838 int i;
839
840 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
841
842 fxp_powerstate_d0(dev);
843
844 /* better way to do this? */
845 for (i = 0; i < 5; i++)
846 pci_write_config(dev, PCIR_BAR(i), sc->saved_maps[i], 4);
847 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
848 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
849 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
850 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
851
852 /* reenable busmastering and memory space */
853 pci_enable_busmaster(dev);
854 pci_enable_io(dev, SYS_RES_MEMORY);
855
856 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
857 DELAY(10);
858
859 /* reinitialize interface if necessary */
860 if (ifp->if_flags & IFF_UP)
861 fxp_init(sc);
862
863 sc->suspended = 0;
864
865 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
866 return (0);
867}
868
869static void
870fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
871{
872 u_int16_t reg;
873 int x;
874
875 /*
876 * Shift in data.
877 */
878 for (x = 1 << (length - 1); x; x >>= 1) {
879 if (data & x)
880 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
881 else
882 reg = FXP_EEPROM_EECS;
883 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
884 DELAY(1);
885 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
886 DELAY(1);
887 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
888 DELAY(1);
889 }
890}
891
892/*
893 * Read from the serial EEPROM. Basically, you manually shift in
894 * the read opcode (one bit at a time) and then shift in the address,
895 * and then you shift out the data (all of this one bit at a time).
896 * The word size is 16 bits, so you have to provide the address for
897 * every 16 bits of data.
898 */
899static u_int16_t
900fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
901{
902 u_int16_t reg, data;
903 int x;
904
905 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
906 /*
907 * Shift in read opcode.
908 */
909 fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
910 /*
911 * Shift in address.
912 */
913 data = 0;
914 for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
915 if (offset & x)
916 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
917 else
918 reg = FXP_EEPROM_EECS;
919 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
920 DELAY(1);
921 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
922 DELAY(1);
923 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
924 DELAY(1);
925 reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
926 data++;
927 if (autosize && reg == 0) {
928 sc->eeprom_size = data;
929 break;
930 }
931 }
932 /*
933 * Shift out data.
934 */
935 data = 0;
936 reg = FXP_EEPROM_EECS;
937 for (x = 1 << 15; x; x >>= 1) {
938 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
939 DELAY(1);
940 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
941 data |= x;
942 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
943 DELAY(1);
944 }
945 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
946 DELAY(1);
947
948 return (data);
949}
950
951static void
952fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
953{
954 int i;
955
956 /*
957 * Erase/write enable.
958 */
959 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
960 fxp_eeprom_shiftin(sc, 0x4, 3);
961 fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
962 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
963 DELAY(1);
964 /*
965 * Shift in write opcode, address, data.
966 */
967 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
968 fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
969 fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
970 fxp_eeprom_shiftin(sc, data, 16);
971 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
972 DELAY(1);
973 /*
974 * Wait for EEPROM to finish up.
975 */
976 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
977 DELAY(1);
978 for (i = 0; i < 1000; i++) {
979 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
980 break;
981 DELAY(50);
982 }
983 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
984 DELAY(1);
985 /*
986 * Erase/write disable.
987 */
988 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
989 fxp_eeprom_shiftin(sc, 0x4, 3);
990 fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
991 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
992 DELAY(1);
993}
994
995/*
996 * From NetBSD:
997 *
998 * Figure out EEPROM size.
999 *
1000 * 559's can have either 64-word or 256-word EEPROMs, the 558
1001 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
1002 * talks about the existence of 16 to 256 word EEPROMs.
1003 *
1004 * The only known sizes are 64 and 256, where the 256 version is used
1005 * by CardBus cards to store CIS information.
1006 *
1007 * The address is shifted in msb-to-lsb, and after the last
1008 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
1009 * after which follows the actual data. We try to detect this zero, by
1010 * probing the data-out bit in the EEPROM control register just after
1011 * having shifted in a bit. If the bit is zero, we assume we've
1012 * shifted enough address bits. The data-out should be tri-state
1013 * before this, which should translate to a logical one.
1014 */
1015static void
1016fxp_autosize_eeprom(struct fxp_softc *sc)
1017{
1018
1019 /* guess maximum size of 256 words */
1020 sc->eeprom_size = 8;
1021
1022 /* autosize */
1023 fxp_eeprom_getword(sc, 0, 1);
1024}
1025
1026static void
1027fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1028{
1029 int i;
1030
1031 for (i = 0; i < words; i++)
1032 data[i] = fxp_eeprom_getword(sc, offset + i, 0);
1033}
1034
1035static void
1036fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1037{
1038 int i;
1039
1040 for (i = 0; i < words; i++)
1041 fxp_eeprom_putword(sc, offset + i, data[i]);
1042}
1043
1044/*
1045 * Start packet transmission on the interface.
1046 */
1047static void
1048fxp_start(struct ifnet *ifp)
1049{
1050 struct fxp_softc *sc = ifp->if_softc;
1051 struct fxp_cb_tx *txp;
1052
1053 ASSERT_SERIALIZED(ifp->if_serializer);
1054
1055 /*
1056 * See if we need to suspend xmit until the multicast filter
1057 * has been reprogrammed (which can only be done at the head
1058 * of the command chain).
1059 */
1060 if (sc->need_mcsetup) {
1061 ifq_purge(&ifp->if_snd);
1062 return;
1063 }
1064
1065 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1066 return;
1067
1068 txp = NULL;
1069
1070 /*
1071 * We're finished if there is nothing more to add to the list or if
1072 * we're all filled up with buffers to transmit.
1073 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
1074 * a NOP command when needed.
1075 */
1076 while (!ifq_is_empty(&ifp->if_snd) && sc->tx_queued < FXP_USABLE_TXCB) {
1077 struct mbuf *m, *mb_head;
1078 int segment, ntries = 0;
1079
1080 /*
1081 * Grab a packet to transmit.
1082 */
1083 mb_head = ifq_dequeue(&ifp->if_snd, NULL);
1084 if (mb_head == NULL)
1085 break;
1086tbdinit:
1087 /*
1088 * Make sure that the packet fits into one TX desc
1089 */
1090 segment = 0;
1091 for (m = mb_head; m != NULL; m = m->m_next) {
1092 if (m->m_len != 0) {
1093 ++segment;
1094 if (segment >= FXP_NTXSEG)
1095 break;
1096 }
1097 }
1098 if (segment >= FXP_NTXSEG) {
1099 struct mbuf *mn;
1100
1101 if (ntries) {
1102 /*
1103 * Packet is excessively fragmented,
1104 * and will never fit into one TX
1105 * desc. Give it up.
1106 */
1107 m_freem(mb_head);
1108 ifp->if_oerrors++;
1109 continue;
1110 }
1111
1112 mn = m_dup(mb_head, MB_DONTWAIT);
1113 if (mn == NULL) {
1114 m_freem(mb_head);
1115 ifp->if_oerrors++;
1116 continue;
1117 }
1118
1119 m_freem(mb_head);
1120 mb_head = mn;
1121 ntries = 1;
1122 goto tbdinit;
1123 }
1124
1125 /*
1126 * Get pointer to next available tx desc.
1127 */
1128 txp = sc->cbl_last->next;
1129
1130 /*
1131 * Go through each of the mbufs in the chain and initialize
1132 * the transmit buffer descriptors with the physical address
1133 * and size of the mbuf.
1134 */
1135 for (m = mb_head, segment = 0; m != NULL; m = m->m_next) {
1136 if (m->m_len != 0) {
1137 KKASSERT(segment < FXP_NTXSEG);
1138
1139 txp->tbd[segment].tb_addr =
1140 vtophys(mtod(m, vm_offset_t));
1141 txp->tbd[segment].tb_size = m->m_len;
1142 segment++;
1143 }
1144 }
1145 KKASSERT(m == NULL);
1146
1147 txp->tbd_number = segment;
1148 txp->mb_head = mb_head;
1149 txp->cb_status = 0;
1150 if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
1151 txp->cb_command =
1152 FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF |
1153 FXP_CB_COMMAND_S;
1154 } else {
1155 txp->cb_command =
1156 FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF |
1157 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
1158 }
1159 txp->tx_threshold = tx_threshold;
1160
1161 /*
1162 * Advance the end of list forward.
1163 */
1164 sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
1165 sc->cbl_last = txp;
1166
1167 /*
1168 * Advance the beginning of the list forward if there are
1169 * no other packets queued (when nothing is queued, cbl_first
1170 * sits on the last TxCB that was sent out).
1171 */
1172 if (sc->tx_queued == 0)
1173 sc->cbl_first = txp;
1174
1175 sc->tx_queued++;
1176 /*
1177 * Set a 5 second timer just in case we don't hear
1178 * from the card again.
1179 */
1180 ifp->if_timer = 5;
1181
1182 BPF_MTAP(ifp, mb_head);
1183 }
1184
1185 if (sc->tx_queued >= FXP_USABLE_TXCB)
1186 ifp->if_flags |= IFF_OACTIVE;
1187
1188 /*
1189 * We're finished. If we added to the list, issue a RESUME to get DMA
1190 * going again if suspended.
1191 */
1192 if (txp != NULL) {
1193 fxp_scb_wait(sc);
1194 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
1195 }
1196}
1197
1198#ifdef DEVICE_POLLING
1199
1200static void
1201fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1202{
1203 struct fxp_softc *sc = ifp->if_softc;
1204 u_int8_t statack;
1205
1206 ASSERT_SERIALIZED(ifp->if_serializer);
1207
1208 switch(cmd) {
1209 case POLL_REGISTER:
1210 /* disable interrupts */
1211 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
1212 break;
1213 case POLL_DEREGISTER:
1214 /* enable interrupts */
1215 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
1216 break;
1217 default:
1218 statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
1219 FXP_SCB_STATACK_FR;
1220 if (cmd == POLL_AND_CHECK_STATUS) {
1221 u_int8_t tmp;
1222
1223 tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
1224 if (tmp == 0xff || tmp == 0)
1225 return; /* nothing to do */
1226 tmp &= ~statack;
1227 /* ack what we can */
1228 if (tmp != 0)
1229 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
1230 statack |= tmp;
1231 }
1232 fxp_intr_body(sc, statack, count);
1233 break;
1234 }
1235}
1236
1237#endif /* DEVICE_POLLING */
1238
1239/*
1240 * Process interface interrupts.
1241 */
1242static void
1243fxp_intr(void *xsc)
1244{
1245 struct fxp_softc *sc = xsc;
1246 u_int8_t statack;
1247
1248 ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);
1249
1250 if (sc->suspended) {
1251 return;
1252 }
1253
1254 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
1255 /*
1256 * It should not be possible to have all bits set; the
1257 * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
1258 * all bits are set, this may indicate that the card has
1259 * been physically ejected, so ignore it.
1260 */
1261 if (statack == 0xff)
1262 return;
1263
1264 /*
1265 * First ACK all the interrupts in this pass.
1266 */
1267 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
1268 fxp_intr_body(sc, statack, -1);
1269 }
1270}
1271
1272static void
1273fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
1274{
1275 struct ifnet *ifp = &sc->arpcom.ac_if;
1276 struct mbuf *m;
1277 struct fxp_rfa *rfa;
1278 int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
1279 struct mbuf_chain chain[MAXCPU];
1280
1281 if (rnr)
1282 fxp_rnr++;
1283#ifdef DEVICE_POLLING
1284 /* Pick up a deferred RNR condition if `count' ran out last time. */
1285 if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
1286 sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
1287 rnr = 1;
1288 }
1289#endif
1290
1291 /*
1292 * Free any finished transmit mbuf chains.
1293 *
1294 * Handle the CNA event like a CXTNO event. It used to
1295 * be that this event (control unit not ready) was not
1296 * encountered, but it is now with the SMPng modifications.
1297 * The exact sequence of events that occur when the interface
1298 * is brought up is different now, and if this event
1299 * goes unhandled, the configuration/rxfilter setup sequence
1300 * can stall for several seconds. The result is that no
1301 * packets go out onto the wire for about 5 to 10 seconds
1302 * after the interface is ifconfig'ed for the first time.
1303 */
1304 if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
1305 struct fxp_cb_tx *txp;
1306
1307 for (txp = sc->cbl_first; sc->tx_queued &&
1308 (txp->cb_status & FXP_CB_STATUS_C) != 0;
1309 txp = txp->next) {
1310 if ((m = txp->mb_head) != NULL) {
1311 txp->mb_head = NULL;
1312 sc->tx_queued--;
1313 m_freem(m);
1314 } else {
1315 sc->tx_queued--;
1316 }
1317 }
1318 sc->cbl_first = txp;
1319
1320 if (sc->tx_queued < FXP_USABLE_TXCB)
1321 ifp->if_flags &= ~IFF_OACTIVE;
1322
1323 if (sc->tx_queued == 0) {
1324 ifp->if_timer = 0;
1325 if (sc->need_mcsetup)
1326 fxp_mc_setup(sc);
1327 }
1328
1329 /*
1330 * Try to start more packets transmitting.
1331 */
1332 if (!ifq_is_empty(&ifp->if_snd))
1333 if_devstart(ifp);
1334 }
1335
1336 /*
1337 * Just return if nothing happened on the receive side.
1338 */
1339 if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
1340 return;
1341
1342 ether_input_chain_init(chain);
1343
1344 /*
1345 * Process receiver interrupts. If a no-resource (RNR)
1346 * condition exists, get whatever packets we can and
1347 * re-start the receiver.
1348 *
1349 * When using polling, we do not process the list to completion,
1350 * so when we get an RNR interrupt we must defer the restart
1351 * until we hit the last buffer with the C bit set.
1352 * If we run out of cycles and rfa_headm has the C bit set,
1353 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
1354 * that the info will be used in the subsequent polling cycle.
1355 */
1356 for (;;) {
1357 m = sc->rfa_headm;
1358 rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
1359 RFA_ALIGNMENT_FUDGE);
1360
1361#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
1362 if (count >= 0 && count-- == 0) {
1363 if (rnr) {
1364 /* Defer RNR processing until the next time. */
1365 sc->flags |= FXP_FLAG_DEFERRED_RNR;
1366 rnr = 0;
1367 }
1368 break;
1369 }
1370#endif /* DEVICE_POLLING */
1371
1372 if ( (rfa->rfa_status & FXP_RFA_STATUS_C) == 0)
1373 break;
1374
1375 /*
1376 * Remove first packet from the chain.
1377 */
1378 sc->rfa_headm = m->m_next;
1379 m->m_next = NULL;
1380
1381 /*
1382 * Add a new buffer to the receive chain.
1383 * If this fails, the old buffer is recycled
1384 * instead.
1385 */
1386 if (fxp_add_rfabuf(sc, m) == 0) {
1387 int total_len;
1388
1389 /*
1390 * Fetch packet length (the top 2 bits of
1391 * actual_size are flags set by the controller
1392 * upon completion), and drop the packet in case
1393 * of bogus length or CRC errors.
1394 */
1395 total_len = rfa->actual_size & 0x3fff;
1396 if (total_len < sizeof(struct ether_header) ||
1397 total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
1398 sizeof(struct fxp_rfa) ||
1399 rfa->rfa_status & FXP_RFA_STATUS_CRC) {
1400 m_freem(m);
1401 continue;
1402 }
1403 m->m_pkthdr.len = m->m_len = total_len;
1404 ether_input_chain(ifp, m, NULL, chain);
1405 }
1406 }
1407
1408 ether_input_dispatch(chain);
1409
1410 if (rnr) {
1411 fxp_scb_wait(sc);
1412 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1413 vtophys(sc->rfa_headm->m_ext.ext_buf) +
1414 RFA_ALIGNMENT_FUDGE);
1415 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
1416 }
1417}
1418
1419/*
1420 * Update packet in/out/collision statistics. The i82557 doesn't
1421 * allow you to access these counters without doing a fairly
1422 * expensive DMA to get _all_ of the statistics it maintains, so
1423 * we do this operation here only once per second. The statistics
1424 * counters in the kernel are updated from the previous dump-stats
1425 * DMA and then a new dump-stats DMA is started. The on-chip
1426 * counters are zeroed when the DMA completes. If we can't start
1427 * the DMA immediately, we don't wait - we just prepare to read
1428 * them again next time.
1429 */
1430static void
1431fxp_tick(void *xsc)
1432{
1433 struct fxp_softc *sc = xsc;
1434 struct ifnet *ifp = &sc->arpcom.ac_if;
1435 struct fxp_stats *sp = sc->fxp_stats;
1436 struct fxp_cb_tx *txp;
1437 struct mbuf *m;
1438
1439 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
1440
1441 ifp->if_opackets += sp->tx_good;
1442 ifp->if_collisions += sp->tx_total_collisions;
1443 if (sp->rx_good) {
1444 ifp->if_ipackets += sp->rx_good;
1445 sc->rx_idle_secs = 0;
1446 } else {
1447 /*
1448 * Receiver's been idle for another second.
1449 */
1450 sc->rx_idle_secs++;
1451 }
1452 ifp->if_ierrors +=
1453 sp->rx_crc_errors +
1454 sp->rx_alignment_errors +
1455 sp->rx_rnr_errors +
1456 sp->rx_overrun_errors;
1457 /*
1458 * If any transmit underruns occurred, bump up the transmit
1459 * threshold by another 512 bytes (64 * 8).
1460 */
1461 if (sp->tx_underruns) {
1462 ifp->if_oerrors += sp->tx_underruns;
1463 if (tx_threshold < 192)
1464 tx_threshold += 64;
1465 }
1466
1467 /*
1468 * Release any xmit buffers that have completed DMA. This isn't
1469 * strictly necessary to do here, but it's advantageous for mbufs
1470 * with external storage to be released in a timely manner rather
1471 * than being deferred for a potentially long time. This limits
1472 * the delay to a maximum of one second.
1473 */
1474 for (txp = sc->cbl_first; sc->tx_queued &&
1475 (txp->cb_status & FXP_CB_STATUS_C) != 0;
1476 txp = txp->next) {
1477 if ((m = txp->mb_head) != NULL) {
1478 txp->mb_head = NULL;
1479 sc->tx_queued--;
1480 m_freem(m);
1481 } else {
1482 sc->tx_queued--;
1483 }
1484 }
1485 sc->cbl_first = txp;
1486
1487 if (sc->tx_queued < FXP_USABLE_TXCB)
1488 ifp->if_flags &= ~IFF_OACTIVE;
1489 if (sc->tx_queued == 0)
1490 ifp->if_timer = 0;
1491
1492 /*
1493 * Try to start more packets transmitting.
1494 */
1495 if (!ifq_is_empty(&ifp->if_snd))
1496 if_devstart(ifp);
1497
1498 /*
1499 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
1500 * then assume the receiver has locked up and attempt to clear
1501 * the condition by reprogramming the multicast filter. This is
1502 * a work-around for a bug in the 82557 where the receiver locks
1503 * up if it gets certain types of garbage in the synchronization
1504 * bits prior to the packet header. This bug is supposed to only
1505 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
1506 * mode as well (perhaps due to a 10/100 speed transition).
1507 */
1508 if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
1509 sc->rx_idle_secs = 0;
1510 fxp_mc_setup(sc);
1511 }
1512 /*
1513 * If there is no pending command, start another stats
1514 * dump. Otherwise punt for now.
1515 */
1516 if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
1517 /*
1518 * Start another stats dump.
1519 */
1520 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
1521 } else {
1522 /*
1523 * A previous command is still waiting to be accepted.
1524 * Just zero our copy of the stats and wait for the
1525 * next timer event to update them.
1526 */
1527 sp->tx_good = 0;
1528 sp->tx_underruns = 0;
1529 sp->tx_total_collisions = 0;
1530
1531 sp->rx_good = 0;
1532 sp->rx_crc_errors = 0;
1533 sp->rx_alignment_errors = 0;
1534 sp->rx_rnr_errors = 0;
1535 sp->rx_overrun_errors = 0;
1536 }
1537 if (sc->miibus != NULL)
1538 mii_tick(device_get_softc(sc->miibus));
1539 /*
1540 * Schedule another timeout one second from now.
1541 */
1542 callout_reset(&sc->fxp_stat_timer, hz, fxp_tick, sc);
1543
1544 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
1545}
1546
1547/*
1548 * Stop the interface. Cancels the statistics updater and resets
1549 * the interface.
1550 */
1551static void
1552fxp_stop(struct fxp_softc *sc)
1553{
1554 struct ifnet *ifp = &sc->arpcom.ac_if;
1555 struct fxp_cb_tx *txp;
1556 int i;
1557
1558 ASSERT_SERIALIZED(ifp->if_serializer);
1559
1560 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1561 ifp->if_timer = 0;
1562
1563 /*
1564 * Cancel stats updater.
1565 */
1566 callout_stop(&sc->fxp_stat_timer);
1567
1568 /*
1569 * Issue software reset, which also unloads the microcode.
1570 */
1571 sc->flags &= ~FXP_FLAG_UCODE;
1572 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
1573 DELAY(50);
1574
1575 /*
1576 * Release any xmit buffers.
1577 */
1578 txp = sc->cbl_base;
1579 if (txp != NULL) {
1580 for (i = 0; i < FXP_NTXCB; i++) {
1581 if (txp[i].mb_head != NULL) {
1582 m_freem(txp[i].mb_head);
1583 txp[i].mb_head = NULL;
1584 }
1585 }
1586 }
1587 sc->tx_queued = 0;
1588
1589 /*
1590 * Free all the receive buffers then reallocate/reinitialize
1591 */
1592 if (sc->rfa_headm != NULL)
1593 m_freem(sc->rfa_headm);
1594 sc->rfa_headm = NULL;
1595 sc->rfa_tailm = NULL;
1596 for (i = 0; i < FXP_NRFABUFS; i++) {
1597 if (fxp_add_rfabuf(sc, NULL) != 0) {
1598 /*
1599 * This "can't happen" - we're at splimp()
1600 * and we just freed all the buffers we need
1601 * above.
1602 */
1603 panic("fxp_stop: no buffers!");
1604 }
1605 }
1606}
1607
1608/*
1609 * Watchdog/transmission transmit timeout handler. Called when a
1610 * transmission is started on the interface, but no interrupt is
1611 * received before the timeout. This usually indicates that the
1612 * card has wedged for some reason.
1613 */
1614static void
1615fxp_watchdog(struct ifnet *ifp)
1616{
1617 ASSERT_SERIALIZED(ifp->if_serializer);
1618
1619 if_printf(ifp, "device timeout\n");
1620 ifp->if_oerrors++;
1621 fxp_init(ifp->if_softc);
1622}
1623
1624static void
1625fxp_init(void *xsc)
1626{
1627 struct fxp_softc *sc = xsc;
1628 struct ifnet *ifp = &sc->arpcom.ac_if;
1629 struct fxp_cb_config *cbp;
1630 struct fxp_cb_ias *cb_ias;
1631 struct fxp_cb_tx *txp;
1632 struct fxp_cb_mcs *mcsp;
1633 int i, prm;
1634
1635 ASSERT_SERIALIZED(ifp->if_serializer);
1636
1637 /*
1638 * Cancel any pending I/O
1639 */
1640 fxp_stop(sc);
1641
1642 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1643
1644 /*
1645 * Initialize base of CBL and RFA memory. Loading with zero
1646 * sets it up for regular linear addressing.
1647 */
1648 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1649 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
1650
1651 fxp_scb_wait(sc);
1652 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
1653
1654 /*
1655 * Initialize base of dump-stats buffer.
1656 */
1657 fxp_scb_wait(sc);
1658 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats));
1659 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
1660
1661 /*
1662 * Attempt to load microcode if requested.
1663 */
1664 if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
1665 fxp_load_ucode(sc);
1666
1667 /*
1668 * Initialize the multicast address list.
1669 */
1670 if (fxp_mc_addrs(sc)) {
1671 mcsp = sc->mcsp;
1672 mcsp->cb_status = 0;
1673 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL;
1674 mcsp->link_addr = -1;
1675 /*
1676 * Start the multicast setup command.
1677 */
1678 fxp_scb_wait(sc);
1679 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status));
1680 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1681 /* ...and wait for it to complete. */
1682 fxp_dma_wait(&mcsp->cb_status, sc);
1683 }
1684
1685 /*
1686 * We temporarily use memory that contains the TxCB list to
1687 * construct the config CB. The TxCB list memory is rebuilt
1688 * later.
1689 */
1690 cbp = (struct fxp_cb_config *) sc->cbl_base;
1691
1692 /*
1693 * This bcopy is kind of disgusting, but there are a bunch of must be
1694 * zero and must be one bits in this structure and this is the easiest
1695 * way to initialize them all to proper values.
1696 */
1697 bcopy(fxp_cb_config_template,
1698 (void *)(uintptr_t)(volatile void *)&cbp->cb_status,
1699 sizeof(fxp_cb_config_template));
1700
1701 cbp->cb_status = 0;
1702 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1703 cbp->link_addr = -1; /* (no) next command */
1704 cbp->byte_count = 22; /* (22) bytes to config */
1705 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
1706 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
1707 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
1708 cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
1709 cbp->type_enable = 0; /* actually reserved */
1710 cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
1711 cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
1712 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
1713 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
1714 cbp->dma_mbce = 0; /* (disable) dma max counters */
1715 cbp->late_scb = 0; /* (don't) defer SCB update */
1716 cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */
1717 cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */
1718 cbp->ci_int = 1; /* interrupt on CU idle */
1719 cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
1720 cbp->ext_stats_dis = 1; /* disable extended counters */
1721 cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */
1722 cbp->save_bf = sc->revision == FXP_REV_82557 ? 1 : prm;
1723 cbp->disc_short_rx = !prm; /* discard short packets */
1724 cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */
1725 cbp->two_frames = 0; /* do not limit FIFO to 2 frames */
1726 cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */
1727 cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
1728 cbp->csma_dis = 0; /* (don't) disable link */
1729 cbp->tcp_udp_cksum = 0; /* (don't) enable checksum */
1730 cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */
1731 cbp->link_wake_en = 0; /* (don't) assert PME# on link change */
1732 cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */
1733 cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */
1734 cbp->nsai = 1; /* (don't) disable source addr insert */
1735 cbp->preamble_length = 2; /* (7 byte) preamble */
1736 cbp->loopback = 0; /* (don't) loopback */
1737 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
1738 cbp->linear_pri_mode = 0; /* (wait after xmit only) */
1739 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
1740 cbp->promiscuous = prm; /* promiscuous mode */
1741 cbp->bcast_disable = 0; /* (don't) disable broadcasts */
1742 cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/
1743 cbp->ignore_ul = 0; /* consider U/L bit in IA matching */
1744 cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */
1745 cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
1746
1747 cbp->stripping = !prm; /* truncate rx packet to byte count */
1748 cbp->padding = 1; /* (do) pad short tx packets */
1749 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
1750 cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
1751 cbp->ia_wake_en = 0; /* (don't) wake up on address match */
1752 cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */
1753 /* must set wake_en in PMCSR also */
1754 cbp->force_fdx = 0; /* (don't) force full duplex */
1755 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
1756 cbp->multi_ia = 0; /* (don't) accept multiple IAs */
1757 cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
1758
1759 if (sc->revision == FXP_REV_82557) {
1760 /*
1761 * The 82557 has no hardware flow control, the values
1762 * below are the defaults for the chip.
1763 */
1764 cbp->fc_delay_lsb = 0;
1765 cbp->fc_delay_msb = 0x40;
1766 cbp->pri_fc_thresh = 3;
1767 cbp->tx_fc_dis = 0;
1768 cbp->rx_fc_restop = 0;
1769 cbp->rx_fc_restart = 0;
1770 cbp->fc_filter = 0;
1771 cbp->pri_fc_loc = 1;
1772 } else {
1773 cbp->fc_delay_lsb = 0x1f;
1774 cbp->fc_delay_msb = 0x01;
1775 cbp->pri_fc_thresh = 3;
1776 cbp->tx_fc_dis = 0; /* enable transmit FC */
1777 cbp->rx_fc_restop = 1; /* enable FC restop frames */
1778 cbp->rx_fc_restart = 1; /* enable FC restart frames */
1779 cbp->fc_filter = !prm; /* drop FC frames to host */
1780 cbp->pri_fc_loc = 1; /* FC pri location (byte31) */
1781 }
1782
1783 /*
1784 * Start the config command/DMA.
1785 */
1786 fxp_scb_wait(sc);
1787 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status));
1788 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1789 /* ...and wait for it to complete. */
1790 fxp_dma_wait(&cbp->cb_status, sc);
1791
1792 /*
1793 * Now initialize the station address. Temporarily use the TxCB
1794 * memory area like we did above for the config CB.
1795 */
1796 cb_ias = (struct fxp_cb_ias *) sc->cbl_base;
1797 cb_ias->cb_status = 0;
1798 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
1799 cb_ias->link_addr = -1;
1800 bcopy(sc->arpcom.ac_enaddr,
1801 (void *)(uintptr_t)(volatile void *)cb_ias->macaddr,
1802 sizeof(sc->arpcom.ac_enaddr));
1803
1804 /*
1805 * Start the IAS (Individual Address Setup) command/DMA.
1806 */
1807 fxp_scb_wait(sc);
1808 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1809 /* ...and wait for it to complete. */
1810 fxp_dma_wait(&cb_ias->cb_status, sc);
1811
1812 /*
1813 * Initialize transmit control block (TxCB) list.
1814 */
1815
1816 txp = sc->cbl_base;
1817 bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
1818 for (i = 0; i < FXP_NTXCB; i++) {
1819 txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
1820 txp[i].cb_command = FXP_CB_COMMAND_NOP;
1821 txp[i].link_addr =
1822 vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status);
1823 if (sc->flags & FXP_FLAG_EXT_TXCB)
1824 txp[i].tbd_array_addr = vtophys(&txp[i].tbd[2]);
1825 else
1826 txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]);
1827 txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK];
1828 }
1829 /*
1830 * Set the suspend flag on the first TxCB and start the control
1831 * unit. It will execute the NOP and then suspend.
1832 */
1833 txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
1834 sc->cbl_first = sc->cbl_last = txp;
1835 sc->tx_queued = 1;
1836
1837 fxp_scb_wait(sc);
1838 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1839
1840 /*
1841 * Initialize receiver buffer area - RFA.
1842 */
1843 fxp_scb_wait(sc);
1844 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1845 vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
1846 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
1847
1848 /*
1849 * Set current media.
1850 */
1851 if (sc->miibus != NULL)
1852 mii_mediachg(device_get_softc(sc->miibus));
1853
1854 ifp->if_flags |= IFF_RUNNING;
1855 ifp->if_flags &= ~IFF_OACTIVE;
1856
1857 /*
1858 * Enable interrupts.
1859 */
1860#ifdef DEVICE_POLLING
1861 /*
1862	 * ... but only if we are not polling.  Since the chip (presumably)
1863	 * has interrupts enabled by default, disable them explicitly here.
1864	 */
1865	if (ifp->if_flags & IFF_POLLING)
1866 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
1867 else
1868#endif /* DEVICE_POLLING */
1869 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
1870
1871 /*
1872 * Start stats updater.
1873 */
1874 callout_reset(&sc->fxp_stat_timer, hz, fxp_tick, sc);
1875}
1876
1877static int
1878fxp_serial_ifmedia_upd(struct ifnet *ifp)
1879{
1880 ASSERT_SERIALIZED(ifp->if_serializer);
1881 return (0);
1882}
1883
1884static void
1885fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1886{
1887 ASSERT_SERIALIZED(ifp->if_serializer);
1888 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
1889}
1890
1891/*
1892 * Change media according to request.
1893 */
1894static int
1895fxp_ifmedia_upd(struct ifnet *ifp)
1896{
1897 struct fxp_softc *sc = ifp->if_softc;
1898 struct mii_data *mii;
1899
1900 ASSERT_SERIALIZED(ifp->if_serializer);
1901
1902 mii = device_get_softc(sc->miibus);
1903 mii_mediachg(mii);
1904 return (0);
1905}
1906
1907/*
1908 * Notify the world which media we're using.
1909 */
1910static void
1911fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1912{
1913 struct fxp_softc *sc = ifp->if_softc;
1914 struct mii_data *mii;
1915
1916 ASSERT_SERIALIZED(ifp->if_serializer);
1917
1918 mii = device_get_softc(sc->miibus);
1919 mii_pollstat(mii);
1920 ifmr->ifm_active = mii->mii_media_active;
1921 ifmr->ifm_status = mii->mii_media_status;
1922
1923	if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_10_T &&
	    (sc->flags & FXP_FLAG_CU_RESUME_BUG))
1924 sc->cu_resume_bug = 1;
1925 else
1926 sc->cu_resume_bug = 0;
1927}
1928
1929/*
1930 * Add a buffer to the end of the RFA buffer list.
1931 * Return 0 if a fresh buffer was added, 1 on failure.  A failure
1932 * results in recycling 'oldm' (if non-NULL) onto the end of the
1933 * list - tossing out its old contents and reusing the cluster.
1934 * The RFA struct is placed at the beginning of the mbuf cluster and
1935 * the data pointer is fixed up to point just past it.
1936 */
1937static int
1938fxp_add_rfabuf(struct fxp_softc *sc, struct mbuf *oldm)
1939{
1940 u_int32_t v;
1941 struct mbuf *m;
1942 struct fxp_rfa *rfa, *p_rfa;
1943
1944 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
1945 if (m == NULL) { /* try to recycle the old mbuf instead */
1946 if (oldm == NULL)
1947 return 1;
1948 m = oldm;
1949 m->m_data = m->m_ext.ext_buf;
1950 }
1951
1952 /*
1953 * Move the data pointer up so that the incoming data packet
1954 * will be 32-bit aligned.
1955 */
1956 m->m_data += RFA_ALIGNMENT_FUDGE;
1957
1958 /*
1959	 * The RFA lives at the (fudged) start of the cluster; grab a
1960	 * pointer to it and move the data start past it.
1961 */
1962 rfa = mtod(m, struct fxp_rfa *);
1963 m->m_data += sizeof(struct fxp_rfa);
1964	rfa->size = (u_int16_t)(MCLBYTES - sizeof(struct fxp_rfa) -
	    RFA_ALIGNMENT_FUDGE);
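	/*
	 * Resulting cluster layout (editor's illustration):
	 *
	 *  ext_buf
	 *  +--------+-------------------+----------------------------+
	 *  | 2-byte |  struct fxp_rfa   |  received frame data ...   |
	 *  | fudge  |  (misaligned)     |  (m_data points here)      |
	 *  +--------+-------------------+----------------------------+
	 */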
1965
1966 /*
1967	 * Initialize the rest of the RFA.  Note that since the RFA is
1968	 * misaligned, its 32-bit fields cannot be stored directly; an
1969	 * optimized, inline copy is used for them instead.
1970 */
1971
1972 rfa->rfa_status = 0;
1973 rfa->rfa_control = FXP_RFA_CONTROL_EL;
1974 rfa->actual_size = 0;
1975
1976 v = -1;
1977 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr);
1978 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr);
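	/*
	 * fxp_lwcopy() (defined in the driver's headers) presumably
	 * performs the copy as two 16-bit stores, which is safe on the
	 * 2-byte-misaligned RFA.  A minimal sketch of such a helper
	 * (illustrative only, not the authoritative definition):
	 *
	 *	static __inline void
	 *	lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
	 *	{
	 *		volatile u_int16_t *a = (volatile u_int16_t *)src;
	 *		volatile u_int16_t *b = (volatile u_int16_t *)dst;
	 *
	 *		b[0] = a[0];
	 *		b[1] = a[1];
	 *	}
	 */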
1979
1980 /*
1981	 * If there are other buffers already on the list, attach this
1982	 * one to the end by fixing up the current tail to point to it.
1983 */
1984 if (sc->rfa_headm != NULL) {
1985 p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf +
1986 RFA_ALIGNMENT_FUDGE);
1987 sc->rfa_tailm->m_next = m;
1988 v = vtophys(rfa);
1989 fxp_lwcopy(&v, (volatile u_int32_t *) p_rfa->link_addr);
1990 p_rfa->rfa_control = 0;
1991 } else {
1992 sc->rfa_headm = m;
1993 }
1994 sc->rfa_tailm = m;
1995
1996 return (m == oldm);
1997}
1998
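/*
 * MDI (MII management) register access, shared by the two functions
 * below: the opcode (FXP_MDI_READ/FXP_MDI_WRITE) is shifted to bit 26,
 * the PHY address to bit 21, the register number to bit 16, and the
 * data occupies the low 16 bits; bit 28 (0x10000000) is polled until
 * the chip marks the operation complete.
 */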
1999static int
2000fxp_miibus_readreg(device_t dev, int phy, int reg)
2001{
2002 struct fxp_softc *sc = device_get_softc(dev);
2003 int count = 10000;
2004 int value;
2005
2006 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2007 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
2008
2009 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
2010 && count--)
2011 DELAY(10);
2012
2013 if (count <= 0)
2014 device_printf(dev, "fxp_miibus_readreg: timed out\n");
2015
2016 return (value & 0xffff);
2017}
2018
2019static void
2020fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
2021{
2022 struct fxp_softc *sc = device_get_softc(dev);
2023 int count = 10000;
2024
2025 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2026 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
2027 (value & 0xffff));
2028
2029 while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
2030 count--)
2031 DELAY(10);
2032
2033 if (count <= 0)
2034 device_printf(dev, "fxp_miibus_writereg: timed out\n");
2035}
2036
2037static int
2038fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2039{
2040 struct fxp_softc *sc = ifp->if_softc;
2041 struct ifreq *ifr = (struct ifreq *)data;
2042 struct mii_data *mii;
2043 int error = 0;
2044
2045 ASSERT_SERIALIZED(ifp->if_serializer);
2046
2047 switch (command) {
2048
2049 case SIOCSIFFLAGS:
2050 if (ifp->if_flags & IFF_ALLMULTI)
2051 sc->flags |= FXP_FLAG_ALL_MCAST;
2052 else
2053 sc->flags &= ~FXP_FLAG_ALL_MCAST;
2054
2055 /*
2056		 * If the interface is marked up and not running, start it.
2057		 * If it is marked down and running, stop it.
2058		 * XXX If it's up, re-initialize it anyway so that flags
2059		 * such as IFF_PROMISC are picked up.
2060 */
2061 if (ifp->if_flags & IFF_UP) {
2062 fxp_init(sc);
2063 } else {
2064 if (ifp->if_flags & IFF_RUNNING)
2065 fxp_stop(sc);
2066 }
2067 break;
2068
2069 case SIOCADDMULTI:
2070 case SIOCDELMULTI:
2071 if (ifp->if_flags & IFF_ALLMULTI)
2072 sc->flags |= FXP_FLAG_ALL_MCAST;
2073 else
2074 sc->flags &= ~FXP_FLAG_ALL_MCAST;
2075 /*
2076 * Multicast list has changed; set the hardware filter
2077 * accordingly.
2078 */
2079 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
2080 fxp_mc_setup(sc);
2081 /*
2082		 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check the
2083		 * flag again here instead of using an else clause.
2084 */
2085 if (sc->flags & FXP_FLAG_ALL_MCAST)
2086 fxp_init(sc);
2087 error = 0;
2088 break;
2089
2090 case SIOCSIFMEDIA:
2091 case SIOCGIFMEDIA:
2092 if (sc->miibus != NULL) {
2093 mii = device_get_softc(sc->miibus);
2094 error = ifmedia_ioctl(ifp, ifr,
2095 &mii->mii_media, command);
2096 } else {
2097 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
2098 }
2099 break;
2100
2101 default:
2102 error = ether_ioctl(ifp, command, data);
2103 break;
2104 }
2105 return (error);
2106}
2107
2108/*
2109 * Fill in the multicast address list and return the number of entries.
2110 */
2111static int
2112fxp_mc_addrs(struct fxp_softc *sc)
2113{
2114 struct fxp_cb_mcs *mcsp = sc->mcsp;
2115 struct ifnet *ifp = &sc->arpcom.ac_if;
2116 struct ifmultiaddr *ifma;
2117 int nmcasts;
2118
2119 nmcasts = 0;
2120 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
2121 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2122 if (ifma->ifma_addr->sa_family != AF_LINK)
2123 continue;
2124 if (nmcasts >= MAXMCADDR) {
2125 sc->flags |= FXP_FLAG_ALL_MCAST;
2126 nmcasts = 0;
2127 break;
2128 }
2129 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2130 (void *)(uintptr_t)(volatile void *)
2131 &sc->mcsp->mc_addr[nmcasts][0], 6);
2132 nmcasts++;
2133 }
2134 }
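	/*
	 * The MCS count is a byte count (6 bytes per Ethernet address).
	 * If the list overflowed MAXMCADDR above, nmcasts was reset to 0
	 * and FXP_FLAG_ALL_MCAST takes over instead.
	 */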
2135 mcsp->mc_cnt = nmcasts * 6;
2136 return (nmcasts);
2137}
2138
2139/*
2140 * Program the multicast filter.
2141 *
2142 * We impose an artificial restriction that the multicast setup command
2143 * must be the first command in the chain, and take steps to ensure
2144 * this.  Requiring this lets us preserve the performance of the
2145 * pre-initialized command ring (esp. its link pointers) by not actually
2146 * inserting the mcsetup command in the ring - i.e. its link pointer
2147 * points to the TxCB ring, but the mcsetup descriptor itself is not part
2148 * of it.  We can then do 'CU_START' on the mcsetup descriptor and have it
2149 * lead into the regular TxCB ring when it completes.
2150 *
2151 * This function must be called with the interface serializer held.
2152 */
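/*
 * Editor's illustration of the arrangement described above: the mcsetup
 * descriptor sits outside the TxCB ring, but its link pointer leads into
 * it, so when the command completes execution leads into the regular
 * TxCB ring.
 *
 *	mcsp ------> cbl_base[0] --> cbl_base[1] --> ... --> cbl_base[0]
 *	(off-ring)   (circular ring of FXP_NTXCB TxCBs)
 */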
2153static void
2154fxp_mc_setup(struct fxp_softc *sc)
2155{
2156 struct fxp_cb_mcs *mcsp = sc->mcsp;
2157 struct ifnet *ifp = &sc->arpcom.ac_if;
2158 int count;
2159
2160 /*
2161	 * If there are queued commands, we must wait until they have all
2162	 * completed.  If we are not already waiting, add a NOP command
2163	 * with the interrupt option set so that we are notified when the
2164	 * queue drains - fxp_start() ensures that no additional TX
2165	 * commands will be added while need_mcsetup is true.
2166 */
2167 if (sc->tx_queued) {
2168 struct fxp_cb_tx *txp;
2169
2170 /*
2171 * need_mcsetup will be true if we are already waiting for the
2172 * NOP command to be completed (see below). In this case, bail.
2173 */
2174 if (sc->need_mcsetup)
2175 return;
2176 sc->need_mcsetup = 1;
2177
2178 /*
2179 * Add a NOP command with interrupt so that we are notified
2180 * when all TX commands have been processed.
2181 */
2182 txp = sc->cbl_last->next;
2183 txp->mb_head = NULL;
2184 txp->cb_status = 0;
2185 txp->cb_command = FXP_CB_COMMAND_NOP |
2186 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
2187 /*
2188		 * Advance the end of the list.
2189 */
2190 sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
2191 sc->cbl_last = txp;
2192 sc->tx_queued++;
2193 /*
2194 * Issue a resume in case the CU has just suspended.
2195 */
2196 fxp_scb_wait(sc);
2197 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
2198 /*
2199 * Set a 5 second timer just in case we don't hear from the
2200 * card again.
2201 */
2202 ifp->if_timer = 5;
2203
2204 return;
2205 }
2206 sc->need_mcsetup = 0;
2207
2208 /*
2209 * Initialize multicast setup descriptor.
2210 */
2211 mcsp->next = sc->cbl_base;
2212 mcsp->mb_head = NULL;
2213 mcsp->cb_status = 0;
2214 mcsp->cb_command = FXP_CB_COMMAND_MCAS |
2215 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
2216 mcsp->link_addr = vtophys(&sc->cbl_base->cb_status);
2217 fxp_mc_addrs(sc);
2218 sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp;
2219 sc->tx_queued = 1;
2220
2221 /*
2222	 * Wait until the command unit is not active.  It should never
2223	 * be active when nothing is queued, but check anyway.
2224 */
2225 count = 100;
2226 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
2227 FXP_SCB_CUS_ACTIVE && --count)
2228 DELAY(10);
2229 if (count == 0) {
2230 if_printf(&sc->arpcom.ac_if, "command queue timeout\n");
2231 return;
2232 }
2233
2234 /*
2235 * Start the multicast setup command.
2236 */
2237 fxp_scb_wait(sc);
2238 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status));
2239 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2240
2241 ifp->if_timer = 2;
2242 return;
2243}
2244
2245static u_int32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
2246static u_int32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
2247static u_int32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
2248static u_int32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
2249static u_int32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
2250static u_int32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
2251
2252#define UCODE(x) x, sizeof(x)
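/* UCODE(x) expands to "x, sizeof(x)", filling the ucode/length members. */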
2253
2254struct ucode {
2255 u_int32_t revision;
2256 u_int32_t *ucode;
2257 int length;
2258 u_short int_delay_offset;
2259 u_short bundle_max_offset;
2260} ucode_table[] = {
2261 { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
2262 { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
2263 { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
2264 D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
2265 { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
2266 D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
2267 { FXP_REV_82550, UCODE(fxp_ucode_d102),
2268 D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
2269 { FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
2270 D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
2271 { 0, NULL, 0, 0, 0 }
2272};
2273
2274static void
2275fxp_load_ucode(struct fxp_softc *sc)
2276{
2277 struct ucode *uc;
2278 struct fxp_cb_ucode *cbp;
2279
2280 for (uc = ucode_table; uc->ucode != NULL; uc++)
2281 if (sc->revision == uc->revision)
2282 break;
2283 if (uc->ucode == NULL)
2284 return;
2285 cbp = (struct fxp_cb_ucode *)sc->cbl_base;
2286 cbp->cb_status = 0;
2287 cbp->cb_command = FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL;
2288 cbp->link_addr = -1; /* (no) next command */
2289 memcpy(cbp->ucode, uc->ucode, uc->length);
2290 if (uc->int_delay_offset)
2291 *(u_short *)&cbp->ucode[uc->int_delay_offset] =
2292 sc->tunable_int_delay + sc->tunable_int_delay / 2;
2293 if (uc->bundle_max_offset)
2294 *(u_short *)&cbp->ucode[uc->bundle_max_offset] =
2295 sc->tunable_bundle_max;
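	/*
	 * Note that the int_delay word written above is scaled by 1.5
	 * (e.g. a tunable_int_delay of 1000 is stored as 1500); see the
	 * comment above sysctl_hw_fxp_int_delay().
	 */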
2296 /*
2297 * Download the ucode to the chip.
2298 */
2299 fxp_scb_wait(sc);
2300 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status));
2301 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2302 /* ...and wait for it to complete. */
2303 fxp_dma_wait(&cbp->cb_status, sc);
2304 if_printf(&sc->arpcom.ac_if,
2305 "Microcode loaded, int_delay: %d usec bundle_max: %d\n",
2306 sc->tunable_int_delay,
2307 uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
2308 sc->flags |= FXP_FLAG_UCODE;
2309}
2310
2311/*
2312 * Interrupt delay is expressed in microseconds; a multiplier is used
2313 * to convert this to the appropriate clock ticks before use.
2314 */
2315static int
2316sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
2317{
2318 return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
2319}
2320
2321static int
2322sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
2323{
2324 return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
2325}