network code: Convert if_multiaddrs from LIST to TAILQ.

[dragonfly.git] / sys/dev/netif/bfe/if_bfe.c

/*
 * Copyright (c) 2003 Stuart Walsh<stu@ipng.org.uk>
 * and Duncan Barclay<dmlb@dmlb.org>
 * Modifications for FreeBSD-stable by Edwin Groothuis
 * <edwin at mavetju.org
 * < http://lists.freebsd.org/mailman/listinfo/freebsd-bugs>>
 */

/*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bfe/if_bfe.c,v 1.4.4.7 2004/03/02 08:41:33 julian Exp $
 * $DragonFly: src/sys/dev/netif/bfe/if_bfe.c,v 1.40 2008/09/17 08:51:29 sephe Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <dev/netif/bfe/if_bfereg.h>

MODULE_DEPEND(bfe, pci, 1, 1, 1);
MODULE_DEPEND(bfe, miibus, 1, 1, 1);

/* "controller miibus0" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

#define BFE_DEVDESC_MAX	64	/* Maximum device description length */

static struct bfe_type bfe_devs[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401,
	    "Broadcom BCM4401 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4401B0,
	    "Broadcom BCM4401-B0 Fast Ethernet" },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4402,
	    "Broadcom BCM4402 Fast Ethernet" },
	{ 0, 0, NULL }
};

static int	bfe_probe(device_t);
static int	bfe_attach(device_t);
static int	bfe_detach(device_t);
static void	bfe_intr(void *);
static void	bfe_start(struct ifnet *);
static int	bfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bfe_init(void *);
static void	bfe_stop(struct bfe_softc *);
static void	bfe_watchdog(struct ifnet *);
static void	bfe_shutdown(device_t);
static void	bfe_tick(void *);
static void	bfe_txeof(struct bfe_softc *);
static void	bfe_rxeof(struct bfe_softc *);
static void	bfe_set_rx_mode(struct bfe_softc *);
static int	bfe_list_rx_init(struct bfe_softc *);
static int	bfe_newbuf(struct bfe_softc *, int, int);
static void	bfe_setup_rxdesc(struct bfe_softc *, int);
static void	bfe_rx_ring_free(struct bfe_softc *);

static void	bfe_pci_setup(struct bfe_softc *, uint32_t);
static int	bfe_ifmedia_upd(struct ifnet *);
static void	bfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	bfe_miibus_readreg(device_t, int, int);
static int	bfe_miibus_writereg(device_t, int, int, int);
static int	bfe_wait_bit(struct bfe_softc *, uint32_t, uint32_t,
			     u_long, const int);
static void	bfe_get_config(struct bfe_softc *sc);
static void	bfe_read_eeprom(struct bfe_softc *, uint8_t *);
static void	bfe_stats_update(struct bfe_softc *);
static void	bfe_clear_stats(struct bfe_softc *);
static int	bfe_readphy(struct bfe_softc *, uint32_t, uint32_t*);
static int	bfe_writephy(struct bfe_softc *, uint32_t, uint32_t);
static int	bfe_resetphy(struct bfe_softc *);
static int	bfe_setupphy(struct bfe_softc *);
static void	bfe_chip_reset(struct bfe_softc *);
static void	bfe_chip_halt(struct bfe_softc *);
static void	bfe_core_reset(struct bfe_softc *);
static void	bfe_core_disable(struct bfe_softc *);
static int	bfe_dma_alloc(device_t);
static void	bfe_dma_free(struct bfe_softc *);
static void	bfe_cam_write(struct bfe_softc *, u_char *, int);

static device_method_t bfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bfe_probe),
	DEVMETHOD(device_attach,	bfe_attach),
	DEVMETHOD(device_detach,	bfe_detach),
	DEVMETHOD(device_shutdown,	bfe_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bfe_miibus_writereg),

	{ 0, 0 }
};

static driver_t bfe_driver = {
	"bfe",
	bfe_methods,
	sizeof(struct bfe_softc)
};

static devclass_t bfe_devclass;

DRIVER_MODULE(bfe, pci, bfe_driver, bfe_devclass, 0, 0);
DRIVER_MODULE(miibus, bfe, miibus_driver, miibus_devclass, 0, 0);

/*
 * Probe for a Broadcom 4401 chip.
 */
static int
bfe_probe(device_t dev)
{
	struct bfe_type *t;
	uint16_t vendor, product;

	vendor = pci_get_vendor(dev);
	product = pci_get_device(dev);

	for (t = bfe_devs; t->bfe_name != NULL; t++) {
		if (vendor == t->bfe_vid && product == t->bfe_did) {
			device_set_desc(dev, t->bfe_name);
			return(0);
		}
	}

	return(ENXIO);
}

static int
bfe_dma_alloc(device_t dev)
{
	struct bfe_softc *sc = device_get_softc(dev);
	bus_dmamem_t dmem;
	int error, i, tx_pos = 0, rx_pos = 0;

	/*
	 * Parent tag. Apparently the chip cannot handle any DMA address
	 * greater than BFE_BUS_SPACE_MAXADDR (1GB).
	 */
	error = bus_dma_tag_create(NULL,		/* parent */
			1, 0,				/* alignment, boundary */
			BFE_BUS_SPACE_MAXADDR,		/* lowaddr */
			BUS_SPACE_MAXADDR,		/* highaddr */
			NULL, NULL,			/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
			0,				/* num of segments */
			BUS_SPACE_MAXSIZE_32BIT,	/* max segment size */
			0,				/* flags */
			&sc->bfe_parent_tag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return(error);
	}

	/* Allocate TX ring */
	error = bus_dmamem_coherent(sc->bfe_parent_tag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    BFE_TX_LIST_SIZE,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(dev, "could not allocate TX list\n");
		return(error);
	}
	sc->bfe_tx_tag = dmem.dmem_tag;
	sc->bfe_tx_map = dmem.dmem_map;
	sc->bfe_tx_list = dmem.dmem_addr;
	sc->bfe_tx_dma = dmem.dmem_busaddr;

	/* Allocate RX ring */
	error = bus_dmamem_coherent(sc->bfe_parent_tag, PAGE_SIZE, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    BFE_RX_LIST_SIZE,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(dev, "could not allocate RX list\n");
		return(error);
	}
	sc->bfe_rx_tag = dmem.dmem_tag;
	sc->bfe_rx_map = dmem.dmem_map;
	sc->bfe_rx_list = dmem.dmem_addr;
	sc->bfe_rx_dma = dmem.dmem_busaddr;

	/* Tag for RX mbufs */
	error = bus_dma_tag_create(sc->bfe_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &sc->bfe_rxbuf_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag for RX mbufs\n");
		return(error);
	}

	error = bus_dmamap_create(sc->bfe_rxbuf_tag, BUS_DMA_WAITOK,
				  &sc->bfe_rx_tmpmap);
	if (error) {
		device_printf(dev, "could not create RX mbuf tmp map\n");
		bus_dma_tag_destroy(sc->bfe_rxbuf_tag);
		sc->bfe_rxbuf_tag = NULL;
		return error;
	}

	/* Allocate dma maps for RX list */
	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->bfe_rxbuf_tag, BUS_DMA_WAITOK,
					  &sc->bfe_rx_ring[i].bfe_map);
		if (error) {
			rx_pos = i;
			device_printf(dev, "cannot create DMA map for RX\n");
			goto ring_fail;
		}
	}
	rx_pos = BFE_RX_LIST_CNT;

	/* Tag for TX mbufs */
	error = bus_dma_tag_create(sc->bfe_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, BFE_MAXSEGS, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &sc->bfe_txbuf_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag for TX mbufs\n");
		return(error);
	}

	/* Allocate dmamaps for TX list */
	for (i = 0; i < BFE_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->bfe_txbuf_tag, BUS_DMA_WAITOK,
					  &sc->bfe_tx_ring[i].bfe_map);
		if (error) {
			tx_pos = i;
			device_printf(dev, "cannot create DMA map for TX\n");
			goto ring_fail;
		}
	}

	return(0);

ring_fail:
	if (sc->bfe_rxbuf_tag != NULL) {
		for (i = 0; i < rx_pos; ++i) {
			bus_dmamap_destroy(sc->bfe_rxbuf_tag,
					   sc->bfe_rx_ring[i].bfe_map);
		}
		bus_dmamap_destroy(sc->bfe_rxbuf_tag, sc->bfe_rx_tmpmap);
		bus_dma_tag_destroy(sc->bfe_rxbuf_tag);
		sc->bfe_rxbuf_tag = NULL;
	}

	if (sc->bfe_txbuf_tag != NULL) {
		for (i = 0; i < tx_pos; ++i) {
			bus_dmamap_destroy(sc->bfe_txbuf_tag,
					   sc->bfe_tx_ring[i].bfe_map);
		}
		bus_dma_tag_destroy(sc->bfe_txbuf_tag);
		sc->bfe_txbuf_tag = NULL;
	}
	return error;
}

static int
bfe_attach(device_t dev)
{
	struct ifnet *ifp;
	struct bfe_softc *sc;
	int error = 0, rid;

	sc = device_get_softc(dev);

	sc->bfe_dev = dev;
	callout_init(&sc->bfe_stat_timer);

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t membase, irq;

		/* Save important PCI config data. */
		membase = pci_read_config(dev, BFE_PCI_MEMLO, 4);
		irq = pci_read_config(dev, BFE_PCI_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode"
			      " -- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, BFE_PCI_MEMLO, membase, 4);
		pci_write_config(dev, BFE_PCI_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = BFE_PCI_MEMLO;
	sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);
	if (sc->bfe_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		return ENXIO;
	}

	sc->bfe_btag = rman_get_bustag(sc->bfe_res);
	sc->bfe_bhandle = rman_get_bushandle(sc->bfe_res);

	/* Allocate interrupt */
	rid = 0;

	sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->bfe_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	error = bfe_dma_alloc(dev);
	if (error != 0) {
		device_printf(dev, "failed to allocate DMA resources\n");
		goto fail;
	}

	/* Set up ifnet structure */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bfe_ioctl;
	ifp->if_start = bfe_start;
	ifp->if_watchdog = bfe_watchdog;
	ifp->if_init = bfe_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 100000000;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifq_set_maxlen(&ifp->if_snd, BFE_TX_QLEN);
	ifq_set_ready(&ifp->if_snd);

	bfe_get_config(sc);

	/* Reset the chip and turn on the PHY */
	bfe_chip_reset(sc);

	if (mii_phy_probe(dev, &sc->bfe_miibus,
	    bfe_ifmedia_upd, bfe_ifmedia_sts)) {
		device_printf(dev, "MII without any PHY!\n");
		error = ENXIO;
		goto fail;
	}

	ether_ifattach(ifp, sc->arpcom.ac_enaddr, NULL);

	/*
	 * Hook interrupt last to avoid having to lock softc
	 */
	error = bus_setup_intr(dev, sc->bfe_irq, INTR_MPSAFE,
			       bfe_intr, sc, &sc->bfe_intrhand,
			       sc->arpcom.ac_if.if_serializer);

	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bfe_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return 0;
fail:
	bfe_detach(dev);
	return(error);
}

static int
bfe_detach(device_t dev)
{
	struct bfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		bfe_stop(sc);
		bfe_chip_reset(sc);
		bus_teardown_intr(dev, sc->bfe_irq, sc->bfe_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	if (sc->bfe_miibus != NULL)
		device_delete_child(dev, sc->bfe_miibus);
	bus_generic_detach(dev);

	if (sc->bfe_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bfe_irq);

	if (sc->bfe_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, BFE_PCI_MEMLO,
				     sc->bfe_res);
	}
	bfe_dma_free(sc);

	return(0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
bfe_shutdown(device_t dev)
{
	struct bfe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	bfe_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
bfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bfe_softc *sc;
	uint32_t ret;

	sc = device_get_softc(dev);
	if (phy != sc->bfe_phyaddr)
		return(0);
	bfe_readphy(sc, reg, &ret);

	return(ret);
}

static int
bfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	if (phy != sc->bfe_phyaddr)
		return(0);
	bfe_writephy(sc, reg, val);

	return(0);
}

static void
bfe_tx_ring_free(struct bfe_softc *sc)
{
	int i;

	for (i = 0; i < BFE_TX_LIST_CNT; i++) {
		if (sc->bfe_tx_ring[i].bfe_mbuf != NULL) {
			bus_dmamap_unload(sc->bfe_txbuf_tag,
					  sc->bfe_tx_ring[i].bfe_map);
			m_freem(sc->bfe_tx_ring[i].bfe_mbuf);
			sc->bfe_tx_ring[i].bfe_mbuf = NULL;
		}
	}
	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
}

static void
bfe_rx_ring_free(struct bfe_softc *sc)
{
	int i;

	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) {
			bus_dmamap_unload(sc->bfe_rxbuf_tag,
					  sc->bfe_rx_ring[i].bfe_map);
			m_freem(sc->bfe_rx_ring[i].bfe_mbuf);
			sc->bfe_rx_ring[i].bfe_mbuf = NULL;
		}
	}
	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
}

static int
bfe_list_rx_init(struct bfe_softc *sc)
{
	int i, error;

	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		error = bfe_newbuf(sc, i, 1);
		if (error)
			return(error);
	}

	CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc)));

	sc->bfe_rx_cons = 0;

	return(0);
}

static int
bfe_newbuf(struct bfe_softc *sc, int c, int init)
{
	struct bfe_data *r;
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	struct mbuf *m;
	int error, nsegs;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

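	/*
	 * Load the mbuf into the spare map first, so the ring slot keeps
	 * its valid mapping if the load fails; on success the two maps
	 * are swapped below.
	 */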
	error = bus_dmamap_load_mbuf_segment(sc->bfe_rxbuf_tag,
					     sc->bfe_rx_tmpmap, m,
					     &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init)
			if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
		return error;
	}

	KKASSERT(c >= 0 && c < BFE_RX_LIST_CNT);
	r = &sc->bfe_rx_ring[c];

	if (r->bfe_mbuf != NULL)
		bus_dmamap_unload(sc->bfe_rxbuf_tag, r->bfe_map);

	map = r->bfe_map;
	r->bfe_map = sc->bfe_rx_tmpmap;
	sc->bfe_rx_tmpmap = map;

	r->bfe_mbuf = m;
	r->bfe_paddr = seg.ds_addr;

	bfe_setup_rxdesc(sc, c);
	return 0;
}

static void
bfe_setup_rxdesc(struct bfe_softc *sc, int c)
{
	struct bfe_rxheader *rx_header;
	struct mbuf *m;
	struct bfe_desc *d;
	struct bfe_data *r;
	uint32_t ctrl;

	KKASSERT(c >= 0 && c < BFE_RX_LIST_CNT);
	r = &sc->bfe_rx_ring[c];
	d = &sc->bfe_rx_list[c];

	KKASSERT(r->bfe_mbuf != NULL && r->bfe_paddr != 0);

	m = r->bfe_mbuf;
	rx_header = mtod(m, struct bfe_rxheader *);
	rx_header->len = 0;
	rx_header->flags = 0;
	bus_dmamap_sync(sc->bfe_rxbuf_tag, r->bfe_map, BUS_DMASYNC_PREWRITE);

	ctrl = ETHER_MAX_LEN + 32;
	if (c == BFE_RX_LIST_CNT - 1)
		ctrl |= BFE_DESC_EOT;

	d->bfe_addr = r->bfe_paddr + BFE_PCI_DMA;
	d->bfe_ctrl = ctrl;
}

static void
bfe_get_config(struct bfe_softc *sc)
{
	uint8_t eeprom[128];

	bfe_read_eeprom(sc, eeprom);

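	/*
	 * The station address lives at byte offsets 78-83 of the EEPROM
	 * image; each 16-bit word is stored byte-swapped, hence the
	 * pairwise reordering below.
	 */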
	sc->arpcom.ac_enaddr[0] = eeprom[79];
	sc->arpcom.ac_enaddr[1] = eeprom[78];
	sc->arpcom.ac_enaddr[2] = eeprom[81];
	sc->arpcom.ac_enaddr[3] = eeprom[80];
	sc->arpcom.ac_enaddr[4] = eeprom[83];
	sc->arpcom.ac_enaddr[5] = eeprom[82];

	sc->bfe_phyaddr = eeprom[90] & 0x1f;
	sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1;

	sc->bfe_core_unit = 0;
	sc->bfe_dma_offset = BFE_PCI_DMA;
}

static void
bfe_pci_setup(struct bfe_softc *sc, uint32_t cores)
{
	uint32_t bar_orig, pci_rev, val;

	bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4);
	pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4);
	pci_rev = CSR_READ_4(sc, BFE_SBIDHIGH) & BFE_RC_MASK;

	val = CSR_READ_4(sc, BFE_SBINTVEC);
	val |= cores;
	CSR_WRITE_4(sc, BFE_SBINTVEC, val);

	val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2);
	val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST;
	CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2, val);

	pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4);
}

static void
bfe_clear_stats(struct bfe_softc *sc)
{
	u_long reg;

	CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ);
	for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
		CSR_READ_4(sc, reg);
	for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
		CSR_READ_4(sc, reg);
}

static int
bfe_resetphy(struct bfe_softc *sc)
{
	uint32_t val;

	bfe_writephy(sc, 0, BMCR_RESET);
	DELAY(100);
	bfe_readphy(sc, 0, &val);
	if (val & BMCR_RESET) {
		if_printf(&sc->arpcom.ac_if,
			  "PHY Reset would not complete.\n");
		return(ENXIO);
	}
	return(0);
}

static void
bfe_chip_halt(struct bfe_softc *sc)
{
	/* Disable interrupts - not that it actually stops them arriving. */
	CSR_WRITE_4(sc, BFE_IMASK, 0);
	CSR_READ_4(sc, BFE_IMASK);

	CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
	bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1);

	CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
	CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
	DELAY(10);
}

static void
bfe_chip_reset(struct bfe_softc *sc)
{
	uint32_t val;

	/* Set the interrupt vector for the enet core */
	bfe_pci_setup(sc, BFE_INTVEC_ENET0);

	/* is core up? */
	val = CSR_READ_4(sc, BFE_SBTMSLOW) & (BFE_RESET | BFE_REJECT | BFE_CLOCK);
	if (val == BFE_CLOCK) {
		/* It is, so shut it down */
		CSR_WRITE_4(sc, BFE_RCV_LAZY, 0);
		CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
		bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1);
		CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
		sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0;
		if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK)
			bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE, 100, 0);
		CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
		sc->bfe_rx_cons = 0;
	}

	bfe_core_reset(sc);
	bfe_clear_stats(sc);

	/*
	 * We want the phy registers to be accessible even when
	 * the driver is "downed" so initialize MDC preamble, frequency,
	 * and whether internal or external phy here.
	 */

	/* 4402 has 62.5Mhz SB clock and internal phy */
	CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d);

	/* Internal or external PHY? */
	val = CSR_READ_4(sc, BFE_DEVCTRL);
	if (!(val & BFE_IPP))
		CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL);
	else if (CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) {
		BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR);
		DELAY(100);
	}

	/* Enable CRC32 generation and set proper LED modes */
	BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED);

	/* Reset or clear powerdown control bit */
	BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN);

	CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) &
				       BFE_LAZY_FC_MASK));

	/*
	 * We don't want lazy interrupts, so just send them at the end of a
	 * frame, please
	 */
	BFE_OR(sc, BFE_RCV_LAZY, 0);

	/* Set max lengths, accounting for VLAN tags */
	CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN+32);
	CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN+32);

	/* Set watermark XXX - magic */
	CSR_WRITE_4(sc, BFE_TX_WMARK, 56);

	/*
	 * Initialise DMA channels - not forgetting dma addresses need to be
	 * added to BFE_PCI_DMA
	 */
	CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE);
	CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA);

	CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) |
					BFE_RX_CTRL_ENABLE);
	CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA);

	bfe_resetphy(sc);
	bfe_setupphy(sc);
}

static void
bfe_core_disable(struct bfe_softc *sc)
{
	if ((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET)
		return;

	/*
	 * Set reject, wait for it to be set, then wait for the core to
	 * stop being busy.  Then set reset and reject and enable the clocks.
	 */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK));
	bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0);
	bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1);
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT |
				       BFE_RESET));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);
	/* Leave reset and reject set */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET));
	DELAY(10);
}

static void
bfe_core_reset(struct bfe_softc *sc)
{
	uint32_t val;

	/* Disable the core */
	bfe_core_disable(sc);

	/* and bring it back up */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);

	/* Chip bug, clear SERR, IB and TO if they are set. */
	if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR)
		CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0);
	val = CSR_READ_4(sc, BFE_SBIMSTATE);
	if (val & (BFE_IBE | BFE_TO))
		CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO));

	/* Clear reset and allow it to move through the core */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC));
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);

	/* Leave the clock set */
	CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK);
	CSR_READ_4(sc, BFE_SBTMSLOW);
	DELAY(10);
}

static void
bfe_cam_write(struct bfe_softc *sc, u_char *data, int index)
{
	uint32_t val;

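	/*
	 * Pack the six MAC address bytes into the CAM data registers:
	 * bytes 2-5 go into the low word, the valid bit plus bytes 0-1
	 * into the high word, then trigger a write to the given index.
	 */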
	val = ((uint32_t) data[2]) << 24;
	val |= ((uint32_t) data[3]) << 16;
	val |= ((uint32_t) data[4]) << 8;
	val |= ((uint32_t) data[5]);
	CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val);
	val = (BFE_CAM_HI_VALID |
	       (((uint32_t) data[0]) << 8) |
	       (((uint32_t) data[1])));
	CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val);
	CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE |
		    ((uint32_t)index << BFE_CAM_INDEX_SHIFT)));
	bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1);
}

static void
bfe_set_rx_mode(struct bfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t val;
	int i = 0;

	val = CSR_READ_4(sc, BFE_RXCONF);

	if (ifp->if_flags & IFF_PROMISC)
		val |= BFE_RXCONF_PROMISC;
	else
		val &= ~BFE_RXCONF_PROMISC;

	if (ifp->if_flags & IFF_BROADCAST)
		val &= ~BFE_RXCONF_DBCAST;
	else
		val |= BFE_RXCONF_DBCAST;

	CSR_WRITE_4(sc, BFE_CAM_CTRL, 0);
	bfe_cam_write(sc, sc->arpcom.ac_enaddr, i++);

	if (ifp->if_flags & IFF_ALLMULTI) {
		val |= BFE_RXCONF_ALLMULTI;
	} else {
		val &= ~BFE_RXCONF_ALLMULTI;
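		/*
		 * CAM index 0 already holds the station address (written
		 * above); program the multicast addresses starting at
		 * index 1.
		 */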
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			bfe_cam_write(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i++);
		}
	}

	CSR_WRITE_4(sc, BFE_RXCONF, val);
	BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE);
}

static void
bfe_dma_free(struct bfe_softc *sc)
{
	int i;

	if (sc->bfe_tx_tag != NULL) {
		bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map);
		if (sc->bfe_tx_list != NULL) {
			bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list,
					sc->bfe_tx_map);
			sc->bfe_tx_list = NULL;
		}
		bus_dma_tag_destroy(sc->bfe_tx_tag);
		sc->bfe_tx_tag = NULL;
	}

	if (sc->bfe_rx_tag != NULL) {
		bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map);
		if (sc->bfe_rx_list != NULL) {
			bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list,
					sc->bfe_rx_map);
			sc->bfe_rx_list = NULL;
		}
		bus_dma_tag_destroy(sc->bfe_rx_tag);
		sc->bfe_rx_tag = NULL;
	}

	if (sc->bfe_txbuf_tag != NULL) {
		for (i = 0; i < BFE_TX_LIST_CNT; i++) {
			bus_dmamap_destroy(sc->bfe_txbuf_tag,
					   sc->bfe_tx_ring[i].bfe_map);
		}
		bus_dma_tag_destroy(sc->bfe_txbuf_tag);
		sc->bfe_txbuf_tag = NULL;
	}

	if (sc->bfe_rxbuf_tag != NULL) {
		for (i = 0; i < BFE_RX_LIST_CNT; i++) {
			bus_dmamap_destroy(sc->bfe_rxbuf_tag,
					   sc->bfe_rx_ring[i].bfe_map);
		}
		bus_dmamap_destroy(sc->bfe_rxbuf_tag, sc->bfe_rx_tmpmap);
		bus_dma_tag_destroy(sc->bfe_rxbuf_tag);
		sc->bfe_rxbuf_tag = NULL;
	}

	if (sc->bfe_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->bfe_parent_tag);
		sc->bfe_parent_tag = NULL;
	}
}

static void
bfe_read_eeprom(struct bfe_softc *sc, uint8_t *data)
{
	long i;
	uint16_t *ptr = (uint16_t *)data;

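	/*
	 * The EEPROM image is exposed through the register window at
	 * offset 4096; each 32-bit read carries one 16-bit word of data,
	 * and the upper half is discarded by the assignment below.
	 */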
	for (i = 0; i < 128; i += 2)
		ptr[i/2] = CSR_READ_4(sc, 4096 + i);
}

static int
bfe_wait_bit(struct bfe_softc *sc, uint32_t reg, uint32_t bit,
	     u_long timeout, const int clear)
{
	u_long i;

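	/* Poll in 10us steps, so 'timeout' is in units of roughly 10us. */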
	for (i = 0; i < timeout; i++) {
		uint32_t val = CSR_READ_4(sc, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		DELAY(10);
	}
	if (i == timeout) {
		if_printf(&sc->arpcom.ac_if,
			  "BUG! Timeout waiting for bit %08x of register "
			  "%x to %s.\n", bit, reg,
			  (clear ? "clear" : "set"));
		return -1;
	}
	return 0;
}

static int
bfe_readphy(struct bfe_softc *sc, uint32_t reg, uint32_t *val)
{
	int err;

	/* Clear MII ISR */
	CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
		    (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) |
		    (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
		    (reg << BFE_MDIO_RA_SHIFT) |
		    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT)));
	err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);
	*val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA;
	return(err);
}

static int
bfe_writephy(struct bfe_softc *sc, uint32_t reg, uint32_t val)
{
	int status;

	CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII);
	CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START |
		    (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) |
		    (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) |
		    (reg << BFE_MDIO_RA_SHIFT) |
		    (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) |
		    (val & BFE_MDIO_DATA_DATA)));
	status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0);

	return status;
}

/*
 * XXX - I think this is handled by the PHY driver, but it can't hurt to do it
 * twice
 */
static int
bfe_setupphy(struct bfe_softc *sc)
{
	uint32_t val;

	/* Enable activity LED */
	bfe_readphy(sc, 26, &val);
	bfe_writephy(sc, 26, val & 0x7fff);
	bfe_readphy(sc, 26, &val);

	/* Enable traffic meter LED mode */
	bfe_readphy(sc, 27, &val);
	bfe_writephy(sc, 27, val | (1 << 6));

	return(0);
}

static void
bfe_stats_update(struct bfe_softc *sc)
{
	u_long reg;
	uint32_t *val;

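	/*
	 * The hardware MIB counters are laid out contiguously; walk them
	 * in register order and accumulate each one into the next field
	 * of bfe_hwstats (the structure is expected to mirror that layout).
	 */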
	val = &sc->bfe_hwstats.tx_good_octets;
	for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4)
		*val++ += CSR_READ_4(sc, reg);
	val = &sc->bfe_hwstats.rx_good_octets;
	for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4)
		*val++ += CSR_READ_4(sc, reg);
}

static void
bfe_txeof(struct bfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t i, chipidx;

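	/*
	 * The low bits of the TX DMA status register hold the byte offset
	 * of the descriptor the chip is currently processing; convert it
	 * into a ring index and reclaim everything before it.
	 */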
	chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK;
	chipidx /= sizeof(struct bfe_desc);

	i = sc->bfe_tx_cons;

	/* Go through the mbufs and free those that have been transmitted */
	while (i != chipidx) {
		struct bfe_data *r = &sc->bfe_tx_ring[i];

		if (r->bfe_mbuf != NULL) {
			ifp->if_opackets++;
			bus_dmamap_unload(sc->bfe_txbuf_tag, r->bfe_map);
			m_freem(r->bfe_mbuf);
			r->bfe_mbuf = NULL;
		}

		KKASSERT(sc->bfe_tx_cnt > 0);
		sc->bfe_tx_cnt--;
		BFE_INC(i, BFE_TX_LIST_CNT);
	}

	if (i != sc->bfe_tx_cons) {
		sc->bfe_tx_cons = i;

		if (sc->bfe_tx_cnt + BFE_SPARE_TXDESC < BFE_TX_LIST_CNT)
			ifp->if_flags &= ~IFF_OACTIVE;
	}
	if (sc->bfe_tx_cnt == 0)
		ifp->if_timer = 0;
}

/* Pass a received packet up the stack */
static void
bfe_rxeof(struct bfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;
	struct bfe_rxheader *rxheader;
	struct bfe_data *r;
	uint32_t cons, status, current, len, flags;
	struct mbuf_chain chain[MAXCPU];

	cons = sc->bfe_rx_cons;
	status = CSR_READ_4(sc, BFE_DMARX_STAT);
	current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc);

	ether_input_chain_init(chain);

	while (current != cons) {
		r = &sc->bfe_rx_ring[cons];
		bus_dmamap_sync(sc->bfe_rxbuf_tag, r->bfe_map,
				BUS_DMASYNC_POSTREAD);

		KKASSERT(r->bfe_mbuf != NULL);
		m = r->bfe_mbuf;
		rxheader = mtod(m, struct bfe_rxheader*);
		len = rxheader->len - ETHER_CRC_LEN;
		flags = rxheader->flags;

		/* flag an error and try again */
		if (len > ETHER_MAX_LEN + 32 || (flags & BFE_RX_FLAG_ERRORS)) {
			ifp->if_ierrors++;
			if (flags & BFE_RX_FLAG_SERR)
				ifp->if_collisions++;

			bfe_setup_rxdesc(sc, cons);
			BFE_INC(cons, BFE_RX_LIST_CNT);
			continue;
		}

		/* Go past the rx header */
		if (bfe_newbuf(sc, cons, 0) != 0) {
			bfe_setup_rxdesc(sc, cons);
			ifp->if_ierrors++;
			BFE_INC(cons, BFE_RX_LIST_CNT);
			continue;
		}

		m_adj(m, BFE_RX_OFFSET);
		m->m_len = m->m_pkthdr.len = len;

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		ether_input_chain(ifp, m, NULL, chain);
		BFE_INC(cons, BFE_RX_LIST_CNT);
	}

	ether_input_dispatch(chain);

	sc->bfe_rx_cons = cons;
}

static void
bfe_intr(void *xsc)
{
	struct bfe_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t istat, imask, flag;

	istat = CSR_READ_4(sc, BFE_ISTAT);
	imask = CSR_READ_4(sc, BFE_IMASK);

	/*
	 * Defer unsolicited interrupts - This is necessary because setting
	 * the chip's interrupt mask register to 0 doesn't actually stop the
	 * interrupts
	 */
	istat &= imask;
	CSR_WRITE_4(sc, BFE_ISTAT, istat);
	CSR_READ_4(sc, BFE_ISTAT);

	/* not expecting this interrupt, disregard it */
	if (istat == 0) {
		return;
	}

	if (istat & BFE_ISTAT_ERRORS) {
		flag = CSR_READ_4(sc, BFE_DMATX_STAT);
		if (flag & BFE_STAT_EMASK)
			ifp->if_oerrors++;

		flag = CSR_READ_4(sc, BFE_DMARX_STAT);
		if (flag & BFE_RX_FLAG_ERRORS)
			ifp->if_ierrors++;

		ifp->if_flags &= ~IFF_RUNNING;
		bfe_init(sc);
	}

	/* A packet was received */
	if (istat & BFE_ISTAT_RX)
		bfe_rxeof(sc);

	/* A packet was sent */
	if (istat & BFE_ISTAT_TX)
		bfe_txeof(sc);

	/* We have packets pending, fire them out */
	if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
bfe_encap(struct bfe_softc *sc, struct mbuf **m_head, uint32_t *txidx)
{
	bus_dma_segment_t segs[BFE_MAXSEGS];
	bus_dmamap_t map;
	int i, first_idx, last_idx, cur, error, maxsegs, nsegs;

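	/*
	 * Reserve BFE_SPARE_TXDESC descriptors so the TX ring is never
	 * filled completely; the number of segments available for this
	 * frame is whatever is left after that reservation.
	 */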
	KKASSERT(sc->bfe_tx_cnt + BFE_SPARE_TXDESC < BFE_TX_LIST_CNT);
	maxsegs = BFE_TX_LIST_CNT - sc->bfe_tx_cnt - BFE_SPARE_TXDESC;
	if (maxsegs > BFE_MAXSEGS)
		maxsegs = BFE_MAXSEGS;

	first_idx = *txidx;
	map = sc->bfe_tx_ring[first_idx].bfe_map;

	error = bus_dmamap_load_mbuf_defrag(sc->bfe_txbuf_tag, map, m_head,
					    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error)
		goto fail;
	bus_dmamap_sync(sc->bfe_txbuf_tag, map, BUS_DMASYNC_PREWRITE);

	last_idx = -1;
	cur = first_idx;
	for (i = 0; i < nsegs; ++i) {
		struct bfe_desc *d;
		uint32_t ctrl;

		ctrl = BFE_DESC_LEN & segs[i].ds_len;
		ctrl |= BFE_DESC_IOC; /* always interrupt */
		if (cur == BFE_TX_LIST_CNT - 1) {
			/*
			 * Tell the chip to wrap to the
			 * start of the descriptor list.
			 */
			ctrl |= BFE_DESC_EOT;
		}

		d = &sc->bfe_tx_list[cur];
		d->bfe_addr = segs[i].ds_addr + BFE_PCI_DMA;
		d->bfe_ctrl = ctrl;

		last_idx = cur;
		BFE_INC(cur, BFE_TX_LIST_CNT);
	}
	KKASSERT(last_idx >= 0);

	/* End of the frame */
	sc->bfe_tx_list[last_idx].bfe_ctrl |= BFE_DESC_EOF;

	/*
	 * Set start of the frame on the first fragment,
	 * _after_ all of the fragments are setup.
	 */
	sc->bfe_tx_list[first_idx].bfe_ctrl |= BFE_DESC_SOF;

	sc->bfe_tx_ring[first_idx].bfe_map = sc->bfe_tx_ring[last_idx].bfe_map;
	sc->bfe_tx_ring[last_idx].bfe_map = map;
	sc->bfe_tx_ring[last_idx].bfe_mbuf = *m_head;

	*txidx = cur;
	sc->bfe_tx_cnt += nsegs;
	return 0;
fail:
	m_freem(*m_head);
	*m_head = NULL;
	return error;
}

/*
 * Set up to transmit a packet
 */
static void
bfe_start(struct ifnet *ifp)
{
	struct bfe_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	int idx, need_trans;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Not much point trying to send if the link is down
	 * or we have nothing to send.
	 */
	if (!sc->bfe_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	idx = sc->bfe_tx_prod;

	need_trans = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		if (sc->bfe_tx_cnt + BFE_SPARE_TXDESC >= BFE_TX_LIST_CNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the tx ring. If we don't have
		 * enough room, let the chip drain the ring.
		 */
		if (bfe_encap(sc, &m_head, &idx)) {
			/* m_head is freed by bfe_encap(), if we reach here */
			ifp->if_oerrors++;

			if (sc->bfe_tx_cnt > 0) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			} else {
				/*
				 * Do not set IFF_OACTIVE here: with no
				 * descriptors outstanding there will be
				 * no TX completion to clear it, so nothing
				 * short of an up/down cycle would.
				 *
				 * Let's just keep draining the ifq ...
				 */
				continue;
			}
		}
		need_trans = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (!need_trans)
		return;

	sc->bfe_tx_prod = idx;

	/* Transmit - twice due to apparent hardware bug */
	CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));
	CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
bfe_init(void *xsc)
{
	struct bfe_softc *sc = (struct bfe_softc*)xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	bfe_stop(sc);
	bfe_chip_reset(sc);

	if (bfe_list_rx_init(sc) == ENOBUFS) {
		if_printf(ifp, "bfe_init failed. "
			  "Not enough memory for list buffers\n");
		bfe_stop(sc);
		return;
	}

	bfe_set_rx_mode(sc);

	/* Enable the chip and core */
	BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE);
	/* Enable interrupts */
	CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF);

	bfe_ifmedia_upd(ifp);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->bfe_stat_timer, hz, bfe_tick, sc);
}

/*
 * Set media options.
 */
static int
bfe_ifmedia_upd(struct ifnet *ifp)
{
	struct bfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->bfe_miibus);
	sc->bfe_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		     miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	bfe_setupphy(sc);

	return(0);
}

/*
 * Report current media status.
 */
static void
bfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc->bfe_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
bfe_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct bfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP)
			if (ifp->if_flags & IFF_RUNNING)
				bfe_set_rx_mode(sc);
			else
				bfe_init(sc);
		else if (ifp->if_flags & IFF_RUNNING)
			bfe_stop(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			bfe_set_rx_mode(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->bfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
				      command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

static void
bfe_watchdog(struct ifnet *ifp)
{
	struct bfe_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_flags &= ~IFF_RUNNING;
	bfe_init(sc);

	ifp->if_oerrors++;
}

static void
bfe_tick(void *xsc)
{
	struct bfe_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	mii = device_get_softc(sc->bfe_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	bfe_stats_update(sc);
	callout_reset(&sc->bfe_stat_timer, hz, bfe_tick, sc);

	if (sc->bfe_link == 0) {
		mii_tick(mii);
		if (!sc->bfe_link && mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->bfe_link++;
		}
		if (!sc->bfe_link)
			sc->bfe_link++;
	}
	lwkt_serialize_exit(ifp->if_serializer);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
bfe_stop(struct bfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc->bfe_stat_timer);

	bfe_chip_halt(sc);
	bfe_tx_ring_free(sc);
	bfe_rx_ring_free(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
7f186839 1539}