2 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
27 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
29 * $FreeBSD: src/sys/dev/ae/if_ae.c,v 1.1.2.3.2.1 2009/04/15 03:14:26 kensmith Exp $
32 #include <sys/param.h>
33 #include <sys/endian.h>
34 #include <sys/kernel.h>
36 #include <sys/interrupt.h>
37 #include <sys/malloc.h>
40 #include <sys/serialize.h>
41 #include <sys/socket.h>
42 #include <sys/sockio.h>
43 #include <sys/sysctl.h>
45 #include <net/ethernet.h>
48 #include <net/if_arp.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/ifq_var.h>
52 #include <net/vlan/if_vlan_var.h>
53 #include <net/vlan/if_vlan_ether.h>
55 #include <bus/pci/pcireg.h>
56 #include <bus/pci/pcivar.h>
59 #include <dev/netif/mii_layer/miivar.h>
61 #include <dev/netif/ae/if_aereg.h>
62 #include <dev/netif/ae/if_aevar.h>
64 /* "device miibus" required. See GENERIC if you get errors here. */
65 #include "miibus_if.h"
/*
 * Devices supported by this driver: PCI vendor ID, device ID and a
 * human readable description, matched by ae_probe().
 */
static const struct ae_dev {
    { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
        "Attansic Technology Corp, L2 Fast Ethernet" },
    /* Required last entry: probe loop stops at the NULL name. */
82 static int ae_probe(device_t);
83 static int ae_attach(device_t);
84 static int ae_detach(device_t);
85 static int ae_shutdown(device_t);
86 static int ae_suspend(device_t);
87 static int ae_resume(device_t);
88 static int ae_miibus_readreg(device_t, int, int);
89 static int ae_miibus_writereg(device_t, int, int, int);
90 static void ae_miibus_statchg(device_t);
92 static int ae_mediachange(struct ifnet *);
93 static void ae_mediastatus(struct ifnet *, struct ifmediareq *);
94 static void ae_init(void *);
95 static void ae_start(struct ifnet *, struct ifaltq_subque *);
96 static int ae_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
97 static void ae_watchdog(struct ifnet *);
98 static void ae_stop(struct ae_softc *);
99 static void ae_tick(void *);
101 static void ae_intr(void *);
102 static void ae_tx_intr(struct ae_softc *);
103 static void ae_rx_intr(struct ae_softc *);
104 static int ae_rxeof(struct ae_softc *, struct ae_rxd *);
106 static int ae_encap(struct ae_softc *, struct mbuf **);
107 static void ae_sysctl_node(struct ae_softc *);
108 static void ae_phy_reset(struct ae_softc *);
109 static int ae_reset(struct ae_softc *);
110 static void ae_pcie_init(struct ae_softc *);
111 static void ae_get_eaddr(struct ae_softc *);
112 static void ae_dma_free(struct ae_softc *);
113 static int ae_dma_alloc(struct ae_softc *);
114 static void ae_mac_config(struct ae_softc *);
115 static void ae_stop_rxmac(struct ae_softc *);
116 static void ae_stop_txmac(struct ae_softc *);
117 static void ae_rxfilter(struct ae_softc *);
118 static void ae_rxvlan(struct ae_softc *);
119 static void ae_update_stats_rx(uint16_t, struct ae_stats *);
120 static void ae_update_stats_tx(uint16_t, struct ae_stats *);
121 static void ae_powersave_disable(struct ae_softc *);
122 static void ae_powersave_enable(struct ae_softc *);
/* Method dispatch table connecting the driver to the newbus framework. */
static device_method_t ae_methods[] = {
    /* Device interface. */
    DEVMETHOD(device_probe, ae_probe),
    DEVMETHOD(device_attach, ae_attach),
    DEVMETHOD(device_detach, ae_detach),
    DEVMETHOD(device_shutdown, ae_shutdown),
    DEVMETHOD(device_suspend, ae_suspend),
    DEVMETHOD(device_resume, ae_resume),
    /* Bus interface. */
    DEVMETHOD(bus_print_child, bus_generic_print_child),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    /* MII interface: PHY register access and link-change callback. */
    DEVMETHOD(miibus_readreg, ae_miibus_readreg),
    DEVMETHOD(miibus_writereg, ae_miibus_writereg),
    DEVMETHOD(miibus_statchg, ae_miibus_statchg),
/* Driver description handed to the bus framework. */
static driver_t ae_driver = {
    sizeof(struct ae_softc)
static devclass_t ae_devclass;

/* Module glue: depends on miibus for PHY handling. */
DECLARE_DUMMY_MODULE(if_ae);
MODULE_DEPEND(if_ae, miibus, 1, 1, 1);
DRIVER_MODULE(if_ae, pci, ae_driver, ae_devclass, NULL, NULL);
/* Attach miibus as a child so PHYs are enumerated on this device. */
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, NULL, NULL);
/* Register access macros (memory-mapped I/O via the BAR0 bus handle). */
#define AE_WRITE_4(_sc, reg, val) \
    bus_space_write_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_WRITE_2(_sc, reg, val) \
    bus_space_write_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_WRITE_1(_sc, reg, val) \
    bus_space_write_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
#define AE_READ_4(_sc, reg) \
    bus_space_read_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define AE_READ_2(_sc, reg) \
    bus_space_read_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
#define AE_READ_1(_sc, reg) \
    bus_space_read_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))

/* PHY access helpers; the chip's PHY is addressed as 0 here. */
#define AE_PHY_READ(sc, reg) \
    ae_miibus_readreg(sc->ae_dev, 0, reg)
#define AE_PHY_WRITE(sc, reg, val) \
    ae_miibus_writereg(sc->ae_dev, 0, reg, val)

/*
 * A station address of all-zeroes or all-ones is invalid.
 * eaddr[0] holds a 32-bit word, eaddr[1] the remaining 16 bits.
 */
#define AE_CHECK_EADDR_VALID(eaddr) \
    ((eaddr[0] == 0 && eaddr[1] == 0) || \
    (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))

/*
 * Convert between the chip's descriptor VLAN-tag bit layout and the
 * host 802.1Q tag layout; the two macros are inverses of each other.
 */
#define AE_RXD_VLAN(vtag) \
    (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define AE_TXD_VLAN(vtag) \
    (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
185 #define STATS_ENTRY(node, desc, field) \
186 { node, desc, offsetof(struct ae_stats, field) }
192 STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
193 STATS_ENTRY("mcast", "multicast frames", tx_mcast),
194 STATS_ENTRY("pause", "PAUSE frames", tx_pause),
195 STATS_ENTRY("control", "control frames", tx_ctrl),
196 STATS_ENTRY("defers", "deferrals occuried", tx_defer),
197 STATS_ENTRY("exc_defers", "excessive deferrals occuried", tx_excdefer),
198 STATS_ENTRY("singlecols", "single collisions occuried", tx_singlecol),
199 STATS_ENTRY("multicols", "multiple collisions occuried", tx_multicol),
200 STATS_ENTRY("latecols", "late collisions occuried", tx_latecol),
201 STATS_ENTRY("aborts", "transmit aborts due collisions", tx_abortcol),
202 STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
204 STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
205 STATS_ENTRY("mcast", "multicast frames", rx_mcast),
206 STATS_ENTRY("pause", "PAUSE frames", rx_pause),
207 STATS_ENTRY("control", "control frames", rx_ctrl),
208 STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
209 STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
210 STATS_ENTRY("runt", "runt frames", rx_runt),
211 STATS_ENTRY("frag", "fragmented frames", rx_frag),
212 STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
213 STATS_ENTRY("truncated", "frames truncated due to Rx FIFO inderrun",
216 #define AE_STATS_RX_LEN NELEM(ae_stats_rx)
217 #define AE_STATS_TX_LEN NELEM(ae_stats_tx)
/*
 * Bring the interface down: clear RUNNING, stop the periodic callout,
 * mask interrupts, disable both DMA engines and wait for the chip to
 * report idle.  Must be called with the interface serializer held.
 */
ae_stop(struct ae_softc *sc)
    struct ifnet *ifp = &sc->arpcom.ac_if;

    ASSERT_SERIALIZED(ifp->if_serializer);

    /* Mark the interface down and allow queuing again later. */
    ifp->if_flags &= ~IFF_RUNNING;
    ifq_clr_oactive(&ifp->if_snd);

    /* Forget link state and stop the 1-second tick. */
    sc->ae_flags &= ~AE_FLAG_LINK;
    callout_stop(&sc->ae_tick_ch);

    /*
     * Clear and disable interrupts.
     */
    AE_WRITE_4(sc, AE_IMR_REG, 0);
    AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

    /* Disable the Tx (read) and Rx (write) DMA engines. */
    AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
    AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

    /*
     * Wait for everything to enter idle state.
     */
    for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
        if (AE_READ_4(sc, AE_IDLE_REG) == 0)
    if (i == AE_IDLE_TIMEOUT)
        if_printf(ifp, "could not enter idle state in stop.\n");
265 ae_stop_rxmac(struct ae_softc *sc)
271 * Stop Rx MAC engine.
273 val = AE_READ_4(sc, AE_MAC_REG);
274 if ((val & AE_MAC_RX_EN) != 0) {
275 val &= ~AE_MAC_RX_EN;
276 AE_WRITE_4(sc, AE_MAC_REG, val);
280 * Stop Rx DMA engine.
282 if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
283 AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
286 * Wait for IDLE state.
288 for (i = 0; i < AE_IDLE_TIMEOUT; i--) {
289 val = AE_READ_4(sc, AE_IDLE_REG);
290 if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
294 if (i == AE_IDLE_TIMEOUT) {
295 if_printf(&sc->arpcom.ac_if,
296 "timed out while stopping Rx MAC.\n");
301 ae_stop_txmac(struct ae_softc *sc)
307 * Stop Tx MAC engine.
309 val = AE_READ_4(sc, AE_MAC_REG);
310 if ((val & AE_MAC_TX_EN) != 0) {
311 val &= ~AE_MAC_TX_EN;
312 AE_WRITE_4(sc, AE_MAC_REG, val);
316 * Stop Tx DMA engine.
318 if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
319 AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
322 * Wait for IDLE state.
324 for (i = 0; i < AE_IDLE_TIMEOUT; i--) {
325 val = AE_READ_4(sc, AE_IDLE_REG);
326 if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
330 if (i == AE_IDLE_TIMEOUT) {
331 if_printf(&sc->arpcom.ac_if,
332 "timed out while stopping Tx MAC.\n");
/*
 * Callback from MII layer when media changes.  Re-evaluates link
 * state and, when a link is present, restarts the DMA engines and
 * re-enables both MACs.  Runs with the serializer held.
 */
ae_miibus_statchg(device_t dev)
    struct ae_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct mii_data *mii;

    ASSERT_SERIALIZED(ifp->if_serializer);

    /* Nothing to do while the interface is down. */
    if ((ifp->if_flags & IFF_RUNNING) == 0)

    mii = device_get_softc(sc->ae_miibus);
    /* Assume no link, then set AE_FLAG_LINK from the resolved media. */
    sc->ae_flags &= ~AE_FLAG_LINK;
    if ((mii->mii_media_status & IFM_AVALID) != 0) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
            sc->ae_flags |= AE_FLAG_LINK;

    /* Stop Rx/Tx MACs. */

    /* Program MACs with resolved speed/duplex/flow-control. */
    if ((sc->ae_flags & AE_FLAG_LINK) != 0) {
        /*
         * Restart DMA engines.
         */
        AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
        AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

        /*
         * Enable Rx and Tx MACs.
         */
        val = AE_READ_4(sc, AE_MAC_REG);
        val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
        AE_WRITE_4(sc, AE_MAC_REG, val);
/*
 * Build the hw.<dev> sysctl tree: a "stats" node with "rx" and "tx"
 * subtrees, one read-only UINT per counter in sc->stats, driven by
 * the ae_stats_rx/ae_stats_tx descriptor tables.
 */
ae_sysctl_node(struct ae_softc *sc)
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
    struct ae_stats *ae_stats;

    ae_stats = &sc->stats;
    sysctl_ctx_init(&sc->ae_sysctl_ctx);
    sc->ae_sysctl_tree = SYSCTL_ADD_NODE(&sc->ae_sysctl_ctx,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
        device_get_nameunit(sc->ae_dev),
    if (sc->ae_sysctl_tree == NULL) {
        device_printf(sc->ae_dev, "can't add sysctl node\n");
    ctx = &sc->ae_sysctl_ctx;
    root = sc->ae_sysctl_tree;

    stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
        CTLFLAG_RD, NULL, "ae statistics");
        device_printf(sc->ae_dev, "can't add stats sysctl node\n");

    /*
     * Receiver statistics.
     */
    stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
        CTLFLAG_RD, NULL, "Rx MAC statistics");
    if (stats_rx != NULL) {
        for (i = 0; i < AE_STATS_RX_LEN; i++) {
            /* Counter address = base of sc->stats + table offset. */
            SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx),
                OID_AUTO, ae_stats_rx[i].node, CTLFLAG_RD,
                (char *)ae_stats + ae_stats_rx[i].offset, 0,
                ae_stats_rx[i].desc);

    /*
     * Transmitter statistics.
     */
    stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
        CTLFLAG_RD, NULL, "Tx MAC statistics");
    if (stats_tx != NULL) {
        for (i = 0; i < AE_STATS_TX_LEN; i++) {
            SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx),
                OID_AUTO, ae_stats_tx[i].node, CTLFLAG_RD,
                (char *)ae_stats + ae_stats_tx[i].offset, 0,
                ae_stats_tx[i].desc);
/*
 * MII read: issue a MDIO read command for PHY register 'reg' and busy
 * wait until the controller clears the START/BUSY bits, then return
 * the data field of the MDIO register.
 */
ae_miibus_readreg(device_t dev, int phy, int reg)
    struct ae_softc *sc = device_get_softc(dev);

    /*
     * Locking is done in upper layers.
     */
    /* Only one PHY is supported; ignore other addresses. */
    if (phy != sc->ae_phyaddr)
    val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
        AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
        ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
    AE_WRITE_4(sc, AE_MDIO_REG, val);

    /*
     * Wait for operation to complete.
     */
    for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
        val = AE_READ_4(sc, AE_MDIO_REG);
        if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
    if (i == AE_MDIO_TIMEOUT) {
        device_printf(sc->ae_dev, "phy read timeout: %d.\n", reg);
    /* Extract the data field from the last MDIO register read. */
    return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
/*
 * MII write: issue a MDIO write command carrying 'val' for PHY
 * register 'reg' and busy-wait for completion, logging on timeout.
 */
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
    struct ae_softc *sc = device_get_softc(dev);

    /*
     * Locking is done in upper layers.
     */
    /* Only one PHY is supported; ignore other addresses. */
    if (phy != sc->ae_phyaddr)
    aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
        AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
        ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
        ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
    AE_WRITE_4(sc, AE_MDIO_REG, aereg);

    /*
     * Wait for operation to complete.
     */
    for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
        aereg = AE_READ_4(sc, AE_MDIO_REG);
        if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
    if (i == AE_MDIO_TIMEOUT)
        device_printf(sc->ae_dev, "phy write timeout: %d.\n", reg);
/*
 * Probe: match the device's PCI vendor/device ID against the
 * ae_devs table and set the device description on a hit.
 */
ae_probe(device_t dev)
    uint16_t vendor, devid;
    const struct ae_dev *sp;

    vendor = pci_get_vendor(dev);
    devid = pci_get_device(dev);
    for (sp = ae_devs; sp->ae_name != NULL; sp++) {
        if (vendor == sp->ae_vendorid &&
            devid == sp->ae_deviceid) {
            device_set_desc(dev, sp->ae_name);
/*
 * Allocate coherent DMA memory for the Tx descriptor ring (TxD), the
 * Tx status ring (TxS) and the Rx descriptor area (RxD).  The RxD
 * area is over-allocated by 120 bytes and the usable region starts at
 * that offset (hardware alignment requirement -- the base is 128-byte
 * aligned, descriptors start 120 bytes in).
 *
 * NOTE(review): the error messages below misspell "create" as
 * "creare" -- kept verbatim here; fix when strings may be touched.
 */
ae_dma_alloc(struct ae_softc *sc)

    /*
     * Create parent DMA tag.
     */
    error = bus_dma_tag_create(NULL, 1, 0,
        BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXSIZE_32BIT,
        BUS_SPACE_MAXSIZE_32BIT,
        0, &sc->dma_parent_tag);
        device_printf(sc->ae_dev, "could not creare parent DMA tag.\n");

    /*
     * Create DMA stuffs for TxD.
     */
    sc->txd_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
        AE_TXD_BUFSIZE_DEFAULT, BUS_DMA_WAITOK | BUS_DMA_ZERO,
        &sc->dma_txd_tag, &sc->dma_txd_map,
        &sc->dma_txd_busaddr);
    if (sc->txd_base == NULL) {
        device_printf(sc->ae_dev, "could not creare TxD DMA stuffs.\n");

    /*
     * Create DMA stuffs for TxS.
     */
    sc->txs_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
        AE_TXS_COUNT_DEFAULT * 4, BUS_DMA_WAITOK | BUS_DMA_ZERO,
        &sc->dma_txs_tag, &sc->dma_txs_map,
        &sc->dma_txs_busaddr);
    if (sc->txs_base == NULL) {
        device_printf(sc->ae_dev, "could not creare TxS DMA stuffs.\n");

    /*
     * Create DMA stuffs for RxD.
     */
    sc->rxd_base_dma = bus_dmamem_coherent_any(sc->dma_parent_tag, 128,
        AE_RXD_COUNT_DEFAULT * 1536 + 120,
        BUS_DMA_WAITOK | BUS_DMA_ZERO,
        &sc->dma_rxd_tag, &sc->dma_rxd_map,
    if (sc->rxd_base_dma == NULL) {
        device_printf(sc->ae_dev, "could not creare RxD DMA stuffs.\n");
    /* Usable RxD region starts 120 bytes into the allocation. */
    sc->dma_rxd_busaddr = busaddr + 120;
    sc->rxd_base = (struct ae_rxd *)(sc->rxd_base_dma + 120);
593 ae_mac_config(struct ae_softc *sc)
595 struct mii_data *mii;
598 mii = device_get_softc(sc->ae_miibus);
599 val = AE_READ_4(sc, AE_MAC_REG);
600 val &= ~AE_MAC_FULL_DUPLEX;
601 /* XXX disable AE_MAC_TX_FLOW_EN? */
602 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
603 val |= AE_MAC_FULL_DUPLEX;
604 AE_WRITE_4(sc, AE_MAC_REG, val);
/*
 * Hand one received descriptor's frame to the network stack: validate
 * length, copy it out of the DMA area into an mbuf chain, attach the
 * hardware-stripped VLAN tag when enabled, and input the packet.
 */
ae_rxeof(struct ae_softc *sc, struct ae_rxd *rxd)
    struct ifnet *ifp = &sc->arpcom.ac_if;

    flags = le16toh(rxd->flags);
    if_printf(ifp, "Rx interrupt occuried.\n");
    /* Descriptor length includes the CRC; strip it. */
    size = le16toh(rxd->len) - ETHER_CRC_LEN;
    if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN -
        sizeof(struct ether_vlan_header))) {
        if_printf(ifp, "Runt frame received.");
    /* Copy frame data out of the descriptor into a fresh mbuf chain. */
    m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
    /* Attach the VLAN tag stripped by hardware, if any. */
    if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
        (flags & AE_RXD_HAS_VLAN)) {
        m->m_pkthdr.ether_vlantag = AE_RXD_VLAN(le16toh(rxd->vlan));
        m->m_flags |= M_VLANTAG;
    ifp->if_input(ifp, m);
/*
 * Rx interrupt handler: walk descriptors the hardware flagged with
 * AE_RXD_UPDATE, count statistics/errors, pass good frames to
 * ae_rxeof(), then write the new Rx index back to the chip.
 */
ae_rx_intr(struct ae_softc *sc)
    struct ifnet *ifp = &sc->arpcom.ac_if;

    /*
     * Synchronize DMA buffers.
     */
    bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
        BUS_DMASYNC_POSTREAD);
    rxd = (struct ae_rxd *)(sc->rxd_base + sc->rxd_cur);
    flags = le16toh(rxd->flags);
    /* Stop at the first descriptor the hardware has not filled. */
    if ((flags & AE_RXD_UPDATE) == 0)
    /* Clear UPDATE so the descriptor can be reused. */
    rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
    ae_update_stats_rx(flags, &sc->stats);

    /*
     * Update position index.
     */
    sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
    if ((flags & AE_RXD_SUCCESS) == 0) {
        IFNET_STAT_INC(ifp, ierrors, 1);
    error = ae_rxeof(sc, rxd);
        IFNET_STAT_INC(ifp, ierrors, 1);
        IFNET_STAT_INC(ifp, ipackets, 1);

    /* Update Rx index. */
    AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
/*
 * Tx interrupt handler: reap completed Tx status (TxS) entries,
 * advance the TxD acknowledge offset past the matching descriptor,
 * update counters, unarm the watchdog when the queue drains and
 * restart transmission if descriptor space became available.
 */
ae_tx_intr(struct ae_softc *sc)
    struct ifnet *ifp = &sc->arpcom.ac_if;

    /*
     * Synchronize DMA buffers.
     */
    bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_POSTREAD);
    bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_POSTREAD);

    txs = sc->txs_base + sc->txs_ack;
    flags = le16toh(txs->flags);
    /* Stop at the first status entry the hardware has not written. */
    if ((flags & AE_TXS_UPDATE) == 0)
    txs->flags = htole16(flags & ~AE_TXS_UPDATE);
    ae_update_stats_tx(flags, &sc->stats);

    /*
     * Update TxS position.
     */
    sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
    sc->ae_flags |= AE_FLAG_TXAVAIL;
    txd = (struct ae_txd *)(sc->txd_base + sc->txd_ack);
    /* TxS and TxD lengths must agree; mismatch indicates ring damage. */
    if (txs->len != txd->len) {
        device_printf(sc->ae_dev, "Size mismatch: "
            le16toh(txs->len), le16toh(txd->len));

    /*
     * Move txd ack and align on 4-byte boundary.
     */
    sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
        AE_TXD_BUFSIZE_DEFAULT;
    if ((flags & AE_TXS_SUCCESS) != 0)
        IFNET_STAT_INC(ifp, opackets, 1);
        IFNET_STAT_INC(ifp, oerrors, 1);

    /* More completions than submissions: should not happen. */
    if (sc->tx_inproc < 0) {
        if_printf(ifp, "Received stray Tx interrupt(s).\n");
    if (sc->tx_inproc == 0)
        ifp->if_timer = 0;  /* Unarm watchdog. */
    /* Space was freed: clear OACTIVE and kick the transmit path. */
    if (sc->ae_flags & AE_FLAG_TXAVAIL) {
        ifq_clr_oactive(&ifp->if_snd);
        if (!ifq_is_empty(&ifp->if_snd))

    /*
     * Synchronize DMA buffers.
     */
    bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
/* Interrupt handler body (ae_intr): ack and dispatch chip events. */
    struct ae_softc *sc = xsc;
    struct ifnet *ifp = &sc->arpcom.ac_if;

    ASSERT_SERIALIZED(ifp->if_serializer);

    /* Ignore interrupts that are not ours (shared IRQ line). */
    val = AE_READ_4(sc, AE_ISR_REG);
    if (val == 0 || (val & AE_IMR_DEFAULT) == 0)

    /* Mask further interrupts while we service this one. */
    AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);

    /* Read interrupt status. */
    val = AE_READ_4(sc, AE_ISR_REG);

    /* Clear interrupts and disable them. */
    AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

    if (ifp->if_flags & IFF_RUNNING) {
        /* Fatal DMA timeout / link loss: reinitialize the chip. */
        if (val & (AE_ISR_DMAR_TIMEOUT |
            AE_ISR_DMAW_TIMEOUT |
            AE_ISR_PHY_LINKDOWN)) {
        if (val & AE_ISR_TX_EVENT)
        if (val & AE_ISR_RX_EVENT)

    /* Re-enable interrupts. */
    AE_WRITE_4(sc, AE_ISR_REG, 0);
/*
 * Initialization body (ae_init): full chip bring-up -- program the
 * station address, ring addresses/sizes, timing parameters, reset the
 * ring indexes, enable DMA engines, interrupts and both MACs, then
 * start the media and the periodic tick.  Serializer held by caller.
 */
    struct ae_softc *sc = xsc;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct mii_data *mii;
    uint8_t eaddr[ETHER_ADDR_LEN];

    ASSERT_SERIALIZED(ifp->if_serializer);

    mii = device_get_softc(sc->ae_miibus);
    ae_powersave_disable(sc);

    /*
     * Clear and disable interrupts.
     */
    AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

    /*
     * Set the MAC address.  EADDR0 gets bytes 2-5, EADDR1 bytes 0-1.
     */
    bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
    val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
    AE_WRITE_4(sc, AE_EADDR0_REG, val);
    val = eaddr[0] << 8 | eaddr[1];
    AE_WRITE_4(sc, AE_EADDR1_REG, val);

    /*
     * Set ring buffers base addresses.  All rings share one high
     * 32-bit word; only the low words differ per ring.
     */
    addr = sc->dma_rxd_busaddr;
    AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
    AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
    addr = sc->dma_txd_busaddr;
    AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
    addr = sc->dma_txs_busaddr;
    AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

    /*
     * Configure ring buffers sizes.  TxD size is in 4-byte units.
     */
    AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
    AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
    AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

    /*
     * Configure interframe gap parameters.
     */
    val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
        ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
        ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
        ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
    AE_WRITE_4(sc, AE_IFG_REG, val);

    /*
     * Configure half-duplex operation.
     */
    val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
        ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
        AE_HDPX_RETRY_MASK) |
        ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
        AE_HDPX_ABEBT_MASK) |
        ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
        AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
    AE_WRITE_4(sc, AE_HDPX_REG, val);

    /*
     * Configure interrupt moderate timer.
     */
    AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
    val = AE_READ_4(sc, AE_MASTER_REG);
    val |= AE_MASTER_IMT_EN;
    AE_WRITE_4(sc, AE_MASTER_REG, val);

    /*
     * Configure interrupt clearing timer.
     */
    AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

    /* Program the maximum frame size (MTU + link-level overhead). */
    val = ifp->if_mtu + ETHER_HDR_LEN + sizeof(struct ether_vlan_header) +
    AE_WRITE_2(sc, AE_MTU_REG, val);

    /*
     * Configure cut-through threshold.
     */
    AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

    /*
     * Configure flow control.
     */
    AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
    AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
        (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
        (AE_RXD_COUNT_DEFAULT / 12));

    /* Reset software ring state and tell the chip. */
    sc->txd_cur = sc->rxd_cur = 0;
    sc->txs_ack = sc->txd_ack = 0;
    AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
    AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
    sc->ae_flags |= AE_FLAG_TXAVAIL;    /* Free Tx's available. */

    /* Enable both DMA engines. */
    AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
    AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

    /*
     * Check if everything is OK.
     */
    val = AE_READ_4(sc, AE_ISR_REG);
    if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
        device_printf(sc->ae_dev, "Initialization failed.\n");

    /*
     * Clear interrupt status.
     */
    AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
    AE_WRITE_4(sc, AE_ISR_REG, 0x0);

    /* Enable interrupts. */
    val = AE_READ_4(sc, AE_MASTER_REG);
    AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
    AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

    /* Disable Wake-on-LAN. */
    AE_WRITE_4(sc, AE_WOL_REG, 0);

    /* Configure MAC: CRC generation, auto-padding, flow control. */
    val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
        AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
        AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
        ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
        ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
        AE_MAC_PREAMBLE_MASK);
    AE_WRITE_4(sc, AE_MAC_REG, val);

    /* Enable Tx/Rx MAC engines. */
    val = AE_READ_4(sc, AE_MAC_REG);
    AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

    sc->ae_flags &= ~AE_FLAG_LINK;
    mii_mediachg(mii);  /* Switch to the current media. */

    callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
    ifp->if_flags |= IFF_RUNNING;
    ifq_clr_oactive(&ifp->if_snd);
/*
 * Transmit watchdog: fired when a transmission did not complete
 * within AE_TX_TIMEOUT.  Logs, counts an output error and (in the
 * elided portion) reinitializes the chip and restarts transmission.
 */
ae_watchdog(struct ifnet *ifp)
    struct ae_softc *sc = ifp->if_softc;

    ASSERT_SERIALIZED(ifp->if_serializer);

    /* Distinguish a dead link from a genuine Tx hang. */
    if ((sc->ae_flags & AE_FLAG_LINK) == 0)
        if_printf(ifp, "watchdog timeout (missed link).\n");
        if_printf(ifp, "watchdog timeout - resetting.\n");
    IFNET_STAT_INC(ifp, oerrors, 1);

    if (!ifq_is_empty(&ifp->if_snd))
/* Periodic tick body (ae_tick): poll MII state once a second. */
    struct ae_softc *sc = xsc;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct mii_data *mii = device_get_softc(sc->ae_miibus);

    /* Callout runs outside the serializer; take it explicitly. */
    lwkt_serialize_enter(ifp->if_serializer);
    callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
    lwkt_serialize_exit(ifp->if_serializer);
1012 ae_rxvlan(struct ae_softc *sc)
1014 struct ifnet *ifp = &sc->arpcom.ac_if;
1017 val = AE_READ_4(sc, AE_MAC_REG);
1018 val &= ~AE_MAC_RMVLAN_EN;
1019 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1020 val |= AE_MAC_RMVLAN_EN;
1021 AE_WRITE_4(sc, AE_MAC_REG, val);
/*
 * Program the Rx filter: broadcast always on, promiscuous/allmulti
 * per interface flags, otherwise a 64-bit multicast hash built from
 * the CRC32 of each group address.
 */
ae_rxfilter(struct ae_softc *sc)
    struct ifnet *ifp = &sc->arpcom.ac_if;
    struct ifmultiaddr *ifma;

    rxcfg = AE_READ_4(sc, AE_MAC_REG);
    rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
    rxcfg |= AE_MAC_BCAST_EN;
    if (ifp->if_flags & IFF_PROMISC)
        rxcfg |= AE_MAC_PROMISC_EN;
    if (ifp->if_flags & IFF_ALLMULTI)
        rxcfg |= AE_MAC_MCAST_EN;

    /*
     * Wipe old settings.
     */
    AE_WRITE_4(sc, AE_REG_MHT0, 0);
    AE_WRITE_4(sc, AE_REG_MHT1, 0);
    /* Promiscuous/allmulti: accept everything, no hash needed. */
    if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
        AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
        AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
        AE_WRITE_4(sc, AE_MAC_REG, rxcfg);

    /*
     * Load multicast tables: CRC bit 31 selects the hash word,
     * bits 30-26 the bit within it.
     */
    bzero(mchash, sizeof(mchash));
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
        crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
            ifma->ifma_addr), ETHER_ADDR_LEN);
        mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
    AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
    AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
    AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
/*
 * Bytes currently free in the Tx descriptor ring.  txd_cur/txd_ack
 * are byte offsets into the ring; the result excludes the 4-byte
 * TxD header that every packet needs.
 */
ae_tx_avail_size(struct ae_softc *sc)
    if (sc->txd_cur >= sc->txd_ack)
        avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
        avail = sc->txd_ack - sc->txd_cur;
    return (avail - 4); /* 4-byte header. */
/*
 * Copy one outbound packet into the Tx descriptor ring: write a
 * 4-byte TxD header, copy the mbuf chain data (wrapping at the ring
 * end), record VLAN tag/length, advance txd_cur (4-byte aligned) and
 * claim a TxS status slot.  Fails when the ring lacks space.
 */
ae_encap(struct ae_softc *sc, struct mbuf **m_head)
    unsigned int to_end;

    M_ASSERTPKTHDR((*m_head));
    len = m0->m_pkthdr.len;
    /* Bail out when the ring cannot hold the whole packet. */
    if ((sc->ae_flags & AE_FLAG_TXAVAIL) == 0 ||
        ae_tx_avail_size(sc) < len) {
        if_printf(sc->ifp, "No free Tx available.\n");

    /* TxD header sits at the current ring offset. */
    hdr = (struct ae_txd *)(sc->txd_base + sc->txd_cur);
    bzero(hdr, sizeof(*hdr));
    sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT;

    /* Space available to the end of the ring */
    to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
    /* Copy in one piece, or wrap around the ring end. */
    if (to_end >= len) {
        m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
        m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
        m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);

    /*
     * Set TxD flags and parameters.
     */
    if ((m0->m_flags & M_VLANTAG) != 0) {
        hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vlantag));
        hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
        hdr->len = htole16(len);

    /*
     * Set current TxD position and round up to a 4-byte boundary.
     */
    sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
    if (sc->txd_cur == sc->txd_ack)
        sc->ae_flags &= ~AE_FLAG_TXAVAIL;
    if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);

    /*
     * Update TxS position and check if there are empty TxS available.
     */
    sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
    sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
    if (sc->txs_cur == sc->txs_ack)
        sc->ae_flags &= ~AE_FLAG_TXAVAIL;

    /*
     * Synchronize DMA memory.
     */
    bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
/*
 * if_start handler: dequeue packets and encapsulate them into the Tx
 * ring until it fills, then kick the hardware by writing the new TxD
 * index.  Runs with the serializer held on the default subqueue.
 */
ae_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
    struct ae_softc *sc = ifp->if_softc;

    ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
    ASSERT_SERIALIZED(ifp->if_serializer);

    if_printf(ifp, "Start called.\n");
    /* No link: drop everything queued. */
    if ((sc->ae_flags & AE_FLAG_LINK) == 0) {
        ifq_purge(&ifp->if_snd);
    if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))

    while (!ifq_is_empty(&ifp->if_snd)) {
        m0 = ifq_dequeue(&ifp->if_snd);
            break;  /* Nothing to do. */
        error = ae_encap(sc, &m0);
            /* Ring full: requeue and mark output active. */
            ifq_prepend(&ifp->if_snd, m0);
            ifq_set_oactive(&ifp->if_snd);
            if_printf(ifp, "Setting OACTIVE.\n");

        /* Bounce a copy of the frame to BPF. */
        ETHER_BPF_MTAP(ifp, m0);

    if (trans) {    /* Something was dequeued. */
        /* Index register is in 4-byte units. */
        AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
        ifp->if_timer = AE_TX_TIMEOUT;  /* Load watchdog. */
        if_printf(ifp, "%d packets dequeued.\n", count);
        if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
/*
 * ioctl handler: interface flag changes, media selection and
 * capability toggles; everything else is passed to ether_ioctl().
 * Runs with the serializer held.
 */
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
    struct ae_softc *sc = ifp->if_softc;
    struct mii_data *mii;
    int error = 0, mask;

    ASSERT_SERIALIZED(ifp->if_serializer);
    ifr = (struct ifreq *)data;
    if (ifp->if_flags & IFF_UP) {
        if (ifp->if_flags & IFF_RUNNING) {
            /* Only the filter-related flags changed: reprogram. */
            if (((ifp->if_flags ^ sc->ae_if_flags)
                & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
        if (ifp->if_flags & IFF_RUNNING)
    /* Remember flags to detect future filter changes. */
    sc->ae_if_flags = ifp->if_flags;
    if (ifp->if_flags & IFF_RUNNING)
    /* Media ioctls are handed to the MII layer. */
    mii = device_get_softc(sc->ae_miibus);
    error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
    /* Capability toggles: only VLAN tag stripping is switchable. */
    mask = ifr->ifr_reqcap ^ ifp->if_capenable;
    if (mask & IFCAP_VLAN_HWTAGGING) {
        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
    error = ether_ioctl(ifp, cmd, data);
/*
 * Attach: map BAR0 registers, allocate the IRQ, reset the chip,
 * validate the chip revision, read PCIe DMA burst parameters,
 * allocate DMA rings, probe the PHY and attach the ethernet
 * interface and interrupt handler.
 */
ae_attach(device_t dev)
    struct ae_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    callout_init(&sc->ae_tick_ch);

    /* Enable bus mastering */
    pci_enable_busmaster(dev);

    /*
     * Allocate memory mapped IO
     */
    sc->ae_mem_rid = PCIR_BAR(0);
    sc->ae_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->ae_mem_rid, RF_ACTIVE);
    if (sc->ae_mem_res == NULL) {
        device_printf(dev, "can't allocate IO memory\n");
    sc->ae_mem_bt = rman_get_bustag(sc->ae_mem_res);
    sc->ae_mem_bh = rman_get_bushandle(sc->ae_mem_res);

    /* Allocate a (shareable) interrupt line. */
    sc->ae_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
        RF_SHAREABLE | RF_ACTIVE);
    if (sc->ae_irq_res == NULL) {
        device_printf(dev, "can't allocate irq\n");

    /* Set PHY address. */
    sc->ae_phyaddr = AE_PHYADDR_DEFAULT;

    /* Create sysctl tree */

    /*
     * Reset the ethernet controller.
     */

    /*
     * Get PCI and chip id/revision.
     */
    sc->ae_rev = pci_get_revid(dev);
    (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
        AE_MASTER_REVNUM_MASK;
    device_printf(dev, "PCI device revision : 0x%04x\n", sc->ae_rev);
    device_printf(dev, "Chip id/revision : 0x%04x\n",

    /*
     * Uninitialized hardware returns an invalid chip id/revision
     * as well as 0xFFFFFFFF for Tx/Rx fifo length.  It seems that
     * unplugged cable results in putting hardware into automatic
     * power down mode which in turn returns invalid chip revision.
     */
    if (sc->ae_chip_rev == 0xFFFF) {
        device_printf(dev,"invalid chip revision : 0x%04x -- "
            "not initialized?\n", sc->ae_chip_rev);

    /* Get DMA parameters from PCIe device control register. */
    pcie_ptr = pci_get_pciecap_ptr(dev);
        sc->ae_flags |= AE_FLAG_PCIE;
        devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
        /* Max read request size. */
        sc->ae_dma_rd_burst = ((devctl >> 12) & 0x07) <<
            DMA_CFG_RD_BURST_SHIFT;
        /* Max payload size. */
        sc->ae_dma_wr_burst = ((devctl >> 5) & 0x07) <<
            DMA_CFG_WR_BURST_SHIFT;
        device_printf(dev, "Read request size : %d bytes.\n",
            128 << ((devctl >> 12) & 0x07));
        device_printf(dev, "TLP payload size : %d bytes.\n",
            128 << ((devctl >> 5) & 0x07));
        /* Non-PCIe: fall back to 128-byte bursts. */
        sc->ae_dma_rd_burst = DMA_CFG_RD_BURST_128;
        sc->ae_dma_wr_burst = DMA_CFG_WR_BURST_128;

    /* Create DMA stuffs */
    error = ae_dma_alloc(sc);

    /* Load station address. */

    /* Set up the ifnet: methods, queue and capabilities. */
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ae_ioctl;
    ifp->if_start = ae_start;
    ifp->if_init = ae_init;
    ifp->if_watchdog = ae_watchdog;
    ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN - 1);
    ifq_set_ready(&ifp->if_snd);
    ifp->if_capabilities = IFCAP_VLAN_MTU |
        IFCAP_VLAN_HWTAGGING;
    ifp->if_hwassist = 0;
    ifp->if_capenable = ifp->if_capabilities;

    /* Set up MII bus. */
    error = mii_phy_probe(dev, &sc->ae_miibus,
        ae_mediachange, ae_mediastatus);
        device_printf(dev, "no PHY found!\n");
    ether_ifattach(ifp, sc->ae_eaddr, NULL);

    /* Tell the upper layer(s) we support long frames. */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

    ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->ae_irq_res));

    error = bus_setup_intr(dev, sc->ae_irq_res, INTR_MPSAFE, ae_intr, sc,
        &sc->ae_irq_handle, ifp->if_serializer);
        device_printf(dev, "could not set up interrupt handler.\n");
        /* Undo the ethernet attach on failure. */
        ether_ifdetach(ifp);
/*
 * ae_detach: device detach method.
 * If the device is attached, marks the softc as detaching and tears down
 * the interrupt handler under the ifnet serializer before detaching the
 * ethernet layer; then deletes the MII bus child, detaches generic bus
 * children, and releases the IRQ and memory resources plus the sysctl tree.
 * NOTE(review): this chunk is missing interleaved lines (resource release
 * arguments, return statement, closing braces) — do not treat as complete.
 */
1419 ae_detach(device_t dev)
1421 struct ae_softc *sc = device_get_softc(dev);
1423 if (device_is_attached(dev)) {
1424 struct ifnet *ifp = &sc->arpcom.ac_if;
/* Interrupt teardown is serialized against the interrupt handler. */
1426 lwkt_serialize_enter(ifp->if_serializer);
1427 sc->ae_flags |= AE_FLAG_DETACH;
1429 bus_teardown_intr(dev, sc->ae_irq_res, sc->ae_irq_handle);
1430 lwkt_serialize_exit(ifp->if_serializer);
1432 ether_ifdetach(ifp);
1435 if (sc->ae_miibus != NULL)
1436 device_delete_child(dev, sc->ae_miibus);
1437 bus_generic_detach(dev);
1439 if (sc->ae_irq_res != NULL) {
1440 bus_release_resource(dev, SYS_RES_IRQ, sc->ae_irq_rid,
1443 if (sc->ae_mem_res != NULL) {
1444 bus_release_resource(dev, SYS_RES_MEMORY, sc->ae_mem_rid,
1448 if (sc->ae_sysctl_tree != NULL)
1449 sysctl_ctx_free(&sc->ae_sysctl_ctx);
/*
 * ae_dma_free: release all DMA resources allocated by ae_dma_alloc.
 * For each of the Tx descriptor, Tx status, and Rx descriptor rings:
 * unload the map, free the DMA memory, and destroy the tag; finally
 * destroy the parent tag.  Each tag is checked against NULL so the
 * function is safe to call on a partially-initialized softc.
 * NOTE(review): some continuation lines of the bus_dmamem_free() calls
 * are missing from this chunk.
 */
1457 ae_dma_free(struct ae_softc *sc)
1459 if (sc->dma_txd_tag != NULL) {
1460 bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
1461 bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
1463 bus_dma_tag_destroy(sc->dma_txd_tag);
1465 if (sc->dma_txs_tag != NULL) {
1466 bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
1467 bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
1469 bus_dma_tag_destroy(sc->dma_txs_tag);
1471 if (sc->dma_rxd_tag != NULL) {
1472 bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
1473 bus_dmamem_free(sc->dma_rxd_tag,
1474 sc->rxd_base_dma, sc->dma_rxd_map);
1475 bus_dma_tag_destroy(sc->dma_rxd_tag);
/* Parent tag must be destroyed last, after all child tags. */
1477 if (sc->dma_parent_tag != NULL)
1478 bus_dma_tag_destroy(sc->dma_parent_tag);
/*
 * ae_pcie_init: program the PCIe LTSSM test-mode and DLL Tx control
 * registers with their driver-default values.  The values themselves
 * are defined in the register header (AE_PCIE_*_DEFAULT).
 */
1482 ae_pcie_init(struct ae_softc *sc)
1484 AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG,
1485 AE_PCIE_LTSSM_TESTMODE_DEFAULT);
1486 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG,
1487 AE_PCIE_DLL_TX_CTRL_DEFAULT);
/*
 * ae_phy_reset: enable the PHY and busy-wait 1ms for it to come up.
 * The original author already flagged the DELAY() as a candidate for
 * pause(9) since this can run in a sleepable context.
 */
1491 ae_phy_reset(struct ae_softc *sc)
1493 AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
1494 DELAY(1000); /* XXX: pause(9) ? */
/*
 * ae_reset: soft-reset the ethernet controller.
 * Writes AE_MASTER_SOFT_RESET, issues a full read/write bus barrier,
 * then polls (up to AE_RESET_TIMEOUT iterations) for the reset bit to
 * self-clear, and afterwards polls (up to AE_IDLE_TIMEOUT) for the
 * idle register to read zero.  Either timeout is reported via
 * device_printf; the error-return lines are not visible in this chunk.
 */
1498 ae_reset(struct ae_softc *sc)
1503 * Issue a soft reset.
1505 AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
/* Barrier ensures the reset write reaches hardware before polling. */
1506 bus_space_barrier(sc->ae_mem_bt, sc->ae_mem_bh, AE_MASTER_REG, 4,
1507 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1510 * Wait for reset to complete.
1512 for (i = 0; i < AE_RESET_TIMEOUT; i++) {
1513 if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
1517 if (i == AE_RESET_TIMEOUT) {
1518 device_printf(sc->ae_dev, "reset timeout.\n");
1523 * Wait for everything to enter idle state.
1525 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1526 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
1530 if (i == AE_IDLE_TIMEOUT) {
1531 device_printf(sc->ae_dev, "could not enter idle state.\n");
/*
 * ae_check_eeprom_present: detect whether a VPD EEPROM is present.
 * Clears the SPICTL VPD-enable bit if set (mirrors the Linux driver),
 * then probes for the PCI VPD capability; the capability pointer is
 * returned through *vpdc.  Return-value lines are not visible here.
 */
1538 ae_check_eeprom_present(struct ae_softc *sc, int *vpdc)
1544 * Not sure why, but Linux does this.
1546 val = AE_READ_4(sc, AE_SPICTL_REG);
1547 if ((val & AE_SPICTL_VPD_EN) != 0) {
1548 val &= ~AE_SPICTL_VPD_EN;
1549 AE_WRITE_4(sc, AE_SPICTL_REG, val);
1551 error = pci_find_extcap(sc->ae_dev, PCIY_VPD, vpdc);
/*
 * ae_vpd_read_word: read one 32-bit word from the chip's VPD region.
 * VPD registers live at offset 0x100 + reg*4.  The address is written
 * to the VPD capability register, then the done bit is polled (up to
 * AE_VPD_TIMEOUT iterations); on success the word is fetched from
 * AE_VPD_DATA_REG into *word.  A timeout is reported via device_printf.
 */
1556 ae_vpd_read_word(struct ae_softc *sc, int reg, uint32_t *word)
1561 AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */
1564 * VPD registers start at offset 0x100. Read them.
1566 val = 0x100 + reg * 4;
1567 AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
1568 AE_VPD_CAP_ADDR_MASK);
1569 for (i = 0; i < AE_VPD_TIMEOUT; i++) {
1571 val = AE_READ_4(sc, AE_VPD_CAP_REG);
1572 if ((val & AE_VPD_CAP_DONE) != 0)
1575 if (i == AE_VPD_TIMEOUT) {
1576 device_printf(sc->ae_dev, "timeout reading VPD register %d.\n",
1580 *word = AE_READ_4(sc, AE_VPD_DATA_REG);
/*
 * ae_get_vpd_eaddr: recover the ethernet address from VPD data.
 * After verifying the EEPROM is present, scans up to AE_VPD_NREGS VPD
 * words.  Each valid entry is a (signature|register) word followed by a
 * data word; only entries targeting AE_EADDR0_REG/AE_EADDR1_REG are
 * consumed.  eaddr[1] keeps only its low 16 bits (a MAC address is 6
 * bytes), and the result is validated with AE_CHECK_EADDR_VALID.
 * NOTE(review): loop-exit, assignment-to-eaddr and return lines are
 * missing from this chunk.
 */
1585 ae_get_vpd_eaddr(struct ae_softc *sc, uint32_t *eaddr)
1587 uint32_t word, reg, val;
1596 error = ae_check_eeprom_present(sc, &vpdc);
1601 * Read the VPD configuration space.
1602 * Each register is prefixed with signature,
1603 * so we can check if it is valid.
1605 for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
1606 error = ae_vpd_read_word(sc, i, &word);
/* Skip words that do not carry the VPD signature. */
1613 if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
1615 reg = word >> AE_VPD_REG_SHIFT;
1616 i++; /* Move to the next word. */
1617 if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
1620 error = ae_vpd_read_word(sc, i, &val);
1623 if (reg == AE_EADDR0_REG)
1632 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1633 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1635 device_printf(sc->ae_dev,
1636 "VPD ethernet address registers are invalid.\n");
/*
 * ae_get_reg_eaddr: read the ethernet address straight from the
 * EADDR0/EADDR1 registers, which the BIOS is expected to have
 * programmed.  eaddr[1] keeps only its low 16 bits (the high 2 bytes
 * of the 6-byte MAC address) and the result is validated with
 * AE_CHECK_EADDR_VALID.
 * Fix: corrected the misspelled log message ("Ethetnet" -> "Ethernet").
 * NOTE(review): return statements and closing braces are missing from
 * this chunk; only the string literal was changed.
 */
1643 ae_get_reg_eaddr(struct ae_softc *sc, uint32_t *eaddr)
1646 * BIOS is supposed to set this.
1648 eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1649 eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1650 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1651 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1653 device_printf(sc->ae_dev,
1654 "Ethernet address registers are invalid.\n");
/*
 * ae_get_eaddr: determine the station address.
 * Tries the VPD EEPROM first, then the EADDR registers; if both fail,
 * generates a locally-administered random address (U/L bit set, with a
 * fixed 02:1f:c6 prefix and karc4random() low bytes).  On success the
 * address is unpacked from eaddr[0]/eaddr[1] into sc->ae_eaddr in
 * network byte order.
 * NOTE(review): the if/else lines selecting between the random and
 * register paths are missing from this chunk.
 */
1661 ae_get_eaddr(struct ae_softc *sc)
1663 uint32_t eaddr[2] = {0, 0};
1669 error = ae_get_vpd_eaddr(sc, eaddr);
1671 error = ae_get_reg_eaddr(sc, eaddr);
1674 device_printf(sc->ae_dev,
1675 "Generating random ethernet address.\n");
1676 eaddr[0] = karc4random();
1678 * Set OUI to ASUSTek COMPUTER INC.
1680 sc->ae_eaddr[0] = 0x02; /* U/L bit set. */
1681 sc->ae_eaddr[1] = 0x1f;
1682 sc->ae_eaddr[2] = 0xc6;
1683 sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
1684 sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
1685 sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
/* Hardware-provided path: bytes come from eaddr[1] (high 2) + eaddr[0]. */
1687 sc->ae_eaddr[0] = (eaddr[1] >> 8) & 0xff;
1688 sc->ae_eaddr[1] = (eaddr[1] >> 0) & 0xff;
1689 sc->ae_eaddr[2] = (eaddr[0] >> 24) & 0xff;
1690 sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
1691 sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
1692 sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
/*
 * ae_mediachange: ifmedia change callback.
 * Must run with the ifnet serializer held.  If multiple PHY instances
 * exist, resets each attached PHY before handing the media change to
 * mii_mediachg(); the mii error code is propagated to the caller.
 */
1697 ae_mediachange(struct ifnet *ifp)
1699 struct ae_softc *sc = ifp->if_softc;
1700 struct mii_data *mii = device_get_softc(sc->ae_miibus);
1703 ASSERT_SERIALIZED(ifp->if_serializer);
1704 if (mii->mii_instance != 0) {
1705 struct mii_softc *miisc;
1706 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1707 mii_phy_reset(miisc);
1709 error = mii_mediachg(mii);
/*
 * ae_mediastatus: ifmedia status callback.
 * Must run with the ifnet serializer held.  Copies the current link
 * status and active media from the MII layer into the request.
 * NOTE(review): the mii_pollstat() call usually preceding the copies
 * is not visible in this chunk.
 */
1714 ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1716 struct ae_softc *sc = ifp->if_softc;
1717 struct mii_data *mii = device_get_softc(sc->ae_miibus);
1719 ASSERT_SERIALIZED(ifp->if_serializer);
1721 ifmr->ifm_status = mii->mii_media_status;
1722 ifmr->ifm_active = mii->mii_media_active;
/*
 * ae_update_stats_tx: bump per-flag Tx statistics counters from a
 * Tx status descriptor's flags word.  Each AE_TXS_* bit maps to one
 * counter in the stats structure.
 * NOTE(review): the increment lines for the first few flags (bcast,
 * mcast, pause, ctrl, defer) are missing from this chunk.
 */
1726 ae_update_stats_tx(uint16_t flags, struct ae_stats *stats)
1728 if ((flags & AE_TXS_BCAST) != 0)
1730 if ((flags & AE_TXS_MCAST) != 0)
1732 if ((flags & AE_TXS_PAUSE) != 0)
1734 if ((flags & AE_TXS_CTRL) != 0)
1736 if ((flags & AE_TXS_DEFER) != 0)
1738 if ((flags & AE_TXS_EXCDEFER) != 0)
1739 stats->tx_excdefer++;
1740 if ((flags & AE_TXS_SINGLECOL) != 0)
1741 stats->tx_singlecol++;
1742 if ((flags & AE_TXS_MULTICOL) != 0)
1743 stats->tx_multicol++;
1744 if ((flags & AE_TXS_LATECOL) != 0)
1745 stats->tx_latecol++;
1746 if ((flags & AE_TXS_ABORTCOL) != 0)
1747 stats->tx_abortcol++;
1748 if ((flags & AE_TXS_UNDERRUN) != 0)
1749 stats->tx_underrun++;
/*
 * ae_update_stats_rx: bump per-flag Rx statistics counters from an
 * Rx descriptor's flags word.  Each AE_RXD_* bit maps to one counter
 * in the stats structure.
 * NOTE(review): several increment lines (bcast, mcast, pause, ctrl,
 * crcerr, runt, frag, trunc, align) are missing from this chunk.
 */
1753 ae_update_stats_rx(uint16_t flags, struct ae_stats *stats)
1755 if ((flags & AE_RXD_BCAST) != 0)
1757 if ((flags & AE_RXD_MCAST) != 0)
1759 if ((flags & AE_RXD_PAUSE) != 0)
1761 if ((flags & AE_RXD_CTRL) != 0)
1763 if ((flags & AE_RXD_CRCERR) != 0)
1765 if ((flags & AE_RXD_CODEERR) != 0)
1766 stats->rx_codeerr++;
1767 if ((flags & AE_RXD_RUNT) != 0)
1769 if ((flags & AE_RXD_FRAG) != 0)
1771 if ((flags & AE_RXD_TRUNC) != 0)
1773 if ((flags & AE_RXD_ALIGN) != 0)
/*
 * ae_resume: device resume method.
 * Under the ifnet serializer, reads the WOL register to clear any
 * pending wake-on-LAN status, then re-initializes the interface if it
 * was up before suspend.
 */
1778 ae_resume(device_t dev)
1780 struct ae_softc *sc = device_get_softc(dev);
1781 struct ifnet *ifp = &sc->arpcom.ac_if;
1783 lwkt_serialize_enter(ifp->if_serializer);
1785 AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. */
1788 if ((ifp->if_flags & IFF_UP) != 0)
1790 lwkt_serialize_exit(ifp->if_serializer);
/*
 * ae_suspend: device suspend method.
 * Stops the interface under the ifnet serializer; deliberately does
 * not run ae_pm_init because WOL is not wanted (see original comment).
 */
1795 ae_suspend(device_t dev)
1797 struct ae_softc *sc = device_get_softc(dev);
1798 struct ifnet *ifp = &sc->arpcom.ac_if;
1800 lwkt_serialize_enter(ifp->if_serializer);
1803 /* we don't use ae_pm_init because we don't want WOL */
1806 lwkt_serialize_exit(ifp->if_serializer);
/*
 * ae_shutdown: device shutdown method.
 * Enables PHY power-save mode under the ifnet serializer before the
 * machine powers down.
 * NOTE(review): a suspend/stop call usually preceding the serialize
 * block is not visible in this chunk.
 */
1811 ae_shutdown(device_t dev)
1813 struct ae_softc *sc = device_get_softc(dev);
1814 struct ifnet *ifp = &sc->arpcom.ac_if;
1818 lwkt_serialize_enter(ifp->if_serializer);
1819 ae_powersave_enable(sc);
1820 lwkt_serialize_exit(ifp->if_serializer);
/*
 * ae_powersave_disable: clear the PHY debug power-save bit.
 * Selects debug register 0 via AE_PHY_DBG_ADDR, reads the data word,
 * and rewrites it with AE_PHY_DBG_POWERSAVE cleared if it was set.
 */
1826 ae_powersave_disable(struct ae_softc *sc)
1830 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1831 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1832 if (val & AE_PHY_DBG_POWERSAVE) {
1833 val &= ~AE_PHY_DBG_POWERSAVE;
1834 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
1840 ae_powersave_enable(struct ae_softc *sc)
1845 * XXX magic numbers.
1847 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1848 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1849 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
1850 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
1851 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
1852 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
1853 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);