/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/alc/if_alc.c,v 1.6 2009/09/29 23:03:16 yongari Exp $
 */
/* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */
#include <sys/param.h>
#include <sys/bitops.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/tcp.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcidevs.h>

#include <dev/netif/alc/if_alcreg.h>
#include <dev/netif/alc/if_alcvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#undef ALC_USE_CUSTOM_CSUM

#ifdef ALC_USE_CUSTOM_CSUM
#define	ALC_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
#else
#define	ALC_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#endif
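/*
 * Note: with the custom-checksum scheme the controller inserts a raw
 * checksum computed from a payload offset (see alc_encap() below), so
 * CSUM_IP is presumably left to the stack; the default scheme lets the
 * hardware handle IP, TCP and UDP checksums itself.
 */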
static int alc_msi_enable = 1;
TUNABLE_INT("hw.alc.msi.enable", &alc_msi_enable);
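/*
 * The tunable can be set from loader.conf(5); e.g. "hw.alc.msi.enable=0"
 * forces the driver back to a legacy line interrupt instead of MSI.
 */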
/*
 * Devices supported by this driver.
 */
static struct alc_ident alc_ident_table[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024,
	    "Atheros AR8131 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024,
	    "Atheros AR8132 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024,
	    "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024,
	    "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024,
	    "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
	    "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ 0, 0, 0, NULL }
};
static int	alc_attach(device_t);
static int	alc_probe(device_t);
static int	alc_detach(device_t);
static int	alc_shutdown(device_t);
static int	alc_suspend(device_t);
static int	alc_resume(device_t);
static int	alc_miibus_readreg(device_t, int, int);
static void	alc_miibus_statchg(device_t);
static int	alc_miibus_writereg(device_t, int, int, int);

static void	alc_init(void *);
static void	alc_start(struct ifnet *, struct ifaltq_subque *);
static void	alc_watchdog(struct alc_softc *);
static int	alc_mediachange(struct ifnet *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
static int	alc_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static void	alc_aspm(struct alc_softc *, int);
static int	alc_check_boundary(struct alc_softc *);
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static void	alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
		alc_find_ident(device_t);
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static void	alc_intr(void *);
static void	alc_mac_config(struct alc_softc *);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *, boolean_t);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static void	alc_reset(struct alc_softc *);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *);
static void	alc_rxfilter(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
static void	alc_setlinkspeed(struct alc_softc *);
static void	alc_setwol(struct alc_softc *);
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct alc_softc *);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static void	alc_sysctl_node(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_txeof(struct alc_softc *);
static int	sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);
static device_method_t alc_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		alc_probe),
	DEVMETHOD(device_attach,	alc_attach),
	DEVMETHOD(device_detach,	alc_detach),
	DEVMETHOD(device_shutdown,	alc_shutdown),
	DEVMETHOD(device_suspend,	alc_suspend),
	DEVMETHOD(device_resume,	alc_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),

	DEVMETHOD_END
};
static DEFINE_CLASS_0(alc, alc_driver, alc_methods, sizeof(struct alc_softc));
static devclass_t alc_devclass;

DECLARE_DUMMY_MODULE(if_alc);
DRIVER_MODULE(if_alc, pci, alc_driver, alc_devclass, NULL, NULL);
DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, NULL, NULL);

static const uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
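/*
 * alc_dma_burst[] maps the 3-bit max-read-request/max-payload fields of
 * the PCIe device control register to byte counts (128 << n); the
 * trailing 0 covers reserved encodings.  See the attach code below.
 */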
static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct alc_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->alc_phyaddr)
		return (0);

	/*
	 * For the AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4).  Even though the AR8132 uses the same
	 * model/revision number as the F1 gigabit PHY, the PHY has no
	 * ability to establish a 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    (reg == MII_100T2CR || reg == MII_100T2SR || reg == MII_EXTSR))
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}
	if (i == 0) {
		device_printf(sc->alc_dev, "phy read timeout: %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}
static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct alc_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	if (phy != sc->alc_phyaddr)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->alc_dev, "phy write timeout: %d\n", reg);

	return (0);
}
static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t reg;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->alc_miibus);
	ifp = sc->alc_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
}
static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	if ((ifp->if_flags & IFF_UP) == 0)
		return;
	mii = device_get_softc(sc->alc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}
static int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->alc_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}
static struct alc_ident *
alc_find_ident(device_t dev)
{
	struct alc_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = alc_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}
static int
alc_probe(device_t dev)
{
	struct alc_ident *ident;

	ident = alc_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}
static void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	uint16_t val;
	int i;

	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload the EEPROM configuration.
		 * This will set the ethernet address of the controller.
		 */
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		device_printf(sc->alc_dev, "EEPROM not found!\n");
	}
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8131:
	case DEVICEID_ATHEROS_AR8132:
		if ((opt & OPT_CFG_CLK_ENB) != 0) {
			opt &= ~OPT_CFG_CLK_ENB;
			CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
			CSR_READ_4(sc, ALC_OPT_CFG);
			DELAY(1000);
		}
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x00);
		val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, val | 0x0080);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x3B);
		val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, val & 0xFFF7);
		DELAY(20);
		break;
	}
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	/* Another magic from vendor. */
	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
	    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
	    PM_CFG_SERDES_PD_EX_L1);
	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_L1_ENB;
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
static void
alc_phy_reset(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, vendor magic. */
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
	}

	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);
}
static void
alc_phy_down(struct alc_softc *sc)
{
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver was reloaded after a GPHY power down,
		 * accesses to PHY/MAC registers hung the system.  Only a
		 * cold boot recovered from it.  I'm not sure whether the
		 * AR8151 v1.0 also requires this workaround; I don't
		 * have an AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and
		 * initiate a PHY power down, which in turn saves more
		 * power when the driver is unloaded.
		 */
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG,
		    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}
static void
alc_aspm(struct alc_softc *sc, int media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE)) {
		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
		    PCIR_EXPRESS_LINK_CTL);
	} else {
		linkcfg = 0;
	}

	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= PM_CFG_SERDES_ENB | PM_CFG_RBER_ENB;
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCIR_EXPRESS_LINK_CTL,
		    linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->alc_ident->deviceid ==
			    DEVICEID_ATHEROS_AR8152_B) {
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			}
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->alc_ident->deviceid) {
				case DEVICEID_ATHEROS_AR8152_B:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case DEVICEID_ATHEROS_AR8152_B2:
				case DEVICEID_ATHEROS_AR8151_V2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
static int
alc_attach(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint16_t burst;
	int base, error, state, pmc, intr_flags;
	uint32_t cap, ctl, val;

	error = 0;
	sc = device_get_softc(dev);
	sc->alc_dev = dev;

	callout_init_mp(&sc->alc_tick_ch);
	sc->alc_ident = alc_find_ident(dev);

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Map the device. */
	sc->alc_res_rid = PCIR_BAR(0);
	sc->alc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->alc_res_rid, RF_ACTIVE);
	if (sc->alc_res == NULL) {
		device_printf(dev, "cannot allocate memory resources.\n");
		return (ENXIO);
	}
	sc->alc_res_btag = rman_get_bustag(sc->alc_res);
	sc->alc_res_bhand = rman_get_bushandle(sc->alc_res);

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;
	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
		burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
		sc->alc_dma_rd_burst =
		    (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
		sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
		if (bootverbose) {
			device_printf(dev, "Read request size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			device_printf(dev, "TLP payload size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;
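		/*
		 * Index 3 in alc_dma_burst[] is 1024 bytes, so read and
		 * write bursts larger than 1KB are clamped to 1KB here.
		 */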
		/* Clear data link and flow-control protocol error. */
		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}

		/* Disable ASPM L0S and L1. */
		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
			ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (bootverbose)
				device_printf(dev, "RCB %u bytes\n",
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if ((state & 0x01) != 0)
				sc->alc_flags |= ALC_FLAG_L0S;
			if ((state & 0x02) != 0)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (bootverbose)
				device_printf(sc->alc_dev, "ASPM %s %s\n",
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			if (bootverbose)
				device_printf(sc->alc_dev,
				    "no ASPM support\n");
		}
	}
	/* Reset the ethernet controller. */
	alc_reset(sc);

	/*
	 * One odd thing is that the AR8132 uses the same PHY hardware (F1
	 * gigabit PHY) as the AR8131.  So atphy(4) on the AR8132 reports
	 * that the PHY supports 1000Mbps, but that's not true.  The PHY
	 * used in the AR8132 can't establish a gigabit link even though
	 * it shows the same PHY model/revision number as the AR8131.
	 */
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		break;
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO;

	/*
	 * It seems that AR813x/AR815x has a silicon bug affecting the SMB.
	 * In addition, Atheros said that enabling the SMB wouldn't improve
	 * performance.  However, it's still bad to have to access lots of
	 * registers to extract the MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use Tx CMB.  It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_rev = pci_get_revid(dev);
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->alc_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->alc_chip_rev);
		device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
	}
	sc->alc_irq_type = pci_alloc_1intr(dev, alc_msi_enable,
	    &sc->alc_irq_rid, &intr_flags);

	/* Allocate IRQ resources. */
	sc->alc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->alc_irq_rid, intr_flags);
	if (sc->alc_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}
	/* Create device sysctl node. */
	alc_sysctl_node(sc);

	if ((error = alc_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = sc->alc_ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_init = alc_init;
	ifq_set_maxlen(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM;
	ifp->if_hwassist = ALC_CSUM_FEATURES;
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
		sc->alc_flags |= ALC_FLAG_PM;
		sc->alc_pmcap = pmc;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * XXX
	 * It seems that enabling Tx checksum offloading makes more trouble.
	 * Sometimes the controller does not receive any frames when Tx
	 * checksum offloading is enabled.  It's not clear whether this is
	 * a bug in the Tx checksum offloading logic or the sample boards
	 * were broken.  To be safe, don't enable Tx checksum offloading
	 * by default, but give users the chance to toggle it if they know
	 * their controllers work without problems.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
	ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->alc_miibus, alc_mediachange,
	    alc_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->alc_eaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->alc_irq));
#if 0 /* XXX: MSI-X leftovers from the FreeBSD driver */
	/* Create local taskq. */
	TASK_INIT(&sc->alc_tx_task, 1, alc_tx_task, ifp);
	sc->alc_tq = taskqueue_create("alc_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->alc_tq);
	if (sc->alc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->alc_tq, 1, TDPRI_KERN_DAEMON, -1, "%s taskq",
	    device_get_nameunit(sc->alc_dev));

	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->alc_irq[i], INTR_MPSAFE,
		    alc_intr, sc,
		    &sc->alc_intrhand[i], NULL);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}
#else
	error = bus_setup_intr(dev, sc->alc_irq, INTR_MPSAFE, alc_intr, sc,
	    &sc->alc_intrhand, ifp->if_serializer);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}
#endif

	return (0);

fail:
	alc_detach(dev);
	return (error);
}
static int
alc_detach(device_t dev)
{
	struct alc_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = sc->alc_ifp;

		lwkt_serialize_enter(ifp->if_serializer);
		alc_stop(sc);
		bus_teardown_intr(dev, sc->alc_irq, sc->alc_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->alc_miibus != NULL)
		device_delete_child(dev, sc->alc_miibus);
	bus_generic_detach(dev);

	if (sc->alc_res != NULL)
		alc_dma_free(sc);

	if (sc->alc_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->alc_irq_rid,
		    sc->alc_irq);
	}
	if (sc->alc_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->alc_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->alc_res_rid,
		    sc->alc_res);
	}

	return (0);
}
#define	ALC_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ALC_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
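/*
 * For example, ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
 * &stats->rx_frames, "Good frames") creates a read-only unsigned integer
 * sysctl leaf backed directly by that counter in struct alc_hw_stats.
 */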
static void
alc_sysctl_node(struct alc_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child, *parent;
	struct alc_hw_stats *stats;
	int error;

	stats = &sc->alc_stats;
	ctx = &sc->alc_sysctl_ctx;
	sysctl_ctx_init(ctx);

	tree = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, device_get_nameunit(sc->alc_dev),
	    CTLFLAG_RD, 0, "");
	if (tree == NULL) {
		device_printf(sc->alc_dev, "can't add sysctl node\n");
		return;
	}
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
	/* Pull in device tunables. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
	if (error == 0) {
		if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_RX_TIMER_DEFAULT);
			sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
		}
	}

	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
	if (error == 0) {
		if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_TX_TIMER_DEFAULT);
			sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
		}
	}

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
	    sysctl_hw_alc_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->alc_process_limit = ALC_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "process_limit",
	    &sc->alc_process_limit);
	if (error == 0) {
		if (sc->alc_process_limit < ALC_PROC_MIN ||
		    sc->alc_process_limit > ALC_PROC_MAX) {
			device_printf(sc->alc_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", ALC_PROC_DEFAULT);
			sc->alc_process_limit = ALC_PROC_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ALC statistics");
	parent = SYSCTL_CHILDREN(tree);
	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");
	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_excess_defer, "Frames with deferrals");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with mismatched length");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALC_SYSCTL_STAT_ADD32
#undef ALC_SYSCTL_STAT_ADD64
struct alc_dmamap_arg {
	bus_addr_t	alc_busaddr;
};

static void
alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct alc_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct alc_dmamap_arg *)arg;
	ctx->alc_busaddr = segs[0].ds_addr;
}
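/*
 * All ring/block loads below request a single contiguous segment (the
 * KASSERT above enforces this), so recording segs[0].ds_addr is enough.
 */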
/*
 * Normal and high Tx descriptors share a single Tx high address.
 * The four Rx descriptor/return rings and the CMB share the same Rx
 * high address.
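 *
 * For example, a Tx ring loaded at bus address 0xFFFFF000 would end past
 * the 4GB mark; ALC_ADDR_HI() of its start (0) and end (1) would then
 * differ, and alc_dma_alloc() would have to redo the allocation below
 * 4GB.  (Hypothetical addresses, for illustration only.)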
 */
static int
alc_check_boundary(struct alc_softc *sc)
{
	bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;

	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;

	/* 4GB boundary crossing is not allowed. */
	if ((ALC_ADDR_HI(rx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
	    (ALC_ADDR_HI(rr_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
	    (ALC_ADDR_HI(cmb_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
	    (ALC_ADDR_HI(tx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
		return (EFBIG);
	/*
	 * Make sure Rx return descriptor/Rx descriptor/CMB use
	 * the same high address.
	 */
	if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
	    (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
		return (EFBIG);

	return (0);
}
static int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	struct alc_dmamap_arg ctx;
	bus_addr_t lowaddr;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
again:
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_parent_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx return descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_CMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_CMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_cmb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for status message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_SMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_SMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_smb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
	    (void **)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_tx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
	    ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
	    (void **)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
	    ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx return ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
	    (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rr_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for CMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
	    (void **)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for CMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for CMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for SMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
	    (void **)&sc->alc_rdata.alc_smb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_smb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for SMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for SMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;
	/*
	 * All of the status blocks and descriptor rings are allocated
	 * below 4GB, so the high 32 bits of their addresses are the
	 * same (all 0).
	 */

	/* Make sure we've not crossed 4GB boundary. */
	if ((error = alc_check_boundary(sc)) != 0) {
		device_printf(sc->alc_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		alc_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * region.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}
	/*
	 * Create Tx buffer parent tag.
	 * AR813x/AR815x allows 64bit DMA addressing of Tx/Rx buffers,
	 * so it needs a separate parent DMA tag, as the ring parent DMA
	 * address space could have been restricted to the 32bit address
	 * space by a 4GB boundary crossing.
	 */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc->alc_cdata.alc_buffer_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_TSO_MAXSIZE,		/* maxsize */
	    ALC_MAXTXSEGS,		/* nsegments */
	    ALC_TSO_MAXSEGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, /* flags */
	    &sc->alc_cdata.alc_tx_tag);
	if (error != 0) {
		device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_buffer_tag, /* parent */
	    ALC_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED, /* flags */
	    &sc->alc_cdata.alc_rx_tag);
	if (error != 0) {
		device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->alc_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag,
	    BUS_DMA_WAITOK,
	    &sc->alc_cdata.alc_rx_sparemap);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag,
		    BUS_DMA_WAITOK, &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->alc_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers. */
	if (sc->alc_cdata.alc_tx_tag != NULL) {
		for (i = 0; i < ALC_TX_RING_CNT; i++) {
			txd = &sc->alc_cdata.alc_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
		sc->alc_cdata.alc_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->alc_cdata.alc_rx_tag != NULL) {
		for (i = 0; i < ALC_RX_RING_CNT; i++) {
			rxd = &sc->alc_cdata.alc_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
			    sc->alc_cdata.alc_rx_sparemap);
			sc->alc_cdata.alc_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
		sc->alc_cdata.alc_rx_tag = NULL;
	}
	/* Tx descriptor ring. */
	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
		if (sc->alc_cdata.alc_tx_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_cdata.alc_tx_ring_map);
		if (sc->alc_cdata.alc_tx_ring_map != NULL &&
		    sc->alc_rdata.alc_tx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
			    sc->alc_rdata.alc_tx_ring,
			    sc->alc_cdata.alc_tx_ring_map);
		sc->alc_rdata.alc_tx_ring = NULL;
		sc->alc_cdata.alc_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
		sc->alc_cdata.alc_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
		if (sc->alc_cdata.alc_rx_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_cdata.alc_rx_ring_map);
		if (sc->alc_cdata.alc_rx_ring_map != NULL &&
		    sc->alc_rdata.alc_rx_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
			    sc->alc_rdata.alc_rx_ring,
			    sc->alc_cdata.alc_rx_ring_map);
		sc->alc_rdata.alc_rx_ring = NULL;
		sc->alc_cdata.alc_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
		sc->alc_cdata.alc_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
		if (sc->alc_cdata.alc_rr_ring_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_cdata.alc_rr_ring_map);
		if (sc->alc_cdata.alc_rr_ring_map != NULL &&
		    sc->alc_rdata.alc_rr_ring != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
			    sc->alc_rdata.alc_rr_ring,
			    sc->alc_cdata.alc_rr_ring_map);
		sc->alc_rdata.alc_rr_ring = NULL;
		sc->alc_cdata.alc_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
		sc->alc_cdata.alc_rr_ring_tag = NULL;
	}
	/* CMB block. */
	if (sc->alc_cdata.alc_cmb_tag != NULL) {
		if (sc->alc_cdata.alc_cmb_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_cdata.alc_cmb_map);
		if (sc->alc_cdata.alc_cmb_map != NULL &&
		    sc->alc_rdata.alc_cmb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
			    sc->alc_rdata.alc_cmb,
			    sc->alc_cdata.alc_cmb_map);
		sc->alc_rdata.alc_cmb = NULL;
		sc->alc_cdata.alc_cmb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
		sc->alc_cdata.alc_cmb_tag = NULL;
	}
	/* SMB block. */
	if (sc->alc_cdata.alc_smb_tag != NULL) {
		if (sc->alc_cdata.alc_smb_map != NULL)
			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
			    sc->alc_cdata.alc_smb_map);
		if (sc->alc_cdata.alc_smb_map != NULL &&
		    sc->alc_rdata.alc_smb != NULL)
			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
			    sc->alc_rdata.alc_smb,
			    sc->alc_cdata.alc_smb_map);
		sc->alc_rdata.alc_smb = NULL;
		sc->alc_cdata.alc_smb_map = NULL;
		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
		sc->alc_cdata.alc_smb_tag = NULL;
	}
	if (sc->alc_cdata.alc_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
		sc->alc_cdata.alc_buffer_tag = NULL;
	}
	if (sc->alc_cdata.alc_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
		sc->alc_cdata.alc_parent_tag = NULL;
	}
}
static int
alc_shutdown(device_t dev)
{
	return (alc_suspend(dev));
}
/* XXX: LINK SPEED */
/*
 * Note, this driver resets the link speed to 10/100Mbps by restarting
 * auto-negotiation in the suspend/shutdown phase, but we don't know
 * whether that auto-negotiation will succeed, as the driver has no
 * control after the power-off/suspend operation.  If the renegotiation
 * fails, WOL may not work.  Running at 1Gbps would draw more power than
 * the 375mA at 3.3V specified in the PCI specification, and that could
 * result in power to the ethernet controller being shut down completely.
 *
 * Save the currently negotiated media speed/duplex/flow-control to the
 * softc and restore the same link again after resuming.  PHY handling
 * such as powering down/resetting to 100Mbps may be better handled in
 * the suspend method of the phy driver.
 */
static void
alc_setlinkspeed(struct alc_softc *sc)
{
	struct mii_data *mii;
	int aneg, i;

	mii = device_get_softc(sc->alc_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/*
		 * Poll link state until alc(4) gets a 10/100Mbps link.
		 */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(
				    mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					alc_mac_config(sc);
					return;
				default:
					break;
				}
			}
			pause("alclnk", hz);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->alc_dev,
			    "establishing a link failed, WOL may not work!");
	}
	/*
	 * No link, force the MAC to have a 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	alc_mac_config(sc);
}
static void
alc_setwol(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg, pmcs;
	uint16_t pmstat;

	ALC_LOCK_ASSERT(sc);

	alc_disable_l0s_l1(sc);
	ifp = sc->alc_ifp;
	if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
		/* Disable WOL. */
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
		/* Force PHY power down. */
		alc_phy_down(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
		return;
	}

	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
			alc_setlinkspeed(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS);
	}

	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
	    MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		reg |= MAC_CFG_RX_ENB;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
	reg |= PCIE_PHYMISC_FORCE_RCV_DET;
	CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* WOL disabled, PHY power down. */
		alc_phy_down(sc);
		CSR_WRITE_4(sc, ALC_MASTER_CFG,
		    CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
	}
	/* Request PME. */
	pmstat = pci_read_config(sc->alc_dev,
	    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->alc_dev,
	    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
}
static int
alc_suspend(device_t dev)
{
	struct alc_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	alc_stop(sc);
	alc_setwol(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
static int
alc_resume(device_t dev)
{
	struct alc_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t pmstat;

	lwkt_serialize_enter(ifp->if_serializer);

	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->alc_dev,
		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->alc_dev,
			    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
		}
	}

	/* Reset PHY. */
	alc_phy_reset(sc);
	if (ifp->if_flags & IFF_UP)
		alc_init(ifp);

	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
1946 alc_encap(struct alc_softc *sc, struct mbuf **m_head)
1948 struct alc_txdesc *txd, *txd_last;
1949 struct tx_desc *desc;
1951 #if 0 /* XXX: TSO */
1955 bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
1957 uint32_t cflags, hdrlen, poff, vtag;
1958 #if 0 /* XXX: TSO */
1961 int error, idx, nsegs, prod;
1963 M_ASSERTPKTHDR((*m_head));
1968 #if 0 /* XXX: TSO */
1972 if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
1974	 * AR813x/AR815x requires the offset of the TCP/UDP header in its
1975	 * Tx descriptor to perform Tx checksum offloading. TSO
1976	 * also requires the TCP header offset and modification of the
1977	 * IP/TCP header. This kind of operation takes many CPU
1978	 * cycles on FreeBSD, so a fast host CPU is required to get
1979	 * smooth TSO performance.
1981 struct ether_header *eh;
1983 if (M_WRITABLE(m) == 0) {
1984 /* Get a writable copy. */
1985 m = m_dup(*m_head, MB_DONTWAIT);
1986 /* Release original mbufs. */
1995 ip_off = sizeof(struct ether_header);
1996 m = m_pullup(m, ip_off + sizeof(struct ip));
2001 eh = mtod(m, struct ether_header *);
2003 * Check if hardware VLAN insertion is off.
2004 * Additional check for LLC/SNAP frame?
2006 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2007 ip_off = sizeof(struct ether_vlan_header);
2008 m = m_pullup(m, ip_off);
2014 m = m_pullup(m, ip_off + sizeof(struct ip));
2019 ip = (struct ip *)(mtod(m, char *) + ip_off);
2020 poff = ip_off + (ip->ip_hl << 2);
2022 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2023 m = m_pullup(m, poff + sizeof(struct tcphdr));
2028 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
2029 m = m_pullup(m, poff + (tcp->th_off << 2));
2035	 * Due to strict adherence to the Microsoft NDIS
2036	 * Large Send specification, the hardware expects
2037	 * a pseudo TCP checksum inserted by the upper
2038	 * stack. Unfortunately, the pseudo TCP checksum
2039	 * that NDIS refers to does not include the TCP
2040	 * payload length, so the driver should recompute
2041	 * the pseudo checksum here. Hopefully this
2042	 * isn't much of a burden on modern CPUs.
2044	 * Reset the IP checksum and recompute the TCP pseudo
2045	 * checksum, as the NDIS specification says.
2048 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
2049 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
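/*
 * Note that the third in_pseudo() argument is just htons(IPPROTO_TCP):
 * the TCP length is deliberately left out of the pseudo sum, matching
 * the NDIS-style pseudo checksum described above; the hardware factors
 * in the per-segment length itself.
 */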
2055 prod = sc->alc_cdata.alc_tx_prod;
2056 txd = &sc->alc_cdata.alc_txdesc[prod];
2058 map = txd->tx_dmamap;
2060 error = bus_dmamap_load_mbuf_defrag(
2061 sc->alc_cdata.alc_tx_tag, map, m_head,
2062 txsegs, ALC_MAXTXSEGS, &nsegs, BUS_DMA_NOWAIT);
2074 /* Check descriptor overrun. */
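/*
 * The "- 3" appears to keep a few descriptors in reserve (e.g. for the
 * extra header descriptor a TSO frame may need) so the producer index
 * can never fully catch up with the consumer; this is an
 * interpretation, not vendor documentation.
 */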
2075 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
2076 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
2079 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2082 cflags = TD_ETHERNET;
2086 /* Configure VLAN hardware tag insertion. */
2087 if ((m->m_flags & M_VLANTAG) != 0) {
2088 vtag = htons(m->m_pkthdr.ether_vlantag);
2089 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
2090 cflags |= TD_INS_VLAN_TAG;
2092 /* Configure Tx checksum offload. */
2093 if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
2094 #ifdef ALC_USE_CUSTOM_CSUM
2095 cflags |= TD_CUSTOM_CSUM;
2096 /* Set checksum start offset. */
2097 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
2098 TD_PLOAD_OFFSET_MASK;
2099 /* Set checksum insertion position of TCP/UDP. */
2100 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
2101 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
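/*
 * The ">> 1" above converts byte offsets into 16-bit-word units, which
 * is presumably what the custom checksum start/insert position fields
 * expect.
 */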
2103 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2104 cflags |= TD_IPCSUM;
2105 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2106 cflags |= TD_TCPCSUM;
2107 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2108 cflags |= TD_UDPCSUM;
2109 /* Set TCP/UDP header offset. */
2110 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
2111 TD_L4HDR_OFFSET_MASK;
2113 } else if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2114 /* Request TSO and set MSS. */
2115 cflags |= TD_TSO | TD_TSO_DESCV1;
2118 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
2120 /* Set TCP header offset. */
2122 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
2123 TD_TCPHDR_OFFSET_MASK;
2125	 * AR813x/AR815x requires that the first buffer
2126	 * hold only the IP/TCP header data. The payload must
2127	 * be handled in the other descriptors.
2129 hdrlen = poff + (tcp->th_off << 2);
2130 desc = &sc->alc_rdata.alc_tx_ring[prod];
2131 desc->len = htole32(TX_BYTES(hdrlen | vtag));
2132 desc->flags = htole32(cflags);
2133 desc->addr = htole64(txsegs[0].ds_addr);
2134 sc->alc_cdata.alc_tx_cnt++;
2135 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2136 if (m->m_len - hdrlen > 0) {
2137 /* Handle remaining payload of the first fragment. */
2138 desc = &sc->alc_rdata.alc_tx_ring[prod];
2139 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
2141 desc->flags = htole32(cflags);
2142 desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
2143 sc->alc_cdata.alc_tx_cnt++;
2144 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
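/*
 * Worked example with hypothetical sizes: for a plain Ethernet/IPv4/TCP
 * frame, poff = 14 + 20 = 34 and tcp->th_off = 5, so
 * hdrlen = 34 + 20 = 54. The first descriptor then covers bytes 0-53 of
 * the first DMA segment and, if that segment is longer, the descriptor
 * built above covers the remainder starting at offset 54.
 */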
2146 /* Handle remaining fragments. */
2149 for (; idx < nsegs; idx++) {
2150 desc = &sc->alc_rdata.alc_tx_ring[prod];
2151 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
2152 desc->flags = htole32(cflags);
2153 desc->addr = htole64(txsegs[idx].ds_addr);
2154 sc->alc_cdata.alc_tx_cnt++;
2155 ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2157 /* Update producer index. */
2158 sc->alc_cdata.alc_tx_prod = prod;
2160 /* Finally set EOP on the last descriptor. */
2161 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
2162 desc = &sc->alc_rdata.alc_tx_ring[prod];
2163 desc->flags |= htole32(TD_EOP);
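/*
 * prod points one slot past the last descriptor that was used, so the
 * (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT step above walks back
 * to the final segment even when the index has just wrapped to 0.
 */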
2165 /* Swap dmamap of the first and the last. */
2166 txd = &sc->alc_cdata.alc_txdesc[prod];
2167 map = txd_last->tx_dmamap;
2168 txd_last->tx_dmamap = txd->tx_dmamap;
2169 txd->tx_dmamap = map;
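/*
 * The dmamap that actually holds the loaded mbuf chain was taken from
 * the first descriptor's slot (txd_last); moving it to the EOP slot
 * lets the Tx completion path find and unload it at the descriptor
 * that records the mbuf.
 */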
2176 alc_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
2178 struct alc_softc *sc = ifp->if_softc;
2179 struct mbuf *m_head;
2182 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
2183 ASSERT_SERIALIZED(ifp->if_serializer);
2185 /* Reclaim transmitted frames. */
2186 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
2189 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
2191 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2192 ifq_purge(&ifp->if_snd);
2196 for (enq = 0; !ifq_is_empty(&ifp->if_snd); ) {
2197 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2201 * Pack the data into the transmit ring. If we
2202 * don't have room, set the OACTIVE flag and wait
2203 * for the NIC to drain the ring.
2205 if (alc_encap(sc, &m_head)) {
2208 ifq_prepend(&ifp->if_snd, m_head);
2209 ifq_set_oactive(&ifp->if_snd);
2215 * If there's a BPF listener, bounce a copy of this frame
2218 ETHER_BPF_MTAP(ifp, m_head);
2222 /* Sync descriptors. */
2223 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2224 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
2225 /* Kick. Assume we're using normal Tx priority queue. */
2226 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
2227 (sc->alc_cdata.alc_tx_prod <<
2228 MBOX_TD_PROD_LO_IDX_SHIFT) &
2229 MBOX_TD_PROD_LO_IDX_MASK);
2230 /* Set a timeout in case the chip goes out to lunch. */
2231 sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
2236 alc_watchdog(struct alc_softc *sc)
2238 struct ifnet *ifp = &sc->arpcom.ac_if;
2240 ASSERT_SERIALIZED(ifp->if_serializer);
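/*
 * A value of 0 means the watchdog is unarmed; otherwise the timer is
 * decremented once per tick and the timeout path below runs only when
 * the decrement reaches zero.
 */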
2242 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
2245 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2246 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
2247 IFNET_STAT_INC(ifp, oerrors, 1);
2251 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
2252 IFNET_STAT_INC(ifp, oerrors, 1);
2254 if (!ifq_is_empty(&ifp->if_snd))
2259 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
2261 struct alc_softc *sc;
2263 struct mii_data *mii;
2266 ASSERT_SERIALIZED(ifp->if_serializer);
2269 ifr = (struct ifreq *)data;
2273 if (ifr->ifr_mtu < ETHERMIN ||
2274 ifr->ifr_mtu > (sc->alc_ident->max_framelen -
2275 sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) ||
2276 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
2277 ifr->ifr_mtu > ETHERMTU)) {
2279 } else if (ifp->if_mtu != ifr->ifr_mtu) {
2280 ifp->if_mtu = ifr->ifr_mtu;
2282	/* AR813x/AR815x has a 13-bit MSS field. */
2283 if (ifp->if_mtu > ALC_TSO_MTU &&
2284 (ifp->if_capenable & IFCAP_TSO4) != 0) {
2285 ifp->if_capenable &= ~IFCAP_TSO4;
2286 ifp->if_hwassist &= ~CSUM_TSO;
2292 if ((ifp->if_flags & IFF_UP) != 0) {
2293 if ((ifp->if_flags & IFF_RUNNING) != 0 &&
2294 ((ifp->if_flags ^ sc->alc_if_flags) &
2295 (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2297 else if ((ifp->if_flags & IFF_RUNNING) == 0)
2299 } else if ((ifp->if_flags & IFF_RUNNING) != 0)
2301 sc->alc_if_flags = ifp->if_flags;
2305 if ((ifp->if_flags & IFF_RUNNING) != 0)
2310 mii = device_get_softc(sc->alc_miibus);
2311 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2314 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2315 if ((mask & IFCAP_TXCSUM) != 0 &&
2316 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2317 ifp->if_capenable ^= IFCAP_TXCSUM;
2318 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2319 ifp->if_hwassist |= ALC_CSUM_FEATURES;
2321 ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
2325 if ((mask & IFCAP_WOL_MCAST) != 0 &&
2326 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2327 ifp->if_capenable ^= IFCAP_WOL_MCAST;
2328 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2329 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2330 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2332 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2333 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2334 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2337 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2338 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2339 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2342	 * VLAN hardware tagging is required to do checksum
2343	 * offloading or TSO on a VLAN interface. Checksum offloading
2344	 * on a VLAN interface also requires hardware checksum
2345	 * offloading on the parent interface.
2347 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
2348 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
2349 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2350 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
2351	/* XXX VLAN_CAPABILITIES(ifp); */
2354 error = ether_ioctl(ifp, cmd, data);
2362 alc_mac_config(struct alc_softc *sc)
2364 struct mii_data *mii;
2367 mii = device_get_softc(sc->alc_miibus);
2368 reg = CSR_READ_4(sc, ALC_MAC_CFG);
2369 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2370 MAC_CFG_SPEED_MASK);
2371 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
2372 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
2373 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
2374 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2376 /* Reprogram MAC with resolved speed/duplex. */
2377 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2380 reg |= MAC_CFG_SPEED_10_100;
2383 reg |= MAC_CFG_SPEED_1000;
2386 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2387 reg |= MAC_CFG_FULL_DUPLEX;
2389 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2390 reg |= MAC_CFG_TX_FC;
2391 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2392 reg |= MAC_CFG_RX_FC;
2395 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2399 alc_stats_clear(struct alc_softc *sc)
2401 struct smb sb, *smb;
2405 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2406 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2407 sc->alc_cdata.alc_smb_map,
2408 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2409 smb = sc->alc_rdata.alc_smb;
2410 /* Update done, clear. */
2412 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2413 sc->alc_cdata.alc_smb_map,
2414 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2416 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2418 CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2419 i += sizeof(uint32_t);
2421 /* Read Tx statistics. */
2422 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2424 CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2425 i += sizeof(uint32_t);
2431 alc_stats_update(struct alc_softc *sc)
2433 struct alc_hw_stats *stat;
2434 struct smb sb, *smb;
2440 stat = &sc->alc_stats;
2441 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2442 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2443 sc->alc_cdata.alc_smb_map,
2444 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2445 smb = sc->alc_rdata.alc_smb;
2446 if (smb->updated == 0)
2450 /* Read Rx statistics. */
2451 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2453 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2454 i += sizeof(uint32_t);
2456 /* Read Tx statistics. */
2457 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2459 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2460 i += sizeof(uint32_t);
2465 stat->rx_frames += smb->rx_frames;
2466 stat->rx_bcast_frames += smb->rx_bcast_frames;
2467 stat->rx_mcast_frames += smb->rx_mcast_frames;
2468 stat->rx_pause_frames += smb->rx_pause_frames;
2469 stat->rx_control_frames += smb->rx_control_frames;
2470 stat->rx_crcerrs += smb->rx_crcerrs;
2471 stat->rx_lenerrs += smb->rx_lenerrs;
2472 stat->rx_bytes += smb->rx_bytes;
2473 stat->rx_runts += smb->rx_runts;
2474 stat->rx_fragments += smb->rx_fragments;
2475 stat->rx_pkts_64 += smb->rx_pkts_64;
2476 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2477 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2478 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2479 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2480 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2481 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2482 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2483 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2484 stat->rx_rrs_errs += smb->rx_rrs_errs;
2485 stat->rx_alignerrs += smb->rx_alignerrs;
2486 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2487 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2488 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2491 stat->tx_frames += smb->tx_frames;
2492 stat->tx_bcast_frames += smb->tx_bcast_frames;
2493 stat->tx_mcast_frames += smb->tx_mcast_frames;
2494 stat->tx_pause_frames += smb->tx_pause_frames;
2495 stat->tx_excess_defer += smb->tx_excess_defer;
2496 stat->tx_control_frames += smb->tx_control_frames;
2497 stat->tx_deferred += smb->tx_deferred;
2498 stat->tx_bytes += smb->tx_bytes;
2499 stat->tx_pkts_64 += smb->tx_pkts_64;
2500 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2501 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2502 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2503 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2504 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2505 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2506 stat->tx_single_colls += smb->tx_single_colls;
2507 stat->tx_multi_colls += smb->tx_multi_colls;
2508 stat->tx_late_colls += smb->tx_late_colls;
2509 stat->tx_excess_colls += smb->tx_excess_colls;
2510 stat->tx_abort += smb->tx_abort;
2511 stat->tx_underrun += smb->tx_underrun;
2512 stat->tx_desc_underrun += smb->tx_desc_underrun;
2513 stat->tx_lenerrs += smb->tx_lenerrs;
2514 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2515 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2516 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2518 /* Update counters in ifnet. */
2519 IFNET_STAT_INC(ifp, opackets, smb->tx_frames);
2521 IFNET_STAT_INC(ifp, collisions, smb->tx_single_colls +
2522 smb->tx_multi_colls * 2 + smb->tx_late_colls +
2523 smb->tx_abort * HDPX_CFG_RETRY_DEFAULT);
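/*
 * Collision estimate: multi-collision frames are counted twice, and
 * each excessive-collision abort is weighted by the half-duplex retry
 * limit (HDPX_CFG_RETRY_DEFAULT) to approximate how many collisions
 * those aborted frames saw.
 */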
2527	 * The tx_pkts_truncated counter looks suspicious. It constantly
2528	 * increments with no sign of Tx errors. This may indicate that
2529	 * the counter name is not the correct one, so I've removed the
2530	 * counter from the output errors.
2532 IFNET_STAT_INC(ifp, oerrors, smb->tx_abort + smb->tx_late_colls +
2535 IFNET_STAT_INC(ifp, ipackets, smb->rx_frames);
2537 IFNET_STAT_INC(ifp, ierrors, smb->rx_crcerrs + smb->rx_lenerrs +
2538 smb->rx_runts + smb->rx_pkts_truncated +
2539 smb->rx_fifo_oflows + smb->rx_rrs_errs +
2542 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2543 /* Update done, clear. */
2545 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2546 sc->alc_cdata.alc_smb_map,
2547 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2554 struct alc_softc *sc = arg;
2555 struct ifnet *ifp = &sc->arpcom.ac_if;
2558 ASSERT_SERIALIZED(ifp->if_serializer);
2560 status = CSR_READ_4(sc, ALC_INTR_STATUS);
2561 if ((status & ALC_INTRS) == 0)
2564	/* Acknowledge pending interrupts and disable further interrupts. */
2565 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2567 if (ifp->if_flags & IFF_RUNNING) {
2568 if (status & INTR_RX_PKT) {
2569 if (alc_rxintr(sc)) {
2574 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2576 if (status & INTR_DMA_RD_TO_RST) {
2578 "DMA read error! -- resetting\n");
2580 if (status & INTR_DMA_WR_TO_RST) {
2582 "DMA write error! -- resetting\n");
2584 if (status & INTR_TXQ_TO_RST)
2585 if_printf(ifp, "TxQ reset! -- resetting\n");
2589 if (!ifq_is_empty(&ifp->if_snd))
2592 /* Re-enable interrupts */
2593 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
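/*
 * 0x7FFFFFFF is every status bit except INTR_DIS_INT (presumably bit
 * 31): the write acknowledges anything still pending while clearing
 * the disable bit set at entry, re-enabling interrupt delivery.
 */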
2598 alc_txeof(struct alc_softc *sc)
2601 struct alc_txdesc *txd;
2602 uint32_t cons, prod;
2607 if (sc->alc_cdata.alc_tx_cnt == 0)
2609 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2610 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
2611 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2612 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2613 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
2614 prod = sc->alc_rdata.alc_cmb->cons;
2616 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2617 /* Assume we're using normal Tx priority queue. */
2618 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2619 MBOX_TD_CONS_LO_IDX_SHIFT;
2620 cons = sc->alc_cdata.alc_tx_cons;
2622 * Go through our Tx list and free mbufs for those
2623 * frames which have been transmitted.
2625 for (prog = 0; cons != prod; prog++,
2626 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2627 if (sc->alc_cdata.alc_tx_cnt <= 0)
2630 ifq_clr_oactive(&ifp->if_snd);
2631 sc->alc_cdata.alc_tx_cnt--;
2632 txd = &sc->alc_cdata.alc_txdesc[cons];
2633 if (txd->tx_m != NULL) {
2634 /* Reclaim transmitted mbufs. */
2635 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
2636 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2637 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
2644 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2645 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2646 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
2647 sc->alc_cdata.alc_tx_cons = cons;
2649	 * Unarm the watchdog timer only when there are no pending
2650	 * frames in the Tx queue.
2652 if (sc->alc_cdata.alc_tx_cnt == 0)
2653 sc->alc_watchdog_timer = 0;
2657 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, boolean_t wait)
2660 bus_dma_segment_t segs[1];
2665 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2668 m->m_len = m->m_pkthdr.len = MCLBYTES;
2670	/* Hardware requires 4-byte alignment. */
2671 m_adj(m, ETHER_ALIGN);
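/*
 * ETHER_ALIGN is 2: offsetting the 14-byte Ethernet header by two
 * bytes leaves the IP header 4-byte aligned within the cluster.
 */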
2674 error = bus_dmamap_load_mbuf_segment(
2675 sc->alc_cdata.alc_rx_tag,
2676 sc->alc_cdata.alc_rx_sparemap,
2677 m, segs, 1, &nsegs, BUS_DMA_NOWAIT);
2682 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2684 if (rxd->rx_m != NULL) {
2685 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2686 BUS_DMASYNC_POSTREAD);
2687 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
2689 map = rxd->rx_dmamap;
2690 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2691 sc->alc_cdata.alc_rx_sparemap = map;
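/*
 * Spare-map pattern: the freshly loaded spare map is swapped into the
 * ring slot and the slot's old map becomes the new spare, so a failed
 * load never leaves a ring entry without a valid mapping.
 */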
2692 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2693 BUS_DMASYNC_PREREAD);
2695 rxd->rx_desc->addr = htole64(segs[0].ds_addr);
2700 alc_rxintr(struct alc_softc *sc)
2703 struct rx_rdesc *rrd;
2704 uint32_t nsegs, status;
2707 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2708 sc->alc_cdata.alc_rr_ring_map,
2709 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2710 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2711 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2712 rr_cons = sc->alc_cdata.alc_rr_cons;
2714 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
2715 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2716 status = le32toh(rrd->status);
2717 if ((status & RRD_VALID) == 0)
2719 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2721 /* This should not happen! */
2722 device_printf(sc->alc_dev,
2723 "unexpected segment count -- resetting\n");
2727 /* Clear Rx return status. */
2729 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2730 sc->alc_cdata.alc_rx_cons += nsegs;
2731 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2736 /* Update the consumer index. */
2737 sc->alc_cdata.alc_rr_cons = rr_cons;
2738 /* Sync Rx return descriptors. */
2739 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2740 sc->alc_cdata.alc_rr_ring_map,
2741 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2743	 * Sync the updated Rx descriptors so that the controller sees
2744	 * the modified buffer addresses.
2746 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2747 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
2749	 * Let the controller know about the availability of new Rx buffers.
2750	 * Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT descriptors,
2751	 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
2752	 * only when Rx buffer pre-fetching is required. In
2753	 * addition, we already set ALC_RX_RD_FREE_THRESH to
2754	 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However,
2755	 * it still seems that pre-fetching needs more
2758 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2759 sc->alc_cdata.alc_rx_cons);
2765 /* Receive a frame. */
2767 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2769 struct alc_rxdesc *rxd;
2771 struct mbuf *mp, *m;
2772 uint32_t rdinfo, status, vtag;
2773 int count, nsegs, rx_cons;
2776 status = le32toh(rrd->status);
2777 rdinfo = le32toh(rrd->rdinfo);
2778 rx_cons = RRD_RD_IDX(rdinfo);
2779 nsegs = RRD_RD_CNT(rdinfo);
2781 sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2782 if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
2784	 * We want to pass the following frames to the upper
2785	 * layer regardless of the error status of the Rx return
2788	 *  o IP/TCP/UDP checksum is bad.
2789	 *  o frame length and protocol specific length
2792	 *  Force the network stack to compute the checksum for
2795 status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
2796 if ((RRD_ERR_CRC | RRD_ERR_ALIGN | RRD_ERR_TRUNC |
2801 for (count = 0; count < nsegs; count++,
2802 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2803 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2805 /* Add a new receive buffer to the ring. */
2806 if (alc_newbuf(sc, rxd, FALSE) != 0) {
2807 IFNET_STAT_INC(ifp, iqdrops, 1);
2808 /* Reuse Rx buffers. */
2809 if (sc->alc_cdata.alc_rxhead != NULL)
2810 m_freem(sc->alc_cdata.alc_rxhead);
2815	 * Assume we've received a full-sized frame.
2816	 * The actual size is fixed when we encounter the end of a
2817	 * multi-segmented frame.
2819 mp->m_len = sc->alc_buf_size;
2821 /* Chain received mbufs. */
2822 if (sc->alc_cdata.alc_rxhead == NULL) {
2823 sc->alc_cdata.alc_rxhead = mp;
2824 sc->alc_cdata.alc_rxtail = mp;
2826 sc->alc_cdata.alc_rxprev_tail =
2827 sc->alc_cdata.alc_rxtail;
2828 sc->alc_cdata.alc_rxtail->m_next = mp;
2829 sc->alc_cdata.alc_rxtail = mp;
2832 if (count == nsegs - 1) {
2833 /* Last desc. for this frame. */
2834 m = sc->alc_cdata.alc_rxhead;
2836	 * It seems that the L1C/L2C controller has no way
2837	 * to tell the hardware to strip the CRC bytes.
2840 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2842 /* Set last mbuf size. */
2843 mp->m_len = sc->alc_cdata.alc_rxlen -
2844 (nsegs - 1) * sc->alc_buf_size;
2845 /* Remove the CRC bytes in chained mbufs. */
2846 if (mp->m_len <= ETHER_CRC_LEN) {
2847 sc->alc_cdata.alc_rxtail =
2848 sc->alc_cdata.alc_rxprev_tail;
2849 sc->alc_cdata.alc_rxtail->m_len -=
2850 (ETHER_CRC_LEN - mp->m_len);
2851 sc->alc_cdata.alc_rxtail->m_next = NULL;
2854 mp->m_len -= ETHER_CRC_LEN;
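/*
 * Example: if the last mbuf holds a single byte, that mbuf is dropped
 * from the chain and the remaining ETHER_CRC_LEN - 1 = 3 CRC bytes are
 * trimmed from the previous mbuf; otherwise all four CRC bytes are cut
 * from the last mbuf directly.
 */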
2857 m->m_len = m->m_pkthdr.len;
2858 m->m_pkthdr.rcvif = ifp;
2860 * Due to hardware bugs, Rx checksum offloading
2861 * was intentionally disabled.
2863 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2864 (status & RRD_VLAN_TAG) != 0) {
2865 vtag = RRD_VLAN(le32toh(rrd->vtag));
2866 m->m_pkthdr.ether_vlantag = ntohs(vtag);
2867 m->m_flags |= M_VLANTAG;
2871 ifp->if_input(ifp, m);
2874 /* Reset mbuf chains. */
2875 ALC_RXCHAIN_RESET(sc);
2881 struct alc_softc *sc = arg;
2882 struct ifnet *ifp = &sc->arpcom.ac_if;
2883 struct mii_data *mii;
2885 lwkt_serialize_enter(ifp->if_serializer);
2887 mii = device_get_softc(sc->alc_miibus);
2889 alc_stats_update(sc);
2891	 * alc(4) does not rely on Tx completion interrupts to reclaim
2892	 * transferred buffers. Instead, Tx completion interrupts are
2893	 * used as a hint for scheduling the Tx task. So it's necessary to
2894	 * release transmitted buffers by kicking the Tx completion
2895	 * handler here. This limits the maximum reclamation delay to one
	 * second (the callout period).
2899 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
2901 lwkt_serialize_exit(ifp->if_serializer);
2905 alc_reset(struct alc_softc *sc)
2910 reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
2911 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2912 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2914 for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2916 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2920 device_printf(sc->alc_dev, "master reset timeout!\n");
2922 for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2923 if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2929 device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
2935 struct alc_softc *sc = xsc;
2936 struct ifnet *ifp = &sc->arpcom.ac_if;
2937 struct mii_data *mii;
2938 uint8_t eaddr[ETHER_ADDR_LEN];
2940 uint32_t reg, rxf_hi, rxf_lo;
2942 ASSERT_SERIALIZED(ifp->if_serializer);
2944 mii = device_get_softc(sc->alc_miibus);
2947 * Cancel any pending I/O.
2951 * Reset the chip to a known state.
2955 /* Initialize Rx descriptors. */
2956 if (alc_init_rx_ring(sc) != 0) {
2957 device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
2961 alc_init_rr_ring(sc);
2962 alc_init_tx_ring(sc);
2966 /* Reprogram the station address. */
2967 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2968 CSR_WRITE_4(sc, ALC_PAR0,
2969 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2970 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
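/*
 * Byte-order illustration: for the (hypothetical) MAC address
 * 00:11:22:33:44:55, ALC_PAR0 is written with 0x22334455 and ALC_PAR1
 * with 0x00000011, i.e. the hardware keeps the last four bytes in PAR0
 * and the first two in PAR1.
 */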
2972	 * Clear the WOL status and disable all WOL features, as WOL
2973	 * would interfere with Rx operation under normal environments.
2975 CSR_READ_4(sc, ALC_WOL_CFG);
2976 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2977 /* Set Tx descriptor base addresses. */
2978 paddr = sc->alc_rdata.alc_tx_ring_paddr;
2979 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2980 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2981 /* We don't use high priority ring. */
2982 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2983 /* Set Tx descriptor counter. */
2984 CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2985 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2986 /* Set Rx descriptor base addresses. */
2987 paddr = sc->alc_rdata.alc_rx_ring_paddr;
2988 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2989 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2990 /* We use one Rx ring. */
2991 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2992 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2993 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2994 /* Set Rx descriptor counter. */
2995 CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2996 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2999	 * Let the hardware split jumbo frames into alc_max_buf_sized chunks
3000	 * if they do not fit the buffer size. The Rx return descriptor holds
3001	 * a counter that indicates how many fragments were made by the
3002	 * hardware. The buffer size should be a multiple of 8 bytes.
3003	 * Since the hardware has a limit on the buffer size, always
3004	 * use the maximum value.
3005	 * For strict-alignment architectures, make sure to reduce the buffer
3006	 * size by 8 bytes to make room for the alignment fixup.
3008 sc->alc_buf_size = RX_BUF_SIZE_MAX;
3009 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
3011 paddr = sc->alc_rdata.alc_rr_ring_paddr;
3012 /* Set Rx return descriptor base addresses. */
3013 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
3014 /* We use one Rx return ring. */
3015 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
3016 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
3017 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
3018 /* Set Rx return descriptor counter. */
3019 CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
3020 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
3021 paddr = sc->alc_rdata.alc_cmb_paddr;
3022 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
3023 paddr = sc->alc_rdata.alc_smb_paddr;
3024 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
3025 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
3027 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
3028 /* Reconfigure SRAM - Vendor magic. */
3029 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
3030 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
3031 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
3032 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
3033 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
3034 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
3035 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
3036 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
3039 /* Tell hardware that we're ready to load DMA blocks. */
3040 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
3042 /* Configure interrupt moderation timer. */
3043 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
3044 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
3045 CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
3047	 * We don't want automatic interrupt clearing, as the task queue
3048	 * for the interrupt needs to see the interrupt status.
3050 reg = MASTER_SA_TIMER_ENB;
3051 if (ALC_USECS(sc->alc_int_rx_mod) != 0)
3052 reg |= MASTER_IM_RX_TIMER_ENB;
3053 if (ALC_USECS(sc->alc_int_tx_mod) != 0)
3054 reg |= MASTER_IM_TX_TIMER_ENB;
3055 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
3057 * Disable interrupt re-trigger timer. We don't want automatic
3058 * re-triggering of un-ACKed interrupts.
3060 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
3061 /* Configure CMB. */
3062 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
3063 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
3064 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
3066 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
3069	 * The hardware can be configured to issue an SMB interrupt based
3070	 * on a programmed interval. Since there is a callout that is
3071	 * invoked every hz in the driver, we use that instead of
3072	 * relying on the periodic SMB interrupt.
3074 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
3075 /* Clear MAC statistics. */
3076 alc_stats_clear(sc);
3079	 * Always use the maximum frame size that the controller can support.
3080	 * Otherwise, received frames that have a larger frame length
3081	 * than the alc(4) MTU would be silently dropped in hardware. This
3082	 * would make path-MTU discovery hard, as the sender wouldn't get
3083	 * any responses from the receiver. alc(4) supports
3084	 * multi-fragmented frames on the Rx path, so it has no issue
3085	 * assembling fragmented frames. Using the maximum frame size also
3086	 * removes the need to reinitialize the hardware when the interface
3087	 * MTU configuration is changed.
3089 * Be conservative in what you do, be liberal in what you
3090 * accept from others - RFC 793.
3092 CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
3094 /* Disable header split(?) */
3095 CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
3097 /* Configure IPG/IFG parameters. */
3098 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
3099 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
3100 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
3101 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
3102 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
3103 /* Set parameters for half-duplex media. */
3104 CSR_WRITE_4(sc, ALC_HDPX_CFG,
3105 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
3106 HDPX_CFG_LCOL_MASK) |
3107 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
3108 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
3109 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
3110 HDPX_CFG_ABEBT_MASK) |
3111 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
3112 HDPX_CFG_JAMIPG_MASK));
3114	 * Set the TSO/checksum offload threshold. For frames that are
3115	 * larger than this threshold, the hardware won't do
3116	 * TSO/checksum offloading.
3118 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
3119 (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
3120 TSO_OFFLOAD_THRESH_MASK);
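/*
 * The right shift converts the byte count into the register's native
 * unit of 1 << TSO_OFFLOAD_THRESH_UNIT_SHIFT bytes; an inference from
 * the shift, not from vendor documentation.
 */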
3121 /* Configure TxQ. */
3122 reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
3123 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
3124 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
3125 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
3128 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
3129 TXQ_CFG_TD_BURST_MASK;
3130 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
3132 /* Configure Rx free descriptor pre-fetching. */
3133 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
3134 ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
3135 RX_RD_FREE_THRESH_HI_MASK) |
3136 ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
3137 RX_RD_FREE_THRESH_LO_MASK));
3140 * Configure flow control parameters.
3141 * XON : 80% of Rx FIFO
3142 * XOFF : 30% of Rx FIFO
3144 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
3145 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) {
3146 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
3147 rxf_hi = (reg * 8) / 10;
3148 rxf_lo = (reg * 3) / 10;
3149 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
3150 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
3151 RX_FIFO_PAUSE_THRESH_LO_MASK) |
3152 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
3153 RX_FIFO_PAUSE_THRESH_HI_MASK));
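/*
 * Round-number example: if ALC_SRAM_RX_FIFO_LEN reads 1000, then
 * rxf_hi = 800 (the 80% XON mark) and rxf_lo = 300 (the 30% XOFF
 * mark), matching the comment above.
 */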
3156 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
3157 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2) {
3158 CSR_WRITE_4(sc, ALC_SERDES_LOCK,
3159 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
3160 SERDES_PHY_CLK_SLOWDOWN);
3163 /* Disable RSS until I understand L1C/L2C's RSS logic. */
3164 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
3165 CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
3167 /* Configure RxQ. */
3168 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
3169 RXQ_CFG_RD_BURST_MASK;
3170 reg |= RXQ_CFG_RSS_MODE_DIS;
3171 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
3172 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
3173 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3175 /* Configure DMA parameters. */
3176 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
3178 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3179 reg |= DMA_CFG_CMB_ENB;
3180 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
3181 reg |= DMA_CFG_SMB_ENB;
3183 reg |= DMA_CFG_SMB_DIS;
3184 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
3185 DMA_CFG_RD_BURST_SHIFT;
3186 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
3187 DMA_CFG_WR_BURST_SHIFT;
3188 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
3189 DMA_CFG_RD_DELAY_CNT_MASK;
3190 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
3191 DMA_CFG_WR_DELAY_CNT_MASK;
3192 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3195 * Configure Tx/Rx MACs.
3196 * - Auto-padding for short frames.
3197 * - Enable CRC generation.
3198	 * The actual reconfiguration of the MAC for the resolved speed/duplex
3199	 * follows once link establishment is detected.
3200	 * AR813x/AR815x always does checksum computation regardless
3201	 * of the MAC_CFG_RXCSUM_ENB bit. Also, the controller is known to
3202	 * have a bug in the protocol field of the Rx return structure, so
3203	 * these controllers can't handle fragmented frames. Disable
3204	 * Rx checksum offloading until there is a newer controller
3205	 * that has a sane implementation.
3207 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3208 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3209 MAC_CFG_PREAMBLE_MASK);
3210 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
3211 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
3212 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
3213 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
3215 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3216 reg |= MAC_CFG_SPEED_10_100;
3218 reg |= MAC_CFG_SPEED_1000;
3219 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3221 /* Set up the receive filter. */
3225	/* Acknowledge all pending interrupts and clear them. */
3226 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3227 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3228 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3230 sc->alc_flags &= ~ALC_FLAG_LINK;
3231 /* Switch to the current media. */
3234 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3236 ifp->if_flags |= IFF_RUNNING;
3237 ifq_clr_oactive(&ifp->if_snd);
3241 alc_stop(struct alc_softc *sc)
3243 struct ifnet *ifp = &sc->arpcom.ac_if;
3244 struct alc_txdesc *txd;
3245 struct alc_rxdesc *rxd;
3249 ASSERT_SERIALIZED(ifp->if_serializer);
3252 * Mark the interface down and cancel the watchdog timer.
3254 ifp->if_flags &= ~IFF_RUNNING;
3255 ifq_clr_oactive(&ifp->if_snd);
3256 sc->alc_flags &= ~ALC_FLAG_LINK;
3257 callout_stop(&sc->alc_tick_ch);
3258 sc->alc_watchdog_timer = 0;
3259 alc_stats_update(sc);
3260 /* Disable interrupts. */
3261 CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3262 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3265 reg = CSR_READ_4(sc, ALC_DMA_CFG);
3266 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3267 reg |= DMA_CFG_SMB_DIS;
3268 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3270 /* Stop Rx/Tx MACs. */
3272	/* Disable interrupts that might be touched by the taskq handler. */
3273 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3275 /* Reclaim Rx buffers that have been processed. */
3276 if (sc->alc_cdata.alc_rxhead != NULL)
3277 m_freem(sc->alc_cdata.alc_rxhead);
3278 ALC_RXCHAIN_RESET(sc);
3280 * Free Tx/Rx mbufs still in the queues.
3282 for (i = 0; i < ALC_RX_RING_CNT; i++) {
3283 rxd = &sc->alc_cdata.alc_rxdesc[i];
3284 if (rxd->rx_m != NULL) {
3285 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
3286 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3287 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
3293 for (i = 0; i < ALC_TX_RING_CNT; i++) {
3294 txd = &sc->alc_cdata.alc_txdesc[i];
3295 if (txd->tx_m != NULL) {
3296 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
3297 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3298 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
3307 alc_stop_mac(struct alc_softc *sc)
3312 /* Disable Rx/Tx MAC. */
3313 reg = CSR_READ_4(sc, ALC_MAC_CFG);
3314 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
3315	reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
3316 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3318 for (i = ALC_TIMEOUT; i > 0; i--) {
3319 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3325 device_printf(sc->alc_dev,
3326 "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
3330 alc_start_queue(struct alc_softc *sc)
3335 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
3336 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
3342 cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3343 cfg &= ~RXQ_CFG_ENB;
3345 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
3347 cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
3349 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
3353 alc_stop_queue(struct alc_softc *sc)
3359 reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3360 if ((reg & RXQ_CFG_ENB) != 0) {
3361 reg &= ~RXQ_CFG_ENB;
3362 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3365 reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3366	if ((reg & TXQ_CFG_ENB) != 0) {
3367 reg &= ~TXQ_CFG_ENB;
3368 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3370 for (i = ALC_TIMEOUT; i > 0; i--) {
3371 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3372 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3377 device_printf(sc->alc_dev,
3378 "could not disable RxQ/TxQ (0x%08x)!\n", reg);
3382 alc_init_tx_ring(struct alc_softc *sc)
3384 struct alc_ring_data *rd;
3385 struct alc_txdesc *txd;
3388 sc->alc_cdata.alc_tx_prod = 0;
3389 sc->alc_cdata.alc_tx_cons = 0;
3390 sc->alc_cdata.alc_tx_cnt = 0;
3392 rd = &sc->alc_rdata;
3393 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
3394 for (i = 0; i < ALC_TX_RING_CNT; i++) {
3395 txd = &sc->alc_cdata.alc_txdesc[i];
3399 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
3400 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
3404 alc_init_rx_ring(struct alc_softc *sc)
3406 struct alc_ring_data *rd;
3407 struct alc_rxdesc *rxd;
3410 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3411 rd = &sc->alc_rdata;
3412 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
3413 for (i = 0; i < ALC_RX_RING_CNT; i++) {
3414 rxd = &sc->alc_cdata.alc_rxdesc[i];
3416 rxd->rx_desc = &rd->alc_rx_ring[i];
3417 if (alc_newbuf(sc, rxd, TRUE) != 0)
3422	 * Since the controller does not update Rx descriptors, the driver
3423	 * does not have to read them back, so BUS_DMASYNC_PREWRITE
3424	 * is enough to ensure coherence.
3426 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
3427 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
3428 /* Let controller know availability of new Rx buffers. */
3429 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
3435 alc_init_rr_ring(struct alc_softc *sc)
3437 struct alc_ring_data *rd;
3439 sc->alc_cdata.alc_rr_cons = 0;
3440 ALC_RXCHAIN_RESET(sc);
3442 rd = &sc->alc_rdata;
3443 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
3444 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
3445 sc->alc_cdata.alc_rr_ring_map,
3446 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3450 alc_init_cmb(struct alc_softc *sc)
3452 struct alc_ring_data *rd;
3454 rd = &sc->alc_rdata;
3455 bzero(rd->alc_cmb, ALC_CMB_SZ);
3456 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
3457 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3461 alc_init_smb(struct alc_softc *sc)
3463 struct alc_ring_data *rd;
3465 rd = &sc->alc_rdata;
3466 bzero(rd->alc_smb, ALC_SMB_SZ);
3467 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
3468 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3472 alc_rxvlan(struct alc_softc *sc)
3478 reg = CSR_READ_4(sc, ALC_MAC_CFG);
3479 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3480 reg |= MAC_CFG_VLAN_TAG_STRIP;
3482 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3483 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3487 alc_rxfilter(struct alc_softc *sc)
3490 struct ifmultiaddr *ifma;
3497 bzero(mchash, sizeof(mchash));
3498 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3499 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3500 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3501 rxcfg |= MAC_CFG_BCAST;
3502 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3503 if ((ifp->if_flags & IFF_PROMISC) != 0)
3504 rxcfg |= MAC_CFG_PROMISC;
3505 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3506 rxcfg |= MAC_CFG_ALLMULTI;
3507 mchash[0] = 0xFFFFFFFF;
3508 mchash[1] = 0xFFFFFFFF;
3514 if_maddr_rlock(ifp);
3516 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
3517 if (ifma->ifma_addr->sa_family != AF_LINK)
3519 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3520 ifma->ifma_addr), ETHER_ADDR_LEN);
3521 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
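/*
 * The big-endian CRC of the multicast address indexes a 64-bit hash
 * table: bit 31 of the CRC picks MAR0 or MAR1 and bits 30-26 pick the
 * bit within that 32-bit register.
 */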
3525 if_maddr_runlock(ifp);
3529 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3530 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
3531 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
3535 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
3537 return (sysctl_int_range(oidp, arg1, arg2, req,
3538 ALC_PROC_MIN, ALC_PROC_MAX));
3542 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
3545 return (sysctl_int_range(oidp, arg1, arg2, req,
3546 ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));