network code: Convert if_multiaddrs from LIST to TAILQ.
[dragonfly.git] / sys / dev / netif / ae / if_ae.c
1/*-
2 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
26 *
27 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
28 *
29 * $FreeBSD: src/sys/dev/ae/if_ae.c,v 1.1.2.3.2.1 2009/04/15 03:14:26 kensmith Exp $
30 */
31
32#include <sys/param.h>
33#include <sys/endian.h>
34#include <sys/kernel.h>
35#include <sys/bus.h>
36#include <sys/interrupt.h>
37#include <sys/malloc.h>
38#include <sys/proc.h>
39#include <sys/rman.h>
40#include <sys/serialize.h>
41#include <sys/socket.h>
42#include <sys/sockio.h>
43#include <sys/sysctl.h>
44
45#include <net/ethernet.h>
46#include <net/if.h>
47#include <net/bpf.h>
48#include <net/if_arp.h>
49#include <net/if_dl.h>
50#include <net/if_media.h>
51#include <net/ifq_var.h>
52#include <net/vlan/if_vlan_var.h>
53#include <net/vlan/if_vlan_ether.h>
54
55#include <bus/pci/pcireg.h>
56#include <bus/pci/pcivar.h>
57#include <bus/pci/pcidevs.h>
58
59#include <dev/netif/mii_layer/miivar.h>
60
61#include <dev/netif/ae/if_aereg.h>
62#include <dev/netif/ae/if_aevar.h>
63
64/* "device miibus" required. See GENERIC if you get errors here. */
65#include "miibus_if.h"
66
67/*
68 * Devices supported by this driver.
69 */
70static const struct ae_dev {
71 uint16_t ae_vendorid;
72 uint16_t ae_deviceid;
73 const char *ae_name;
74} ae_devs[] = {
75 { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
76 "Attansic Technology Corp, L2 Fast Ethernet" },
77 /* Required last entry */
78 { 0, 0, NULL }
79};
80
81
82static int ae_probe(device_t);
83static int ae_attach(device_t);
84static int ae_detach(device_t);
85static int ae_shutdown(device_t);
86static int ae_suspend(device_t);
87static int ae_resume(device_t);
88static int ae_miibus_readreg(device_t, int, int);
89static int ae_miibus_writereg(device_t, int, int, int);
90static void ae_miibus_statchg(device_t);
91
92static int ae_mediachange(struct ifnet *);
93static void ae_mediastatus(struct ifnet *, struct ifmediareq *);
94static void ae_init(void *);
95static void ae_start(struct ifnet *);
96static int ae_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
97static void ae_watchdog(struct ifnet *);
98static void ae_stop(struct ae_softc *);
99static void ae_tick(void *);
100
101static void ae_intr(void *);
102static void ae_tx_intr(struct ae_softc *);
103static void ae_rx_intr(struct ae_softc *);
104static int ae_rxeof(struct ae_softc *, struct ae_rxd *);
105
106static int ae_encap(struct ae_softc *, struct mbuf **);
107static void ae_sysctl_node(struct ae_softc *);
108static void ae_phy_reset(struct ae_softc *);
109static int ae_reset(struct ae_softc *);
110static void ae_pcie_init(struct ae_softc *);
111static void ae_get_eaddr(struct ae_softc *);
112static void ae_dma_free(struct ae_softc *);
113static int ae_dma_alloc(struct ae_softc *);
114static void ae_mac_config(struct ae_softc *);
115static void ae_stop_rxmac(struct ae_softc *);
116static void ae_stop_txmac(struct ae_softc *);
117static void ae_rxfilter(struct ae_softc *);
118static void ae_rxvlan(struct ae_softc *);
119static void ae_update_stats_rx(uint16_t, struct ae_stats *);
120static void ae_update_stats_tx(uint16_t, struct ae_stats *);
121static void ae_powersave_disable(struct ae_softc *);
122static void ae_powersave_enable(struct ae_softc *);
123
124static device_method_t ae_methods[] = {
125 /* Device interface. */
126 DEVMETHOD(device_probe, ae_probe),
127 DEVMETHOD(device_attach, ae_attach),
128 DEVMETHOD(device_detach, ae_detach),
129 DEVMETHOD(device_shutdown, ae_shutdown),
130 DEVMETHOD(device_suspend, ae_suspend),
131 DEVMETHOD(device_resume, ae_resume),
132
133 /* Bus interface. */
134 DEVMETHOD(bus_print_child, bus_generic_print_child),
135 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
136
137 /* MII interface. */
138 DEVMETHOD(miibus_readreg, ae_miibus_readreg),
139 DEVMETHOD(miibus_writereg, ae_miibus_writereg),
140 DEVMETHOD(miibus_statchg, ae_miibus_statchg),
141 { NULL, NULL }
142};
143
144static driver_t ae_driver = {
145 "ae",
146 ae_methods,
147 sizeof(struct ae_softc)
148};
149
150static devclass_t ae_devclass;
151DECLARE_DUMMY_MODULE(if_ae);
152MODULE_DEPEND(if_ae, miibus, 1, 1, 1);
153DRIVER_MODULE(if_ae, pci, ae_driver, ae_devclass, 0, 0);
154DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
155
156/* Register access macros. */
157#define AE_WRITE_4(_sc, reg, val) \
158 bus_space_write_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
159#define AE_WRITE_2(_sc, reg, val) \
160 bus_space_write_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
161#define AE_WRITE_1(_sc, reg, val) \
162 bus_space_write_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg), (val))
163#define AE_READ_4(_sc, reg) \
164 bus_space_read_4((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
165#define AE_READ_2(_sc, reg) \
166 bus_space_read_2((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
167#define AE_READ_1(_sc, reg) \
168 bus_space_read_1((_sc)->ae_mem_bt, (_sc)->ae_mem_bh, (reg))
169
170#define AE_PHY_READ(sc, reg) \
171 ae_miibus_readreg(sc->ae_dev, 0, reg)
172#define AE_PHY_WRITE(sc, reg, val) \
173 ae_miibus_writereg(sc->ae_dev, 0, reg, val)
174#define AE_CHECK_EADDR_VALID(eaddr) \
175 ((eaddr[0] == 0 && eaddr[1] == 0) || \
176 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
177#define AE_RXD_VLAN(vtag) \
178 (((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
179#define AE_TXD_VLAN(vtag) \
180 (((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
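
#if 0
/*
 * Illustrative round trip of the two macros above (a sketch added for
 * clarity; not part of the original driver): for an 802.1Q TCI of
 * 0x2001 (priority 1, CFI 0, VID 1), AE_TXD_VLAN() yields 0x0011 in
 * 16 bits, i.e. the hardware keeps the VID in bits 15:4, the CFI in
 * bit 3 and the priority in bits 2:0; AE_RXD_VLAN() undoes it.
 */
static void
ae_vlan_tag_example(void)
{
	uint16_t tci = 0x2001;
	uint16_t hw = AE_TXD_VLAN(tci);	/* 0x0011 */

	KKASSERT(AE_RXD_VLAN(hw) == tci);
}
#endif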
181
182/*
183 * ae statistics.
184 */
185#define STATS_ENTRY(node, desc, field) \
186 { node, desc, offsetof(struct ae_stats, field) }
187struct {
188 const char *node;
189 const char *desc;
190 intptr_t offset;
191} ae_stats_tx[] = {
192 STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
193 STATS_ENTRY("mcast", "multicast frames", tx_mcast),
194 STATS_ENTRY("pause", "PAUSE frames", tx_pause),
195 STATS_ENTRY("control", "control frames", tx_ctrl),
196 STATS_ENTRY("defers", "deferrals occuried", tx_defer),
197 STATS_ENTRY("exc_defers", "excessive deferrals occuried", tx_excdefer),
198 STATS_ENTRY("singlecols", "single collisions occuried", tx_singlecol),
199 STATS_ENTRY("multicols", "multiple collisions occuried", tx_multicol),
200 STATS_ENTRY("latecols", "late collisions occuried", tx_latecol),
201 STATS_ENTRY("aborts", "transmit aborts due collisions", tx_abortcol),
202 STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
203}, ae_stats_rx[] = {
204 STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
205 STATS_ENTRY("mcast", "multicast frames", rx_mcast),
206 STATS_ENTRY("pause", "PAUSE frames", rx_pause),
207 STATS_ENTRY("control", "control frames", rx_ctrl),
208 STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
209 STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
210 STATS_ENTRY("runt", "runt frames", rx_runt),
211 STATS_ENTRY("frag", "fragmented frames", rx_frag),
212 STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
213 STATS_ENTRY("truncated", "frames truncated due to Rx FIFO inderrun",
214 rx_trunc)
215};
216#define AE_STATS_RX_LEN (sizeof(ae_stats_rx) / sizeof(*ae_stats_rx))
217#define AE_STATS_TX_LEN (sizeof(ae_stats_tx) / sizeof(*ae_stats_tx))
218
219static void
220ae_stop(struct ae_softc *sc)
221{
222 struct ifnet *ifp = &sc->arpcom.ac_if;
223 int i;
224
225 ASSERT_SERIALIZED(ifp->if_serializer);
226
227 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
228 ifp->if_timer = 0;
229
230 sc->ae_flags &= ~AE_FLAG_LINK;
231 callout_stop(&sc->ae_tick_ch);
232
233 /*
234 * Clear and disable interrupts.
235 */
236 AE_WRITE_4(sc, AE_IMR_REG, 0);
237 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
238
239 /*
240 * Stop Rx/Tx MACs.
241 */
242 ae_stop_txmac(sc);
243 ae_stop_rxmac(sc);
244
245 /*
246 * Stop DMA engines.
247 */
248 AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
249 AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);
250
251 /*
252 * Wait for everything to enter idle state.
253 */
254 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
255 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
256 break;
257 DELAY(100);
258 }
259 if (i == AE_IDLE_TIMEOUT)
260 if_printf(ifp, "could not enter idle state in stop.\n");
261}
262
263static void
264ae_stop_rxmac(struct ae_softc *sc)
265{
266 uint32_t val;
267 int i;
268
269 /*
270 * Stop Rx MAC engine.
271 */
272 val = AE_READ_4(sc, AE_MAC_REG);
273 if ((val & AE_MAC_RX_EN) != 0) {
274 val &= ~AE_MAC_RX_EN;
275 AE_WRITE_4(sc, AE_MAC_REG, val);
276 }
277
278 /*
279 * Stop Rx DMA engine.
280 */
281 if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
282 AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
283
284 /*
285 * Wait for IDLE state.
286 */
287 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
288 val = AE_READ_4(sc, AE_IDLE_REG);
289 if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
290 break;
291 DELAY(100);
292 }
293 if (i == AE_IDLE_TIMEOUT) {
294 if_printf(&sc->arpcom.ac_if,
295 "timed out while stopping Rx MAC.\n");
296 }
297}
298
299static void
300ae_stop_txmac(struct ae_softc *sc)
301{
302 uint32_t val;
303 int i;
304
305 /*
306 * Stop Tx MAC engine.
307 */
308 val = AE_READ_4(sc, AE_MAC_REG);
309 if ((val & AE_MAC_TX_EN) != 0) {
310 val &= ~AE_MAC_TX_EN;
311 AE_WRITE_4(sc, AE_MAC_REG, val);
312 }
313
314 /*
315 * Stop Tx DMA engine.
316 */
317 if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
318 AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
319
320 /*
321 * Wait for IDLE state.
322 */
323 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
324 val = AE_READ_4(sc, AE_IDLE_REG);
325 if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
326 break;
327 DELAY(100);
328 }
329 if (i == AE_IDLE_TIMEOUT) {
330 if_printf(&sc->arpcom.ac_if,
331 "timed out while stopping Tx MAC.\n");
332 }
333}
334
335/*
336 * Callback from MII layer when media changes.
337 */
338static void
339ae_miibus_statchg(device_t dev)
340{
341 struct ae_softc *sc = device_get_softc(dev);
342 struct ifnet *ifp = &sc->arpcom.ac_if;
343 struct mii_data *mii;
344 uint32_t val;
345
346 ASSERT_SERIALIZED(ifp->if_serializer);
347
348 if ((ifp->if_flags & IFF_RUNNING) == 0)
349 return;
350
351 mii = device_get_softc(sc->ae_miibus);
352 sc->ae_flags &= ~AE_FLAG_LINK;
353 if ((mii->mii_media_status & IFM_AVALID) != 0) {
354 switch (IFM_SUBTYPE(mii->mii_media_active)) {
355 case IFM_10_T:
356 case IFM_100_TX:
357 sc->ae_flags |= AE_FLAG_LINK;
358 break;
359 default:
360 break;
361 }
362 }
363
364 /* Stop Rx/Tx MACs. */
365 ae_stop_rxmac(sc);
366 ae_stop_txmac(sc);
367
368 /* Program MACs with resolved speed/duplex/flow-control. */
369 if ((sc->ae_flags & AE_FLAG_LINK) != 0) {
370 ae_mac_config(sc);
371
372 /*
373 * Restart DMA engines.
374 */
375 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
376 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
377
378 /*
379 * Enable Rx and Tx MACs.
380 */
381 val = AE_READ_4(sc, AE_MAC_REG);
382 val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
383 AE_WRITE_4(sc, AE_MAC_REG, val);
384 }
385}
386
387static void
388ae_sysctl_node(struct ae_softc *sc)
389{
390 struct sysctl_ctx_list *ctx;
391 struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
392 struct ae_stats *ae_stats;
393 unsigned int i;
394
395 ae_stats = &sc->stats;
396 sysctl_ctx_init(&sc->ae_sysctl_ctx);
397 sc->ae_sysctl_tree = SYSCTL_ADD_NODE(&sc->ae_sysctl_ctx,
398 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
399 device_get_nameunit(sc->ae_dev),
400 CTLFLAG_RD, 0, "");
401 if (sc->ae_sysctl_tree == NULL) {
402 device_printf(sc->ae_dev, "can't add sysctl node\n");
403 return;
404 }
405 ctx = &sc->ae_sysctl_ctx;
406 root = sc->ae_sysctl_tree;
407
408 stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
409 CTLFLAG_RD, NULL, "ae statistics");
410 if (stats == NULL) {
411 device_printf(sc->ae_dev, "can't add stats sysctl node\n");
412 return;
413 }
414
415 /*
416 * Receiver statistics.
417 */
418 stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
419 CTLFLAG_RD, NULL, "Rx MAC statistics");
420 if (stats_rx != NULL) {
421 for (i = 0; i < AE_STATS_RX_LEN; i++) {
422 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx),
423 OID_AUTO, ae_stats_rx[i].node, CTLFLAG_RD,
424 (char *)ae_stats + ae_stats_rx[i].offset, 0,
425 ae_stats_rx[i].desc);
426 }
427 }
428
429 /*
430 * Transmitter statistics.
431 */
432 stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
433 CTLFLAG_RD, NULL, "Tx MAC statistics");
434 if (stats_tx != NULL) {
435 for (i = 0; i < AE_STATS_TX_LEN; i++) {
436 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx),
437 OID_AUTO, ae_stats_tx[i].node, CTLFLAG_RD,
438 (char *)ae_stats + ae_stats_tx[i].offset, 0,
439 ae_stats_tx[i].desc);
440 }
441 }
442}
443
444static int
445ae_miibus_readreg(device_t dev, int phy, int reg)
446{
447 struct ae_softc *sc = device_get_softc(dev);
448 uint32_t val;
449 int i;
450
451 /*
452 * Locking is done in upper layers.
453 */
454 if (phy != sc->ae_phyaddr)
455 return (0);
456 val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
457 AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
458 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
459 AE_WRITE_4(sc, AE_MDIO_REG, val);
460
461 /*
462 * Wait for operation to complete.
463 */
464 for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
465 DELAY(2);
466 val = AE_READ_4(sc, AE_MDIO_REG);
467 if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
468 break;
469 }
470 if (i == AE_MDIO_TIMEOUT) {
471 device_printf(sc->ae_dev, "phy read timeout: %d.\n", reg);
472 return (0);
473 }
474 return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
475}
476
477static int
478ae_miibus_writereg(device_t dev, int phy, int reg, int val)
479{
480 struct ae_softc *sc = device_get_softc(dev);
481 uint32_t aereg;
482 int i;
483
484 /*
485 * Locking is done in upper layers.
486 */
487 if (phy != sc->ae_phyaddr)
488 return (0);
489 aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
490 AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
491 ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
492 ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
493 AE_WRITE_4(sc, AE_MDIO_REG, aereg);
494
495 /*
496 * Wait for operation to complete.
497 */
498 for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
499 DELAY(2);
500 aereg = AE_READ_4(sc, AE_MDIO_REG);
501 if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
502 break;
503 }
504 if (i == AE_MDIO_TIMEOUT)
505 device_printf(sc->ae_dev, "phy write timeout: %d.\n", reg);
506 return (0);
507}
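
/*
 * Note: both MDIO accessors above work the same way: build a command
 * word in AE_MDIO_REG, then poll it every 2 microseconds, up to
 * AE_MDIO_TIMEOUT polls, until the chip clears AE_MDIO_START and
 * AE_MDIO_BUSY.  On timeout the read path returns 0 and the write
 * path only logs the failure.
 */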
508
509static int
510ae_probe(device_t dev)
511{
512 uint16_t vendor, devid;
513 const struct ae_dev *sp;
514
515 vendor = pci_get_vendor(dev);
516 devid = pci_get_device(dev);
517 for (sp = ae_devs; sp->ae_name != NULL; sp++) {
518 if (vendor == sp->ae_vendorid &&
519 devid == sp->ae_deviceid) {
520 device_set_desc(dev, sp->ae_name);
521 return (0);
522 }
523 }
524 return (ENXIO);
525}
526
527static int
528ae_dma_alloc(struct ae_softc *sc)
529{
530 bus_addr_t busaddr;
531 int error;
532
533 /*
534 * Create parent DMA tag.
535 */
536 error = bus_dma_tag_create(NULL, 1, 0,
537 BUS_SPACE_MAXADDR_32BIT,
538 BUS_SPACE_MAXADDR,
539 NULL, NULL,
540 BUS_SPACE_MAXSIZE_32BIT,
541 0,
542 BUS_SPACE_MAXSIZE_32BIT,
543 0, &sc->dma_parent_tag);
544 if (error) {
545 device_printf(sc->ae_dev, "could not create parent DMA tag.\n");
546 return (error);
547 }
548
549 /*
550 * Create DMA stuffs for TxD.
551 */
552 sc->txd_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
553 AE_TXD_BUFSIZE_DEFAULT, BUS_DMA_WAITOK | BUS_DMA_ZERO,
554 &sc->dma_txd_tag, &sc->dma_txd_map,
555 &sc->dma_txd_busaddr);
556 if (sc->txd_base == NULL) {
557 device_printf(sc->ae_dev, "could not create TxD DMA stuffs.\n");
558 return ENOMEM;
559 }
560
561 /*
562 * Create DMA stuffs for TxS.
563 */
564 sc->txs_base = bus_dmamem_coherent_any(sc->dma_parent_tag, 4,
565 AE_TXS_COUNT_DEFAULT * 4, BUS_DMA_WAITOK | BUS_DMA_ZERO,
566 &sc->dma_txs_tag, &sc->dma_txs_map,
567 &sc->dma_txs_busaddr);
568 if (sc->txs_base == NULL) {
569 device_printf(sc->ae_dev, "could not create TxS DMA stuffs.\n");
570 return ENOMEM;
571 }
572
573 /*
574 * Create DMA stuffs for RxD.
575 */
576 sc->rxd_base_dma = bus_dmamem_coherent_any(sc->dma_parent_tag, 128,
577 AE_RXD_COUNT_DEFAULT * 1536 + 120,
578 BUS_DMA_WAITOK | BUS_DMA_ZERO,
579 &sc->dma_rxd_tag, &sc->dma_rxd_map,
580 &busaddr);
581 if (sc->rxd_base_dma == NULL) {
582 device_printf(sc->ae_dev, "could not create RxD DMA stuffs.\n");
583 return ENOMEM;
584 }
585 sc->dma_rxd_busaddr = busaddr + 120;
586 sc->rxd_base = (struct ae_rxd *)(sc->rxd_base_dma + 120);
587
588 return (0);
589}
590
591static void
592ae_mac_config(struct ae_softc *sc)
593{
594 struct mii_data *mii;
595 uint32_t val;
596
597 mii = device_get_softc(sc->ae_miibus);
598 val = AE_READ_4(sc, AE_MAC_REG);
599 val &= ~AE_MAC_FULL_DUPLEX;
600 /* XXX disable AE_MAC_TX_FLOW_EN? */
601 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
602 val |= AE_MAC_FULL_DUPLEX;
603 AE_WRITE_4(sc, AE_MAC_REG, val);
604}
605
606static int
607ae_rxeof(struct ae_softc *sc, struct ae_rxd *rxd)
608{
609 struct ifnet *ifp = &sc->arpcom.ac_if;
610 struct mbuf *m;
611 unsigned int size;
612 uint16_t flags;
613
614 flags = le16toh(rxd->flags);
615#ifdef AE_DEBUG
616 if_printf(ifp, "Rx interrupt occurred.\n");
617#endif
618 size = le16toh(rxd->len) - ETHER_CRC_LEN;
619 if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN -
620 sizeof(struct ether_vlan_header))) {
621 if_printf(ifp, "Runt frame received.");
622 return (EIO);
623 }
624
625 m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
626 if (m == NULL)
627 return (ENOBUFS);
628
629 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
630 (flags & AE_RXD_HAS_VLAN)) {
631 m->m_pkthdr.ether_vlantag = AE_RXD_VLAN(le16toh(rxd->vlan));
632 m->m_flags |= M_VLANTAG;
633 }
634 ifp->if_input(ifp, m);
635
636 return (0);
637}
638
639static void
640ae_rx_intr(struct ae_softc *sc)
641{
642 struct ifnet *ifp = &sc->arpcom.ac_if;
643 struct ae_rxd *rxd;
644 uint16_t flags;
645 int error;
646
647 /*
648 * Synchronize DMA buffers.
649 */
650 bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
651 BUS_DMASYNC_POSTREAD);
652 for (;;) {
653 rxd = (struct ae_rxd *)(sc->rxd_base + sc->rxd_cur);
654
655 flags = le16toh(rxd->flags);
656 if ((flags & AE_RXD_UPDATE) == 0)
657 break;
658 rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
659
660 /* Update stats. */
661 ae_update_stats_rx(flags, &sc->stats);
662
663 /*
664 * Update position index.
665 */
666 sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;
667 if ((flags & AE_RXD_SUCCESS) == 0) {
668 ifp->if_ierrors++;
669 continue;
670 }
671
672 error = ae_rxeof(sc, rxd);
673 if (error)
674 ifp->if_ierrors++;
675 else
676 ifp->if_ipackets++;
677 }
678
679 /* Update Rx index. */
680 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
681}
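
/*
 * Note on the RxD handshake above: the chip sets AE_RXD_UPDATE in each
 * descriptor it has filled; the loop clears that bit to hand the slot
 * back and finally reports how far it got by writing rxd_cur to the
 * AE_MB_RXD_IDX_REG mailbox.
 */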
682
683static void
684ae_tx_intr(struct ae_softc *sc)
685{
686 struct ifnet *ifp = &sc->arpcom.ac_if;
687 struct ae_txd *txd;
688 struct ae_txs *txs;
689 uint16_t flags;
690
691 /*
692 * Synchronize DMA buffers.
693 */
694 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_POSTREAD);
695 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_POSTREAD);
696
697 for (;;) {
698 txs = sc->txs_base + sc->txs_ack;
699
700 flags = le16toh(txs->flags);
701 if ((flags & AE_TXS_UPDATE) == 0)
702 break;
703 txs->flags = htole16(flags & ~AE_TXS_UPDATE);
704
705 /* Update stats. */
706 ae_update_stats_tx(flags, &sc->stats);
707
708 /*
709 * Update TxS position.
710 */
711 sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
712 sc->ae_flags |= AE_FLAG_TXAVAIL;
713 txd = (struct ae_txd *)(sc->txd_base + sc->txd_ack);
714 if (txs->len != txd->len) {
715 device_printf(sc->ae_dev, "Size mismatch: "
716 "TxS:%d TxD:%d\n",
717 le16toh(txs->len), le16toh(txd->len));
718 }
719
720 /*
721 * Move txd ack and align on 4-byte boundary.
722 */
723 sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
724 AE_TXD_BUFSIZE_DEFAULT;
725 if ((flags & AE_TXS_SUCCESS) != 0)
726 ifp->if_opackets++;
727 else
728 ifp->if_oerrors++;
729 sc->tx_inproc--;
730 }
731
732 if (sc->tx_inproc < 0) {
733 /* XXX assert? */
734 if_printf(ifp, "Received stray Tx interrupt(s).\n");
735 sc->tx_inproc = 0;
736 }
737 if (sc->tx_inproc == 0)
738 ifp->if_timer = 0; /* Unarm watchdog. */
739 if (sc->ae_flags & AE_FLAG_TXAVAIL) {
740 ifp->if_flags &= ~IFF_OACTIVE;
741 if (!ifq_is_empty(&ifp->if_snd))
742#ifdef foo
743 ae_intr(sc);
744#else
745 if_devstart(ifp);
746#endif
747 }
748
749 /*
750 * Synchronize DMA buffers.
751 */
752 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
753 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
754}
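
/*
 * Worked example of the txd_ack arithmetic above: acknowledging a
 * 60-byte frame at txd_ack 0 skips the 4-byte TxD header plus the
 * payload rounded up to a 4-byte boundary, i.e.
 * ((0 + 60 + 4 + 3) & ~3) = 64, the same 64 bytes the frame consumed
 * when it was queued in ae_encap().
 */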
755
756static void
757ae_intr(void *xsc)
758{
759 struct ae_softc *sc = xsc;
760 struct ifnet *ifp = &sc->arpcom.ac_if;
761 uint32_t val;
762
763 ASSERT_SERIALIZED(ifp->if_serializer);
764
765 val = AE_READ_4(sc, AE_ISR_REG);
766 if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
767 return;
768
769#ifdef foo
770 AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
771#endif
772
773 /* Read interrupt status. */
774 val = AE_READ_4(sc, AE_ISR_REG);
775
776 /* Clear interrupts and disable them. */
777 AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);
778
779 if (ifp->if_flags & IFF_RUNNING) {
780 if (val & (AE_ISR_DMAR_TIMEOUT |
781 AE_ISR_DMAW_TIMEOUT |
782 AE_ISR_PHY_LINKDOWN)) {
783 ae_init(sc);
784 }
785 if (val & AE_ISR_TX_EVENT)
786 ae_tx_intr(sc);
787 if (val & AE_ISR_RX_EVENT)
788 ae_rx_intr(sc);
789 }
790
791 /* Re-enable interrupts. */
792 AE_WRITE_4(sc, AE_ISR_REG, 0);
793}
794
795static void
796ae_init(void *xsc)
797{
798 struct ae_softc *sc = xsc;
799 struct ifnet *ifp = &sc->arpcom.ac_if;
800 struct mii_data *mii;
801 uint8_t eaddr[ETHER_ADDR_LEN];
802 uint32_t val;
803 bus_addr_t addr;
804
805 ASSERT_SERIALIZED(ifp->if_serializer);
806
807 mii = device_get_softc(sc->ae_miibus);
808 ae_stop(sc);
809 ae_reset(sc);
810 ae_pcie_init(sc);
811 ae_powersave_disable(sc);
812
813 /*
814 * Clear and disable interrupts.
815 */
816 AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);
817
818 /*
819 * Set the MAC address.
820 */
821 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
822 val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
823 AE_WRITE_4(sc, AE_EADDR0_REG, val);
824 val = eaddr[0] << 8 | eaddr[1];
825 AE_WRITE_4(sc, AE_EADDR1_REG, val);
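 /*
 * E.g. for the address 02:1f:c6:aa:bb:cc this stores 0xc6aabbcc in
 * AE_EADDR0_REG and 0x021f in AE_EADDR1_REG.
 */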
826
827 /*
828 * Set ring buffers base addresses.
829 */
830 addr = sc->dma_rxd_busaddr;
831 AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
832 AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
833 addr = sc->dma_txd_busaddr;
834 AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
835 addr = sc->dma_txs_busaddr;
836 AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));
837
838 /*
839 * Configure ring buffers sizes.
840 */
841 AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
842 AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
843 AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);
844
845 /*
846 * Configure interframe gap parameters.
847 */
848 val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
849 AE_IFG_TXIPG_MASK) |
850 ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
851 AE_IFG_RXIPG_MASK) |
852 ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
853 AE_IFG_IPGR1_MASK) |
854 ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
855 AE_IFG_IPGR2_MASK);
856 AE_WRITE_4(sc, AE_IFG_REG, val);
857
858 /*
859 * Configure half-duplex operation.
860 */
861 val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
862 AE_HDPX_LCOL_MASK) |
863 ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
864 AE_HDPX_RETRY_MASK) |
865 ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
866 AE_HDPX_ABEBT_MASK) |
867 ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
868 AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
869 AE_WRITE_4(sc, AE_HDPX_REG, val);
870
871 /*
872 * Configure interrupt moderate timer.
873 */
874 AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
875 val = AE_READ_4(sc, AE_MASTER_REG);
876 val |= AE_MASTER_IMT_EN;
877 AE_WRITE_4(sc, AE_MASTER_REG, val);
878
879 /*
880 * Configure interrupt clearing timer.
881 */
882 AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);
883
884 /*
885 * Configure MTU.
886 */
887 val = ifp->if_mtu + ETHER_HDR_LEN + sizeof(struct ether_vlan_header) +
888 ETHER_CRC_LEN;
889 AE_WRITE_2(sc, AE_MTU_REG, val);
890
891 /*
892 * Configure cut-through threshold.
893 */
894 AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);
895
896 /*
897 * Configure flow control.
898 */
899 AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
900 AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
901 (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
902 (AE_RXD_COUNT_DEFAULT / 12));
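 /*
 * I.e. the Rx flow-control high watermark sits at 7/8 of the ring and
 * the low watermark at the larger of AE_RXD_COUNT_MIN / 8 and
 * AE_RXD_COUNT_DEFAULT / 12.
 */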
903
904 /*
905 * Init mailboxes.
906 */
907 sc->txd_cur = sc->rxd_cur = 0;
909 sc->txs_ack = sc->txd_ack = 0;
910 sc->rxd_cur = 0;
911 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
912 AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
913 sc->tx_inproc = 0;
914 sc->ae_flags |= AE_FLAG_TXAVAIL; /* Free Tx's available. */
915
916 /*
917 * Enable DMA.
918 */
919 AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
920 AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);
921
922 /*
923 * Check if everything is OK.
924 */
925 val = AE_READ_4(sc, AE_ISR_REG);
926 if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
927 device_printf(sc->ae_dev, "Initialization failed.\n");
928 return;
929 }
930
931 /*
932 * Clear interrupt status.
933 */
934 AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
935 AE_WRITE_4(sc, AE_ISR_REG, 0x0);
936
937 /*
938 * Enable interrupts.
939 */
940 val = AE_READ_4(sc, AE_MASTER_REG);
941 AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
942 AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);
943
944 /*
945 * Disable WOL.
946 */
947 AE_WRITE_4(sc, AE_WOL_REG, 0);
948
949 /*
950 * Configure MAC.
951 */
952 val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
953 AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
954 AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
955 ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
956 ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
957 AE_MAC_PREAMBLE_MASK);
958 AE_WRITE_4(sc, AE_MAC_REG, val);
959
960 /*
961 * Configure Rx MAC.
962 */
963 ae_rxfilter(sc);
964 ae_rxvlan(sc);
965
966 /*
967 * Enable Tx/Rx.
968 */
969 val = AE_READ_4(sc, AE_MAC_REG);
970 AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);
971
972 sc->ae_flags &= ~AE_FLAG_LINK;
973 mii_mediachg(mii); /* Switch to the current media. */
974
975 callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
976 ifp->if_flags |= IFF_RUNNING;
977 ifp->if_flags &= ~IFF_OACTIVE;
978}
979
980static void
981ae_watchdog(struct ifnet *ifp)
982{
983 struct ae_softc *sc = ifp->if_softc;
984
985 ASSERT_SERIALIZED(ifp->if_serializer);
986
987 if ((sc->ae_flags & AE_FLAG_LINK) == 0)
988 if_printf(ifp, "watchdog timeout (missed link).\n");
989 else
990 if_printf(ifp, "watchdog timeout - resetting.\n");
991 ifp->if_oerrors++;
992
993 ae_init(sc);
994 if (!ifq_is_empty(&ifp->if_snd))
995 if_devstart(ifp);
996}
997
998static void
999ae_tick(void *xsc)
1000{
1001 struct ae_softc *sc = xsc;
1002 struct ifnet *ifp = &sc->arpcom.ac_if;
1003 struct mii_data *mii = device_get_softc(sc->ae_miibus);
1004
1005 lwkt_serialize_enter(ifp->if_serializer);
1006 mii_tick(mii);
1007 callout_reset(&sc->ae_tick_ch, hz, ae_tick, sc);
1008 lwkt_serialize_exit(ifp->if_serializer);
1009}
1010
1011static void
1012ae_rxvlan(struct ae_softc *sc)
1013{
1014 struct ifnet *ifp = &sc->arpcom.ac_if;
1015 uint32_t val;
1016
1017 val = AE_READ_4(sc, AE_MAC_REG);
1018 val &= ~AE_MAC_RMVLAN_EN;
1019 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1020 val |= AE_MAC_RMVLAN_EN;
1021 AE_WRITE_4(sc, AE_MAC_REG, val);
1022}
1023
1024static void
1025ae_rxfilter(struct ae_softc *sc)
1026{
1027 struct ifnet *ifp = &sc->arpcom.ac_if;
1028 struct ifmultiaddr *ifma;
1029 uint32_t crc;
1030 uint32_t mchash[2];
1031 uint32_t rxcfg;
1032
1033 rxcfg = AE_READ_4(sc, AE_MAC_REG);
1034 rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
1035 rxcfg |= AE_MAC_BCAST_EN;
1036 if (ifp->if_flags & IFF_PROMISC)
1037 rxcfg |= AE_MAC_PROMISC_EN;
1038 if (ifp->if_flags & IFF_ALLMULTI)
1039 rxcfg |= AE_MAC_MCAST_EN;
1040
1041 /*
1042 * Wipe old settings.
1043 */
1044 AE_WRITE_4(sc, AE_REG_MHT0, 0);
1045 AE_WRITE_4(sc, AE_REG_MHT1, 0);
1046 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1047 AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
1048 AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
1049 AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
1050 return;
1051 }
1052
1053 /*
1054 * Load multicast tables.
1055 */
1056 bzero(mchash, sizeof(mchash));
1057 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1058 if (ifma->ifma_addr->sa_family != AF_LINK)
1059 continue;
1060 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1061 ifma->ifma_addr), ETHER_ADDR_LEN);
1062 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
1063 }
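 /*
 * Illustrative: for a CRC of, say, 0x9abcdef0, crc >> 31 = 1 selects
 * the second hash word and (crc >> 26) & 0x1f = 6 selects bit 6,
 * i.e. mchash[1] |= 1 << 6.
 */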
1064 AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
1065 AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
1066 AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
1067}
1068
1069static unsigned int
1070ae_tx_avail_size(struct ae_softc *sc)
1071{
1072 unsigned int avail;
1073
1074 if (sc->txd_cur >= sc->txd_ack)
1075 avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
1076 else
1077 avail = sc->txd_ack - sc->txd_cur;
1078 return (avail - 4); /* 4-byte header. */
1079}
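
/*
 * Example: with txd_cur = 100 and txd_ack = 40 there are 60 in-flight
 * bytes, so AE_TXD_BUFSIZE_DEFAULT - 60 - 4 bytes remain for the next
 * frame once its 4-byte TxD header is reserved.
 */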
1080
1081static int
1082ae_encap(struct ae_softc *sc, struct mbuf **m_head)
1083{
1084 struct mbuf *m0;
1085 struct ae_txd *hdr;
1086 unsigned int to_end;
1087 uint16_t len;
1088
1089 M_ASSERTPKTHDR((*m_head));
1090 m0 = *m_head;
1091 len = m0->m_pkthdr.len;
1092 if ((sc->ae_flags & AE_FLAG_TXAVAIL) == 0 ||
1093 ae_tx_avail_size(sc) < len) {
1094#ifdef AE_DEBUG
1095 if_printf(sc->ifp, "No free Tx available.\n");
1096#endif
1097 return ENOBUFS;
1098 }
1099
1100 hdr = (struct ae_txd *)(sc->txd_base + sc->txd_cur);
1101 bzero(hdr, sizeof(*hdr));
1102
1103 /* Header size. */
1104 sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT;
1105
1106 /* Space available to the end of the ring */
1107 to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
1108
1109 if (to_end >= len) {
1110 m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
1111 } else {
1112 m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
1113 sc->txd_cur));
1114 m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
1115 }
1116
1117 /*
1118 * Set TxD flags and parameters.
1119 */
1120 if ((m0->m_flags & M_VLANTAG) != 0) {
1121 hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vlantag));
1122 hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
1123 } else {
1124 hdr->len = htole16(len);
1125 }
1126
1127 /*
1128 * Set current TxD position and round up to a 4-byte boundary.
1129 */
1130 sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1131 if (sc->txd_cur == sc->txd_ack)
1132 sc->ae_flags &= ~AE_FLAG_TXAVAIL;
1133#ifdef AE_DEBUG
1134 if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
1135#endif
1136
1137 /*
1138 * Update TxS position and check if there are empty TxS available.
1139 */
1140 sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
1141 sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
1142 if (sc->txs_cur == sc->txs_ack)
1143 sc->ae_flags &= ~AE_FLAG_TXAVAIL;
1144
1145 /*
1146 * Synchronize DMA memory.
1147 */
1148 bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREWRITE);
1149 bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map, BUS_DMASYNC_PREWRITE);
1150
1151 return (0);
1152}
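
/*
 * Worked example of the layout ae_encap() produces: a 60-byte frame
 * queued at txd_cur = 0 gets its TxD header at bytes 0-3 and its
 * payload at bytes 4-63, and the producer index advances to
 * ((4 + 60 + 3) & ~3) = 64, wrapping modulo AE_TXD_BUFSIZE_DEFAULT.
 */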
1153
1154static void
1155ae_start(struct ifnet *ifp)
1156{
1157 struct ae_softc *sc = ifp->if_softc;
1158 int error, trans;
1159
1160 ASSERT_SERIALIZED(ifp->if_serializer);
1161
1162#ifdef AE_DEBUG
1163 if_printf(ifp, "Start called.\n");
1164#endif
1165 if ((sc->ae_flags & AE_FLAG_LINK) == 0) {
1166 ifq_purge(&ifp->if_snd);
1167 return;
1168 }
1169 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1170 return;
1171
1172 trans = 0;
1173 while (!ifq_is_empty(&ifp->if_snd)) {
1174 struct mbuf *m0;
1175
1176 m0 = ifq_dequeue(&ifp->if_snd, NULL);
1177 if (m0 == NULL)
1178 break; /* Nothing to do. */
1179
1180 error = ae_encap(sc, &m0);
1181 if (error != 0) {
1182 if (m0 != NULL) {
1183 ifq_prepend(&ifp->if_snd, m0);
1184 ifp->if_flags |= IFF_OACTIVE;
1185#ifdef AE_DEBUG
1186 if_printf(ifp, "Setting OACTIVE.\n");
1187#endif
1188 }
1189 break;
1190 }
1191 trans++;
1192 sc->tx_inproc++;
1193
1194 /* Bounce a copy of the frame to BPF. */
1195 ETHER_BPF_MTAP(ifp, m0);
1196 m_freem(m0);
1197 }
1198 if (trans) { /* Something was dequeued. */
1199 AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
1200 ifp->if_timer = AE_TX_TIMEOUT; /* Load watchdog. */
1201#ifdef AE_DEBUG
1202 if_printf(ifp, "%d packets dequeued.\n", trans);
1203 if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
1204#endif
1205 }
1206}
1207
1208static int
1209ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
1210{
1211 struct ae_softc *sc = ifp->if_softc;
1212 struct ifreq *ifr;
1213 struct mii_data *mii;
1214 int error = 0, mask;
1215
1216 ASSERT_SERIALIZED(ifp->if_serializer);
1217
1218 ifr = (struct ifreq *)data;
1219 switch (cmd) {
1220 case SIOCSIFFLAGS:
1221 if (ifp->if_flags & IFF_UP) {
1222 if (ifp->if_flags & IFF_RUNNING) {
1223 if (((ifp->if_flags ^ sc->ae_if_flags)
1224 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1225 ae_rxfilter(sc);
1226 } else {
1227 ae_init(sc);
1228 }
1229 } else {
1230 if (ifp->if_flags & IFF_RUNNING)
1231 ae_stop(sc);
1232 }
1233 sc->ae_if_flags = ifp->if_flags;
1234 break;
1235
1236 case SIOCADDMULTI:
1237 case SIOCDELMULTI:
1238 if (ifp->if_flags & IFF_RUNNING)
1239 ae_rxfilter(sc);
1240 break;
1241
1242 case SIOCSIFMEDIA:
1243 case SIOCGIFMEDIA:
1244 mii = device_get_softc(sc->ae_miibus);
1245 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1246 break;
1247
1248 case SIOCSIFCAP:
1249 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1250 if (mask & IFCAP_VLAN_HWTAGGING) {
1251 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1252 ae_rxvlan(sc);
1253 }
1254 break;
1255
1256 default:
1257 error = ether_ioctl(ifp, cmd, data);
1258 break;
1259 }
1260 return (error);
1261}
1262
1263static int
1264ae_attach(device_t dev)
1265{
1266 struct ae_softc *sc = device_get_softc(dev);
1267 struct ifnet *ifp = &sc->arpcom.ac_if;
1268 int error = 0;
1269
1270 sc->ae_dev = dev;
1271 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1272 callout_init(&sc->ae_tick_ch);
1273
1274 /* Enable bus mastering */
1275 pci_enable_busmaster(dev);
1276
1277 /*
1278 * Allocate memory mapped IO
1279 */
1280 sc->ae_mem_rid = PCIR_BAR(0);
1281 sc->ae_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1282 &sc->ae_mem_rid, RF_ACTIVE);
1283 if (sc->ae_mem_res == NULL) {
1284 device_printf(dev, "can't allocate IO memory\n");
1285 return ENXIO;
1286 }
1287 sc->ae_mem_bt = rman_get_bustag(sc->ae_mem_res);
1288 sc->ae_mem_bh = rman_get_bushandle(sc->ae_mem_res);
1289
1290 /*
1291 * Allocate IRQ
1292 */
1293 sc->ae_irq_rid = 0;
1294 sc->ae_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1295 &sc->ae_irq_rid,
1296 RF_SHAREABLE | RF_ACTIVE);
1297 if (sc->ae_irq_res == NULL) {
1298 device_printf(dev, "can't allocate irq\n");
1299 error = ENXIO;
1300 goto fail;
1301 }
1302
1303 /* Set PHY address. */
1304 sc->ae_phyaddr = AE_PHYADDR_DEFAULT;
1305
1306 /* Create sysctl tree */
1307 ae_sysctl_node(sc);
1308
1309 /* Reset PHY. */
1310 ae_phy_reset(sc);
1311
1312 /*
1313 * Reset the ethernet controller.
1314 */
1315 ae_reset(sc);
1316 ae_pcie_init(sc);
1317
1318 /*
1319 * Get PCI and chip id/revision.
1320 */
1321 sc->ae_rev = pci_get_revid(dev);
1322 sc->ae_chip_rev =
1323 (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
1324 AE_MASTER_REVNUM_MASK;
1325 if (bootverbose) {
1326 device_printf(dev, "PCI device revision : 0x%04x\n", sc->ae_rev);
1327 device_printf(dev, "Chip id/revision : 0x%04x\n",
1328 sc->ae_chip_rev);
1329 }
1330
1331 /*
1332 * XXX
1333 * Uninitialized hardware returns an invalid chip id/revision
1334 * as well as 0xFFFFFFFF for Tx/Rx fifo length. It seems that
1335 * an unplugged cable results in putting the hardware into automatic
1336 * power down mode, which in turn returns an invalid chip revision.
1337 */
1338 if (sc->ae_chip_rev == 0xFFFF) {
1339 device_printf(dev,"invalid chip revision : 0x%04x -- "
1340 "not initialized?\n", sc->ae_chip_rev);
1341 error = ENXIO;
1342 goto fail;
1343 }
1344#if 0
1345 /* Get DMA parameters from PCIe device control register. */
1346 pcie_ptr = pci_get_pciecap_ptr(dev);
1347 if (pcie_ptr) {
1348 uint16_t devctl;
1349 sc->ae_flags |= AE_FLAG_PCIE;
1350 devctl = pci_read_config(dev, pcie_ptr + PCIER_DEVCTRL, 2);
1351 /* Max read request size. */
1352 sc->ae_dma_rd_burst = ((devctl >> 12) & 0x07) <<
1353 DMA_CFG_RD_BURST_SHIFT;
1354 /* Max payload size. */
1355 sc->ae_dma_wr_burst = ((devctl >> 5) & 0x07) <<
1356 DMA_CFG_WR_BURST_SHIFT;
1357 if (bootverbose) {
1358 device_printf(dev, "Read request size : %d bytes.\n",
1359 128 << ((devctl >> 12) & 0x07));
1360 device_printf(dev, "TLP payload size : %d bytes.\n",
1361 128 << ((devctl >> 5) & 0x07));
1362 }
1363 } else {
1364 sc->ae_dma_rd_burst = DMA_CFG_RD_BURST_128;
1365 sc->ae_dma_wr_burst = DMA_CFG_WR_BURST_128;
1366 }
1367#endif
1368
1369 /* Create DMA stuffs */
1370 error = ae_dma_alloc(sc);
1371 if (error)
1372 goto fail;
1373
1374 /* Load station address. */
1375 ae_get_eaddr(sc);
1376
1377 ifp->if_softc = sc;
1378 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1379 ifp->if_ioctl = ae_ioctl;
1380 ifp->if_start = ae_start;
1381 ifp->if_init = ae_init;
1382 ifp->if_watchdog = ae_watchdog;
1383 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN - 1);
1384 ifq_set_ready(&ifp->if_snd);
1385 ifp->if_capabilities = IFCAP_VLAN_MTU |
1386 IFCAP_VLAN_HWTAGGING;
1387 ifp->if_hwassist = 0;
1388 ifp->if_capenable = ifp->if_capabilities;
1389
1390 /* Set up MII bus. */
1391 error = mii_phy_probe(dev, &sc->ae_miibus,
1392 ae_mediachange, ae_mediastatus);
1393 if (error) {
1394 device_printf(dev, "no PHY found!\n");
1395 goto fail;
1396 }
1397 ether_ifattach(ifp, sc->ae_eaddr, NULL);
1398
1399 /* Tell the upper layer(s) we support long frames. */
1400 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1401
1402 error = bus_setup_intr(dev, sc->ae_irq_res, INTR_MPSAFE, ae_intr, sc,
1403 &sc->ae_irq_handle, ifp->if_serializer);
1404 if (error) {
1405 device_printf(dev, "could not set up interrupt handler.\n");
1406 ether_ifdetach(ifp);
1407 goto fail;
1408 }
1409 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->ae_irq_res));
1410 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
1411 return 0;
1412fail:
1413 ae_detach(dev);
1414 return (error);
1415}
1416
1417static int
1418ae_detach(device_t dev)
1419{
1420 struct ae_softc *sc = device_get_softc(dev);
1421
1422 if (device_is_attached(dev)) {
1423 struct ifnet *ifp = &sc->arpcom.ac_if;
1424
1425 lwkt_serialize_enter(ifp->if_serializer);
1426 sc->ae_flags |= AE_FLAG_DETACH;
1427 ae_stop(sc);
1428 bus_teardown_intr(dev, sc->ae_irq_res, sc->ae_irq_handle);
1429 lwkt_serialize_exit(ifp->if_serializer);
1430
1431 ether_ifdetach(ifp);
1432 }
1433
1434 if (sc->ae_miibus != NULL)
1435 device_delete_child(dev, sc->ae_miibus);
1436 bus_generic_detach(dev);
1437
1438 if (sc->ae_irq_res != NULL) {
1439 bus_release_resource(dev, SYS_RES_IRQ, sc->ae_irq_rid,
1440 sc->ae_irq_res);
1441 }
1442 if (sc->ae_mem_res != NULL) {
1443 bus_release_resource(dev, SYS_RES_MEMORY, sc->ae_mem_rid,
1444 sc->ae_mem_res);
1445 }
1446
1447 if (sc->ae_sysctl_tree != NULL)
1448 sysctl_ctx_free(&sc->ae_sysctl_ctx);
1449
1450 ae_dma_free(sc);
1451
1452 return (0);
1453}
1454
1455static void
1456ae_dma_free(struct ae_softc *sc)
1457{
1458 if (sc->dma_txd_tag != NULL) {
1459 bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
1460 bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
1461 sc->dma_txd_map);
1462 bus_dma_tag_destroy(sc->dma_txd_tag);
1463 }
1464 if (sc->dma_txs_tag != NULL) {
1465 bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
1466 bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
1467 sc->dma_txs_map);
1468 bus_dma_tag_destroy(sc->dma_txs_tag);
1469 }
1470 if (sc->dma_rxd_tag != NULL) {
1471 bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
1472 bus_dmamem_free(sc->dma_rxd_tag,
1473 sc->rxd_base_dma, sc->dma_rxd_map);
1474 bus_dma_tag_destroy(sc->dma_rxd_tag);
1475 }
1476 if (sc->dma_parent_tag != NULL)
1477 bus_dma_tag_destroy(sc->dma_parent_tag);
1478}
1479
1480static void
1481ae_pcie_init(struct ae_softc *sc)
1482{
1483 AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG,
1484 AE_PCIE_LTSSM_TESTMODE_DEFAULT);
1485 AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG,
1486 AE_PCIE_DLL_TX_CTRL_DEFAULT);
1487}
1488
1489static void
1490ae_phy_reset(struct ae_softc *sc)
1491{
1492 AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
1493 DELAY(1000); /* XXX: pause(9) ? */
1494}
1495
1496static int
1497ae_reset(struct ae_softc *sc)
1498{
1499 int i;
1500
1501 /*
1502 * Issue a soft reset.
1503 */
1504 AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
1505 bus_space_barrier(sc->ae_mem_bt, sc->ae_mem_bh, AE_MASTER_REG, 4,
1506 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1507
1508 /*
1509 * Wait for reset to complete.
1510 */
1511 for (i = 0; i < AE_RESET_TIMEOUT; i++) {
1512 if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
1513 break;
1514 DELAY(10);
1515 }
1516 if (i == AE_RESET_TIMEOUT) {
1517 device_printf(sc->ae_dev, "reset timeout.\n");
1518 return (ENXIO);
1519 }
1520
1521 /*
1522 * Wait for everything to enter idle state.
1523 */
1524 for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1525 if (AE_READ_4(sc, AE_IDLE_REG) == 0)
1526 break;
1527 DELAY(100);
1528 }
1529 if (i == AE_IDLE_TIMEOUT) {
1530 device_printf(sc->ae_dev, "could not enter idle state.\n");
1531 return (ENXIO);
1532 }
1533 return (0);
1534}
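
/*
 * Note: the reset is two-staged: first wait (in 10 microsecond steps)
 * for the chip to clear AE_MASTER_SOFT_RESET, then wait (in 100
 * microsecond steps) for AE_IDLE_REG to read zero, meaning every
 * internal engine has gone idle.
 */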
1535
1536static int
1537ae_check_eeprom_present(struct ae_softc *sc, int *vpdc)
1538{
1539 int error;
1540 uint32_t val;
1541
1542 /*
1543 * Not sure why, but Linux does this.
1544 */
1545 val = AE_READ_4(sc, AE_SPICTL_REG);
1546 if ((val & AE_SPICTL_VPD_EN) != 0) {
1547 val &= ~AE_SPICTL_VPD_EN;
1548 AE_WRITE_4(sc, AE_SPICTL_REG, val);
1549 }
1550 error = pci_find_extcap(sc->ae_dev, PCIY_VPD, vpdc);
1551 return (error);
1552}
1553
1554static int
1555ae_vpd_read_word(struct ae_softc *sc, int reg, uint32_t *word)
1556{
1557 uint32_t val;
1558 int i;
1559
1560 AE_WRITE_4(sc, AE_VPD_DATA_REG, 0); /* Clear register value. */
1561
1562 /*
1563 * VPD registers start at offset 0x100. Read them.
1564 */
1565 val = 0x100 + reg * 4;
1566 AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
1567 AE_VPD_CAP_ADDR_MASK);
1568 for (i = 0; i < AE_VPD_TIMEOUT; i++) {
1569 DELAY(2000);
1570 val = AE_READ_4(sc, AE_VPD_CAP_REG);
1571 if ((val & AE_VPD_CAP_DONE) != 0)
1572 break;
1573 }
1574 if (i == AE_VPD_TIMEOUT) {
1575 device_printf(sc->ae_dev, "timeout reading VPD register %d.\n",
1576 reg);
1577 return (ETIMEDOUT);
1578 }
1579 *word = AE_READ_4(sc, AE_VPD_DATA_REG);
1580 return (0);
1581}
1582
1583static int
1584ae_get_vpd_eaddr(struct ae_softc *sc, uint32_t *eaddr)
1585{
1586 uint32_t word, reg, val;
1587 int error;
1588 int found;
1589 int vpdc;
1590 int i;
1591
1592 /*
1593 * Check for EEPROM.
1594 */
1595 error = ae_check_eeprom_present(sc, &vpdc);
1596 if (error != 0)
1597 return (error);
1598
1599 /*
1600 * Read the VPD configuration space.
1601 * Each register is prefixed with a signature,
1602 * so we can check if it is valid.
1603 */
1604 for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
1605 error = ae_vpd_read_word(sc, i, &word);
1606 if (error != 0)
1607 break;
1608
1609 /*
1610 * Check signature.
1611 */
1612 if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
1613 break;
1614 reg = word >> AE_VPD_REG_SHIFT;
1615 i++; /* Move to the next word. */
1616 if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
1617 continue;
1618
1619 error = ae_vpd_read_word(sc, i, &val);
1620 if (error != 0)
1621 break;
1622 if (reg == AE_EADDR0_REG)
1623 eaddr[0] = val;
1624 else
1625 eaddr[1] = val;
1626 found++;
1627 }
1628 if (found < 2)
1629 return (ENOENT);
1630
1631 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1632 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1633 if (bootverbose)
1634 device_printf(sc->ae_dev,
1635 "VPD ethernet address registers are invalid.\n");
1636 return (EINVAL);
1637 }
1638 return (0);
1639}
1640
1641static int
1642ae_get_reg_eaddr(struct ae_softc *sc, uint32_t *eaddr)
1643{
1644 /*
1645 * BIOS is supposed to set this.
1646 */
1647 eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1648 eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1649 eaddr[1] &= 0xffff; /* Only last 2 bytes are used. */
1650 if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1651 if (bootverbose)
1652 device_printf(sc->ae_dev,
1653 "Ethetnet address registers are invalid.\n");
1654 return (EINVAL);
1655 }
1656 return (0);
1657}
1658
1659static void
1660ae_get_eaddr(struct ae_softc *sc)
1661{
1662 uint32_t eaddr[2] = {0, 0};
1663 int error;
1664
1665 /*
1666 * Check for EEPROM.
1667 */
1668 error = ae_get_vpd_eaddr(sc, eaddr);
1669 if (error)
1670 error = ae_get_reg_eaddr(sc, eaddr);
1671 if (error) {
1672 if (bootverbose)
1673 device_printf(sc->ae_dev,
1674 "Generating random ethernet address.\n");
1675 eaddr[0] = karc4random();
1676 /*
1677 * Set OUI to ASUSTek COMPUTER INC.
1678 */
1679 sc->ae_eaddr[0] = 0x02; /* U/L bit set. */
1680 sc->ae_eaddr[1] = 0x1f;
1681 sc->ae_eaddr[2] = 0xc6;
1682 sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
1683 sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
1684 sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
1685 } else {
1686 sc->ae_eaddr[0] = (eaddr[1] >> 8) & 0xff;
1687 sc->ae_eaddr[1] = (eaddr[1] >> 0) & 0xff;
1688 sc->ae_eaddr[2] = (eaddr[0] >> 24) & 0xff;
1689 sc->ae_eaddr[3] = (eaddr[0] >> 16) & 0xff;
1690 sc->ae_eaddr[4] = (eaddr[0] >> 8) & 0xff;
1691 sc->ae_eaddr[5] = (eaddr[0] >> 0) & 0xff;
1692 }
1693}
1694
1695static int
1696ae_mediachange(struct ifnet *ifp)
1697{
1698 struct ae_softc *sc = ifp->if_softc;
1699 struct mii_data *mii = device_get_softc(sc->ae_miibus);
1700 int error;
1701
1702 ASSERT_SERIALIZED(ifp->if_serializer);
1703 if (mii->mii_instance != 0) {
1704 struct mii_softc *miisc;
1705 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1706 mii_phy_reset(miisc);
1707 }
1708 error = mii_mediachg(mii);
1709 return (error);
1710}
1711
1712static void
1713ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1714{
1715 struct ae_softc *sc = ifp->if_softc;
1716 struct mii_data *mii = device_get_softc(sc->ae_miibus);
1717
1718 ASSERT_SERIALIZED(ifp->if_serializer);
1719 mii_pollstat(mii);
1720 ifmr->ifm_status = mii->mii_media_status;
1721 ifmr->ifm_active = mii->mii_media_active;
1722}
1723
1724static void
1725ae_update_stats_tx(uint16_t flags, struct ae_stats *stats)
1726{
1727 if ((flags & AE_TXS_BCAST) != 0)
1728 stats->tx_bcast++;
1729 if ((flags & AE_TXS_MCAST) != 0)
1730 stats->tx_mcast++;
1731 if ((flags & AE_TXS_PAUSE) != 0)
1732 stats->tx_pause++;
1733 if ((flags & AE_TXS_CTRL) != 0)
1734 stats->tx_ctrl++;
1735 if ((flags & AE_TXS_DEFER) != 0)
1736 stats->tx_defer++;
1737 if ((flags & AE_TXS_EXCDEFER) != 0)
1738 stats->tx_excdefer++;
1739 if ((flags & AE_TXS_SINGLECOL) != 0)
1740 stats->tx_singlecol++;
1741 if ((flags & AE_TXS_MULTICOL) != 0)
1742 stats->tx_multicol++;
1743 if ((flags & AE_TXS_LATECOL) != 0)
1744 stats->tx_latecol++;
1745 if ((flags & AE_TXS_ABORTCOL) != 0)
1746 stats->tx_abortcol++;
1747 if ((flags & AE_TXS_UNDERRUN) != 0)
1748 stats->tx_underrun++;
1749}
1750
1751static void
1752ae_update_stats_rx(uint16_t flags, struct ae_stats *stats)
1753{
1754 if ((flags & AE_RXD_BCAST) != 0)
1755 stats->rx_bcast++;
1756 if ((flags & AE_RXD_MCAST) != 0)
1757 stats->rx_mcast++;
1758 if ((flags & AE_RXD_PAUSE) != 0)
1759 stats->rx_pause++;
1760 if ((flags & AE_RXD_CTRL) != 0)
1761 stats->rx_ctrl++;
1762 if ((flags & AE_RXD_CRCERR) != 0)
1763 stats->rx_crcerr++;
1764 if ((flags & AE_RXD_CODEERR) != 0)
1765 stats->rx_codeerr++;
1766 if ((flags & AE_RXD_RUNT) != 0)
1767 stats->rx_runt++;
1768 if ((flags & AE_RXD_FRAG) != 0)
1769 stats->rx_frag++;
1770 if ((flags & AE_RXD_TRUNC) != 0)
1771 stats->rx_trunc++;
1772 if ((flags & AE_RXD_ALIGN) != 0)
1773 stats->rx_align++;
1774}
1775
1776static int
1777ae_resume(device_t dev)
1778{
1779 struct ae_softc *sc = device_get_softc(dev);
1780 struct ifnet *ifp = &sc->arpcom.ac_if;
1781
1782 lwkt_serialize_enter(ifp->if_serializer);
1783#if 0
1784 AE_READ_4(sc, AE_WOL_REG); /* Clear WOL status. */
1785#endif
1786 ae_phy_reset(sc);
1787 if ((ifp->if_flags & IFF_UP) != 0)
1788 ae_init(sc);
1789 lwkt_serialize_exit(ifp->if_serializer);
1790 return (0);
1791}
1792
1793static int
1794ae_suspend(device_t dev)
1795{
1796 struct ae_softc *sc = device_get_softc(dev);
1797 struct ifnet *ifp = &sc->arpcom.ac_if;
1798
1799 lwkt_serialize_enter(ifp->if_serializer);
1800 ae_stop(sc);
1801#if 0
1802 /* we don't use ae_pm_init because we don't want WOL */
1803 ae_pm_init(sc);
1804#endif
1805 lwkt_serialize_exit(ifp->if_serializer);
1806 return (0);
1807}
1808
1809static int
1810ae_shutdown(device_t dev)
1811{
1812 struct ae_softc *sc = device_get_softc(dev);
1813 struct ifnet *ifp = &sc->arpcom.ac_if;
1814
1815 ae_suspend(dev);
1816
1817 lwkt_serialize_enter(ifp->if_serializer);
1818 ae_powersave_enable(sc);
1819 lwkt_serialize_exit(ifp->if_serializer);
1820
1821 return (0);
1822}
1823
1824static void
1825ae_powersave_disable(struct ae_softc *sc)
1826{
1827 uint32_t val;
1828
1829 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1830 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1831 if (val & AE_PHY_DBG_POWERSAVE) {
1832 val &= ~AE_PHY_DBG_POWERSAVE;
1833 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
1834 DELAY(1000);
1835 }
1836}
1837
1838static void
1839ae_powersave_enable(struct ae_softc *sc)
1840{
1841 uint32_t val;
1842
1843 /*
1844 * XXX magic numbers.
1845 */
1846 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1847 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1848 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
1849 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
1850 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
1851 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
1852 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
1853}