/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>
#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
        if (sc->rss_debug >= lvl) \
                if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else   /* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)      ((void)0)
#endif  /* EMX_RSS_DEBUG */

#define EMX_NAME        "Intel(R) PRO/1000 "

#define EMX_DEVICE(id) \
        { EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL { 0, 0, NULL }
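/*
 * As a concrete example of the macro above: EMX_DEVICE(82571EB_COPPER)
 * expands, via ## token pasting and # stringization, to
 *   { EMX_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER,
 *     "Intel(R) PRO/1000 82571EB_COPPER" }
 * which is exactly the shape of one emx_devices[] entry below.
 */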
static const struct emx_device {
        uint16_t        vid;
        uint16_t        did;
        const char      *desc;
} emx_devices[] = {
        EMX_DEVICE(82571EB_COPPER),
        EMX_DEVICE(82571EB_FIBER),
        EMX_DEVICE(82571EB_SERDES),
        EMX_DEVICE(82571EB_SERDES_DUAL),
        EMX_DEVICE(82571EB_SERDES_QUAD),
        EMX_DEVICE(82571EB_QUAD_COPPER),
        EMX_DEVICE(82571EB_QUAD_COPPER_BP),
        EMX_DEVICE(82571EB_QUAD_COPPER_LP),
        EMX_DEVICE(82571EB_QUAD_FIBER),
        EMX_DEVICE(82571PT_QUAD_COPPER),

        EMX_DEVICE(82572EI_COPPER),
        EMX_DEVICE(82572EI_FIBER),
        EMX_DEVICE(82572EI_SERDES),

        EMX_DEVICE(82573E_IAMT),

        EMX_DEVICE(80003ES2LAN_COPPER_SPT),
        EMX_DEVICE(80003ES2LAN_SERDES_SPT),
        EMX_DEVICE(80003ES2LAN_COPPER_DPT),
        EMX_DEVICE(80003ES2LAN_SERDES_DPT),

        EMX_DEVICE(PCH_LPT_I217_LM),
        EMX_DEVICE(PCH_LPT_I217_V),
        EMX_DEVICE(PCH_LPTLP_I218_LM),
        EMX_DEVICE(PCH_LPTLP_I218_V),
        EMX_DEVICE(PCH_I218_LM2),
        EMX_DEVICE(PCH_I218_V2),
        EMX_DEVICE(PCH_I218_LM3),
        EMX_DEVICE(PCH_I218_V3),
        EMX_DEVICE(PCH_SPT_I219_LM),
        EMX_DEVICE(PCH_SPT_I219_V),
        EMX_DEVICE(PCH_SPT_I219_LM2),
        EMX_DEVICE(PCH_SPT_I219_V2),

        /* required last entry */
        EMX_DEVICE_NULL
};
static int      emx_probe(device_t);
static int      emx_attach(device_t);
static int      emx_detach(device_t);
static int      emx_shutdown(device_t);
static int      emx_suspend(device_t);
static int      emx_resume(device_t);

static void     emx_init(void *);
static void     emx_stop(struct emx_softc *);
static int      emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void     emx_npoll(struct ifnet *, struct ifpoll_info *);
static void     emx_npoll_status(struct ifnet *);
static void     emx_npoll_tx(struct ifnet *, void *, int);
static void     emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void     emx_watchdog(struct ifaltq_subque *);
static void     emx_media_status(struct ifnet *, struct ifmediareq *);
static int      emx_media_change(struct ifnet *);
static void     emx_timer(void *);
static void     emx_serialize(struct ifnet *, enum ifnet_serialize);
static void     emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int      emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void     emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
                    boolean_t);
#endif

static void     emx_intr(void *);
static void     emx_intr_mask(void *);
static void     emx_intr_body(struct emx_softc *, boolean_t);
static void     emx_rxeof(struct emx_rxdata *, int);
static void     emx_txeof(struct emx_txdata *);
static void     emx_tx_collect(struct emx_txdata *);
static void     emx_tx_purge(struct emx_softc *);
static void     emx_enable_intr(struct emx_softc *);
static void     emx_disable_intr(struct emx_softc *);

static int      emx_dma_alloc(struct emx_softc *);
static void     emx_dma_free(struct emx_softc *);
static void     emx_init_tx_ring(struct emx_txdata *);
static int      emx_init_rx_ring(struct emx_rxdata *);
static void     emx_free_tx_ring(struct emx_txdata *);
static void     emx_free_rx_ring(struct emx_rxdata *);
static int      emx_create_tx_ring(struct emx_txdata *);
static int      emx_create_rx_ring(struct emx_rxdata *);
static void     emx_destroy_tx_ring(struct emx_txdata *, int);
static void     emx_destroy_rx_ring(struct emx_rxdata *, int);
static int      emx_newbuf(struct emx_rxdata *, int, int);
static int      emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int      emx_txcsum(struct emx_txdata *, struct mbuf *,
                    uint32_t *, uint32_t *);
static int      emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int      emx_tso_setup(struct emx_txdata *, struct mbuf *,
                    uint32_t *, uint32_t *);
static int      emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int      emx_is_valid_eaddr(const uint8_t *);
static int      emx_reset(struct emx_softc *);
static void     emx_setup_ifp(struct emx_softc *);
static void     emx_init_tx_unit(struct emx_softc *);
static void     emx_init_rx_unit(struct emx_softc *);
static void     emx_update_stats(struct emx_softc *);
static void     emx_set_promisc(struct emx_softc *);
static void     emx_disable_promisc(struct emx_softc *);
static void     emx_set_multi(struct emx_softc *);
static void     emx_update_link_status(struct emx_softc *);
static void     emx_smartspeed(struct emx_softc *);
static void     emx_set_itr(struct emx_softc *, uint32_t);
static void     emx_disable_aspm(struct emx_softc *);

static void     emx_print_debug_info(struct emx_softc *);
static void     emx_print_nvm_info(struct emx_softc *);
static void     emx_print_hw_stats(struct emx_softc *);

static int      emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int      emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int      emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int      emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int      emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int      emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int      emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static void     emx_add_sysctl(struct emx_softc *);

static void     emx_serialize_skipmain(struct emx_softc *);
static void     emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void     emx_get_mgmt(struct emx_softc *);
static void     emx_rel_mgmt(struct emx_softc *);
static void     emx_get_hw_control(struct emx_softc *);
static void     emx_rel_hw_control(struct emx_softc *);
static void     emx_enable_wol(device_t);
static device_method_t emx_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, emx_probe),
        DEVMETHOD(device_attach, emx_attach),
        DEVMETHOD(device_detach, emx_detach),
        DEVMETHOD(device_shutdown, emx_shutdown),
        DEVMETHOD(device_suspend, emx_suspend),
        DEVMETHOD(device_resume, emx_resume),
        DEVMETHOD_END
};

static driver_t emx_driver = {
        "emx",
        emx_methods,
        sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);
static int      emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int      emx_rxd = EMX_DEFAULT_RXD;
static int      emx_txd = EMX_DEFAULT_TXD;
static int      emx_smart_pwr_down = 0;
static int      emx_rxr = 0;
static int      emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int      emx_debug_sbp = 0;

static int      emx_82573_workaround = 1;
static int      emx_msi_enable = 1;

static char     emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));
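/*
 * Usage note: these hw.emx.* knobs are kernel environment variables, so
 * they can be set from loader.conf(5) before the driver attaches, e.g.:
 *
 *   hw.emx.int_throttle_ceil="6000"
 *   hw.emx.txd="1024"
 *
 * In addition, emx_attach() consults per-device overrides through
 * device_getenv_int()/device_getenv_string(), as seen below.
 */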
/* Global used in WOL setup with multiport cards */
static int      emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int      emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX      KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)     KTR_LOG(if_emx_ ## name)
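/*
 * For instance, logif(intr_beg) expands to KTR_LOG(if_emx_intr_beg),
 * matching the KTR_INFO(..., intr_beg, ...) event declared above.
 */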
static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
        rxd->rxd_bufaddr = htole64(rxbuf->paddr);
        /* DD bit must be cleared */
        rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
        /* Ignore Checksum bit is set */
        if (staterr & E1000_RXD_STAT_IXSM)
                return;

        if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
            E1000_RXD_STAT_IPCS)
                mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

        if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
            E1000_RXD_STAT_TCPCS) {
                mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
                                           CSUM_PSEUDO_HDR |
                                           CSUM_FRAG_NOT_CHECKED;
                mp->m_pkthdr.csum_data = htons(0xffff);
        }
}
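/*
 * A note on the semantics above (standard BSD mbuf checksum convention):
 * each test accepts the offload result only when the "checksummed" status
 * bit is set while the corresponding error bit is clear.  Setting
 * csum_data to 0xffff together with CSUM_DATA_VALID | CSUM_PSEUDO_HDR
 * tells the stack that the TCP/UDP checksum, including the pseudo header,
 * has already been verified, so software verification can be skipped.
 */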
static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
        switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
        case EMX_RXDMRQ_IPV4_TCP:
                pi->pi_netisr = NETISR_IP;
                pi->pi_l3proto = IPPROTO_TCP;
                break;

        case EMX_RXDMRQ_IPV6_TCP:
                pi->pi_netisr = NETISR_IPV6;
                pi->pi_l3proto = IPPROTO_TCP;
                break;

        case EMX_RXDMRQ_IPV4:
                if (staterr & E1000_RXD_STAT_IXSM)
                        return NULL;

                if ((staterr &
                     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
                    E1000_RXD_STAT_TCPCS) {
                        pi->pi_netisr = NETISR_IP;
                        pi->pi_l3proto = IPPROTO_UDP;
                        break;
                }
                /* FALL THROUGH */
        default:
                return NULL;
        }

        m->m_flags |= M_HASH;
        m->m_pkthdr.hash = toeplitz_hash(hash);
        return pi;
}
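/*
 * A short sketch of what happens above: the NIC reports both the RSS
 * hash type (mrq) and the 32-bit Toeplitz hash it computed over the
 * packet headers.  toeplitz_hash() folds that value into the range the
 * host uses for flow placement, and M_HASH marks m_pkthdr.hash as
 * valid, so all packets of one TCP/UDP flow land on the same protocol
 * thread without the host having to rehash the headers itself.
 */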
static int
emx_probe(device_t dev)
{
        const struct emx_device *d;
        uint16_t vid, did;

        vid = pci_get_vendor(dev);
        did = pci_get_device(dev);

        for (d = emx_devices; d->desc != NULL; ++d) {
                if (vid == d->vid && did == d->did) {
                        device_set_desc(dev, d->desc);
                        device_set_async_attach(dev, TRUE);
                        return 0;
                }
        }
        return ENXIO;
}
static int
emx_attach(device_t dev)
{
        struct emx_softc *sc = device_get_softc(dev);
        int error = 0, i, throttle, msi_enable, tx_ring_max;
        u_int intr_flags;
        uint16_t eeprom_data, device_id, apme_mask;
        driver_intr_t *intr_func;
        char flowctrl[IFM_ETH_FC_STRLEN];
#ifdef IFPOLL_ENABLE
        int offset, offset_def;
#endif

        for (i = 0; i < EMX_NRX_RING; ++i) {
                sc->rx_data[i].sc = sc;
                sc->rx_data[i].idx = i;
        }
        for (i = 0; i < EMX_NTX_RING; ++i) {
                sc->tx_data[i].sc = sc;
                sc->tx_data[i].idx = i;
        }

        /*
         * Initialize serializers
         */
        lwkt_serialize_init(&sc->main_serialize);
        for (i = 0; i < EMX_NTX_RING; ++i)
                lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
        for (i = 0; i < EMX_NRX_RING; ++i)
                lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

        /*
         * Initialize serializer array
         */
        i = 0;
        KKASSERT(i < EMX_NSERIALIZE);
        sc->serializes[i++] = &sc->main_serialize;

        KKASSERT(i < EMX_NSERIALIZE);
        sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
        KKASSERT(i < EMX_NSERIALIZE);
        sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

        KKASSERT(i < EMX_NSERIALIZE);
        sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
        KKASSERT(i < EMX_NSERIALIZE);
        sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

        KKASSERT(i == EMX_NSERIALIZE);
        ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
            emx_media_change, emx_media_status);
        callout_init_mp(&sc->timer);

        sc->dev = sc->osdep.dev = dev;

        /*
         * Determine hardware and mac type
         */
        sc->hw.vendor_id = pci_get_vendor(dev);
        sc->hw.device_id = pci_get_device(dev);
        sc->hw.revision_id = pci_get_revid(dev);
        sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
        sc->hw.subsystem_device_id = pci_get_subdevice(dev);

        if (e1000_set_mac_type(&sc->hw))
                return ENXIO;

        /* Enable bus mastering */
        pci_enable_busmaster(dev);

        /*
         * Allocate IO memory
         */
        sc->memory_rid = EMX_BAR_MEM;
        sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->memory_rid, RF_ACTIVE);
        if (sc->memory == NULL) {
                device_printf(dev, "Unable to allocate bus resource: memory\n");
                error = ENXIO;
                goto fail;
        }
        sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
        sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

        /* XXX This is quite goofy, it is not actually used */
        sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;
        /*
         * Don't enable MSI-X on 82574, see:
         * 82574 specification update errata #15
         *
         * Don't enable MSI on 82571/82572, see:
         * 82571/82572 specification update errata #63
         */
        msi_enable = emx_msi_enable;
        if (msi_enable &&
            (sc->hw.mac.type == e1000_82571 ||
             sc->hw.mac.type == e1000_82572))
                msi_enable = 0;

        /*
         * Allocate interrupt
         */
        sc->intr_type = pci_alloc_1intr(dev, msi_enable,
            &sc->intr_rid, &intr_flags);

        if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
                int unshared;

                unshared = device_getenv_int(dev, "irq.unshared", 0);
                if (!unshared) {
                        sc->flags |= EMX_FLAG_SHARED_INTR;
                        if (bootverbose)
                                device_printf(dev, "IRQ shared\n");
                } else {
                        intr_flags &= ~RF_SHAREABLE;
                        if (bootverbose)
                                device_printf(dev, "IRQ unshared\n");
                }
        }

        sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
            intr_flags);
        if (sc->intr_res == NULL) {
                device_printf(dev, "Unable to allocate bus resource: "
                    "interrupt\n");
                error = ENXIO;
                goto fail;
        }
        /* Save PCI command register for Shared Code */
        sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
        sc->hw.back = &sc->osdep;

        /*
         * For I217/I218, we need to map the flash memory and this
         * must happen after the MAC is identified.
         */
        if (sc->hw.mac.type == e1000_pch_lpt) {
                sc->flash_rid = EMX_BAR_FLASH;

                sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                    &sc->flash_rid, RF_ACTIVE);
                if (sc->flash == NULL) {
                        device_printf(dev, "Mapping of Flash failed\n");
                        error = ENXIO;
                        goto fail;
                }
                sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
                sc->osdep.flash_bus_space_handle =
                    rman_get_bushandle(sc->flash);

                /*
                 * This is used in the shared code
                 * XXX this goof is actually not used.
                 */
                sc->hw.flash_address = (uint8_t *)sc->flash;
        }

        /* Do Shared Code initialization */
        if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
                device_printf(dev, "Setup of Shared code failed\n");
                error = ENXIO;
                goto fail;
        }
        e1000_get_bus_info(&sc->hw);
        sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
        sc->hw.phy.autoneg_wait_to_complete = FALSE;
        sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

        /*
         * Interrupt throttle rate
         */
        throttle = device_getenv_int(dev, "int_throttle_ceil",
            emx_int_throttle_ceil);
        if (throttle == 0) {
                sc->int_throttle_ceil = 0;
        } else {
                if (throttle < 0)
                        throttle = EMX_DEFAULT_ITR;

                /* Recalculate the tunable value to get the exact frequency. */
                throttle = 1000000000 / 256 / throttle;

                /* Upper 16 bits of ITR are reserved and should be zero */
                if (throttle & 0xffff0000)
                        throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

                sc->int_throttle_ceil = 1000000000 / 256 / throttle;
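                /*
                 * A worked example of the arithmetic above (a sketch;
                 * the actual default depends on EMX_DEFAULT_ITR): the
                 * ITR register counts in 256ns units, so a requested
                 * ceiling of 10000 interrupts/s becomes
                 * 1000000000 / 256 / 10000 = 390 register units, and
                 * converting back gives 1000000000 / 256 / 390 ~= 10016,
                 * the exact rate the hardware will enforce.
                 */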
        }

        e1000_init_script_state_82541(&sc->hw, TRUE);
        e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

        /* Copper options */
        if (sc->hw.phy.media_type == e1000_media_type_copper) {
                sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
                sc->hw.phy.disable_polarity_correction = FALSE;
                sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
        }

        /* Set the frame limits assuming standard ethernet sized frames. */
        sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

        /* This controls when hardware reports transmit completion status. */
        sc->hw.mac.report_tx_early = 1;
        /* Calculate # of RX rings */
        sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
        sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING);

        /*
         * Calculate # of TX rings
         *
         * NOTE:
         * I217/I218 claim to have 2 TX queues.
         *
         * Don't enable multiple TX queues on 82574; it always gives
         * watchdog timeouts on TX queue0, when multiple TCP streams are
         * received.  It was originally suspected that the hardware TX
         * checksum offloading caused this watchdog timeout, since only
         * TCP ACKs are sent during TCP receiving tests.  However, even
         * if the hardware TX checksum offloading is disabled, TX queue0
         * still gives watchdog timeouts.
         */
        tx_ring_max = 1;
        if (sc->hw.mac.type == e1000_82571 ||
            sc->hw.mac.type == e1000_82572 ||
            sc->hw.mac.type == e1000_80003es2lan ||
            sc->hw.mac.type == e1000_pch_lpt ||
            sc->hw.mac.type == e1000_pch_spt ||
            sc->hw.mac.type == e1000_82574)
                tx_ring_max = EMX_NTX_RING;
        sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr);
        sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max);
        /* Allocate RX/TX rings' busdma(9) stuffs */
        error = emx_dma_alloc(sc);
        if (error)
                goto fail;

        /* Allocate multicast array memory. */
        sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
            M_DEVBUF, M_WAITOK);

        /* Indicate SOL/IDER usage */
        if (e1000_check_reset_block(&sc->hw)) {
                device_printf(dev,
                    "PHY reset is blocked due to SOL/IDER session.\n");
        }

        /* Disable EEE on I217/I218 */
        sc->hw.dev_spec.ich8lan.eee_disable = 1;
        /*
         * Start from a known state; this is important for reading the
         * NVM and MAC address.
         */
        e1000_reset_hw(&sc->hw);

        /* Make sure we have a good EEPROM before we read from it */
        if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
                /*
                 * Some PCI-E parts fail the first check due to
                 * the link being in sleep state; call it again.
                 * If it fails a second time, it's a real issue.
                 */
                if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
                        device_printf(dev,
                            "The EEPROM Checksum Is Not Valid\n");
                        error = EIO;
                        goto fail;
                }
        }

        /* Copy the permanent MAC address out of the EEPROM */
        if (e1000_read_mac_addr(&sc->hw) < 0) {
                device_printf(dev, "EEPROM read error while reading MAC"
                    " address\n");
                error = EIO;
                goto fail;
        }
        if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
                device_printf(dev, "Invalid MAC address\n");
                error = EIO;
                goto fail;
        }

        /* Disable ULP support */
        e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

        /* Determine if we have to control management hardware */
        if (e1000_enable_mng_pass_thru(&sc->hw))
                sc->flags |= EMX_FLAG_HAS_MGMT;
        /*
         * Setup Wake-on-Lan
         */
        apme_mask = EMX_EEPROM_APME;

        switch (sc->hw.mac.type) {
        case e1000_82573:
                sc->flags |= EMX_FLAG_HAS_AMT;
                /* FALL THROUGH */

        case e1000_82571:
        case e1000_82572:
        case e1000_80003es2lan:
                if (sc->hw.bus.func == 1) {
                        e1000_read_nvm(&sc->hw,
                            NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                } else {
                        e1000_read_nvm(&sc->hw,
                            NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                }
                break;

        default:
                e1000_read_nvm(&sc->hw,
                    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
                break;
        }
        if (eeprom_data & apme_mask)
                sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;
        /*
         * We have the eeprom settings; now apply the special cases
         * where the eeprom may be wrong or the board simply won't
         * support wake on lan on a particular port.
         */
        device_id = pci_get_device(dev);
        switch (device_id) {
        case E1000_DEV_ID_82571EB_FIBER:
                /*
                 * Wake events only supported on port A for dual fiber
                 * regardless of eeprom setting
                 */
                if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
                    E1000_STATUS_FUNC_1)
                        sc->wol = 0;
                break;

        case E1000_DEV_ID_82571EB_QUAD_COPPER:
        case E1000_DEV_ID_82571EB_QUAD_FIBER:
        case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
                /* if quad port sc, disable WoL on all but port A */
                if (emx_global_quad_port_a != 0)
                        sc->wol = 0;
                /* Reset for multiple quad port adapters */
                if (++emx_global_quad_port_a == 4)
                        emx_global_quad_port_a = 0;
                break;
        }

        /* XXX disable wol */
        sc->wol = 0;
#ifdef IFPOLL_ENABLE
        /*
         * NPOLLING RX CPU offset
         */
        if (sc->rx_ring_cnt == ncpus2) {
                offset = 0;
        } else {
                offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
                offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
                if (offset >= ncpus2 ||
                    offset % sc->rx_ring_cnt != 0) {
                        device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
                            offset, offset_def);
                        offset = offset_def;
                }
        }
        sc->rx_npoll_off = offset;

        /*
         * NPOLLING TX CPU offset
         */
        if (sc->tx_ring_cnt == ncpus2) {
                offset = 0;
        } else {
                offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
                offset = device_getenv_int(dev, "npoll.txoff", offset_def);
                if (offset >= ncpus2 ||
                    offset % sc->tx_ring_cnt != 0) {
                        device_printf(dev, "invalid npoll.txoff %d, use %d\n",
                            offset, offset_def);
                        offset = offset_def;
                }
        }
        sc->tx_npoll_off = offset;
#endif
        sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

        /* Setup flow control. */
        device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
            emx_flowctrl);
        sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

        /* Setup OS specific network interface */
        emx_setup_ifp(sc);

        /* Add sysctl tree; must be after emx_setup_ifp() */
        emx_add_sysctl(sc);

        /* Reset the hardware */
        error = emx_reset(sc);
        if (error) {
                /*
                 * Some 82573 parts fail the first reset; call it again.
                 * If it fails a second time, it's a real issue.
                 */
                error = emx_reset(sc);
                if (error) {
                        device_printf(dev, "Unable to reset the hardware\n");
                        ether_ifdetach(&sc->arpcom.ac_if);
                        goto fail;
                }
        }

        /* Initialize statistics */
        emx_update_stats(sc);

        sc->hw.mac.get_link_status = 1;
        emx_update_link_status(sc);

        /* Non-AMT based hardware can now take control from firmware */
        if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
            EMX_FLAG_HAS_MGMT)
                emx_get_hw_control(sc);

        /*
         * Missing Interrupt Following ICR read:
         *
         * 82571/82572 specification update errata #76
         * 82573 specification update errata #31
         * 82574 specification update errata #12
         */
        intr_func = emx_intr;
        if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
            (sc->hw.mac.type == e1000_82571 ||
             sc->hw.mac.type == e1000_82572 ||
             sc->hw.mac.type == e1000_82573 ||
             sc->hw.mac.type == e1000_82574))
                intr_func = emx_intr_mask;

        error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
            &sc->intr_tag, &sc->main_serialize);
        if (error) {
                device_printf(dev, "Failed to register interrupt handler");
                ether_ifdetach(&sc->arpcom.ac_if);
                goto fail;
        }
        return 0;
fail:
        emx_detach(dev);
        return error;
}
static int
emx_detach(device_t dev)
{
        struct emx_softc *sc = device_get_softc(dev);

        if (device_is_attached(dev)) {
                struct ifnet *ifp = &sc->arpcom.ac_if;

                ifnet_serialize_all(ifp);

                emx_stop(sc);

                e1000_phy_hw_reset(&sc->hw);

                emx_rel_mgmt(sc);
                emx_rel_hw_control(sc);

                if (sc->wol) {
                        E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
                        E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
                        emx_enable_wol(dev);
                }

                bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

                ifnet_deserialize_all(ifp);

                ether_ifdetach(ifp);
        } else if (sc->memory != NULL) {
                emx_rel_hw_control(sc);
        }

        ifmedia_removeall(&sc->media);
        bus_generic_detach(dev);

        if (sc->intr_res != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
                    sc->intr_res);
        }
        if (sc->intr_type == PCI_INTR_TYPE_MSI)
                pci_release_msi(dev);

        if (sc->memory != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
                    sc->memory);
        }
        if (sc->flash != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
                    sc->flash);
        }

        emx_dma_free(sc);

        if (sc->mta != NULL)
                kfree(sc->mta, M_DEVBUF);

        return 0;
}
static int
emx_shutdown(device_t dev)
{
        return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
        struct emx_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;

        ifnet_serialize_all(ifp);

        emx_stop(sc);

        emx_rel_mgmt(sc);
        emx_rel_hw_control(sc);

        if (sc->wol) {
                E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
                E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
                emx_enable_wol(dev);
        }

        ifnet_deserialize_all(ifp);

        return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
        struct emx_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int i;

        ifnet_serialize_all(ifp);

        emx_init(sc);
        emx_get_mgmt(sc);
        for (i = 0; i < sc->tx_ring_inuse; ++i)
                ifsq_devstart_sched(sc->tx_data[i].ifsq);

        ifnet_deserialize_all(ifp);

        return bus_generic_resume(dev);
}
static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
        struct emx_softc *sc = ifp->if_softc;
        struct emx_txdata *tdata = ifsq_get_priv(ifsq);
        struct mbuf *m_head;
        int idx = -1, nsegs = 0;

        KKASSERT(tdata->ifsq == ifsq);
        ASSERT_SERIALIZED(&tdata->tx_serialize);

        if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
                return;

        if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
                ifsq_purge(ifsq);
                return;
        }

        while (!ifsq_is_empty(ifsq)) {
                /* Now do we at least have a minimal? */
                if (EMX_IS_OACTIVE(tdata)) {
                        emx_tx_collect(tdata);
                        if (EMX_IS_OACTIVE(tdata)) {
                                ifsq_set_oactive(ifsq);
                                break;
                        }
                }

                m_head = ifsq_dequeue(ifsq);
                if (m_head == NULL)
                        break;

                if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
                        IFNET_STAT_INC(ifp, oerrors, 1);
                        emx_tx_collect(tdata);
                        continue;
                }

                /*
                 * TX interrupts are aggressively aggregated, so increasing
                 * opackets at TX interrupt time would make the opackets
                 * statistics vastly inaccurate; we do the opackets increment
                 * here instead.
                 */
                IFNET_STAT_INC(ifp, opackets, 1);
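                /*
                 * Batch the TDT doorbell: an MMIO register write per
                 * packet would be expensive, so the tail register is
                 * only bumped once at least tx_wreg_nsegs descriptors
                 * have been queued; any leftover is flushed by the
                 * final write after the loop.
                 */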
                if (nsegs >= tdata->tx_wreg_nsegs) {
                        E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
                        nsegs = 0;
                        idx = -1;
                }

                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, m_head);

                /* Set timeout in case hardware has problems transmitting. */
                tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
        }
        if (idx >= 0)
                E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
}
static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
        struct emx_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        uint16_t eeprom_data = 0;
        int max_frame_size, mask, reinit;
        int error = 0;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        switch (command) {
        case SIOCSIFMTU:
                switch (sc->hw.mac.type) {
                case e1000_82573:
                        /*
                         * 82573 only supports jumbo frames
                         * if ASPM is disabled.
                         */
                        e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
                            &eeprom_data);
                        if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
                                max_frame_size = ETHER_MAX_LEN;
                                break;
                        }
                        /* FALL THROUGH */

                /* Limit Jumbo Frame size */
                case e1000_80003es2lan:
                        max_frame_size = 9234;
                        break;

                default:
                        max_frame_size = MAX_JUMBO_FRAME_SIZE;
                        break;
                }
                if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
                    ETHER_CRC_LEN) {
                        error = EINVAL;
                        break;
                }

                ifp->if_mtu = ifr->ifr_mtu;
                sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
                    ETHER_CRC_LEN;

                if (ifp->if_flags & IFF_RUNNING)
                        emx_init(sc);
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_flags & IFF_RUNNING)) {
                                if ((ifp->if_flags ^ sc->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        emx_disable_promisc(sc);
                                        emx_set_promisc(sc);
                                }
                        } else {
                                emx_init(sc);
                        }
                } else if (ifp->if_flags & IFF_RUNNING) {
                        emx_stop(sc);
                }
                sc->if_flags = ifp->if_flags;
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (ifp->if_flags & IFF_RUNNING) {
                        emx_disable_intr(sc);
                        emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
                        if (!(ifp->if_flags & IFF_NPOLLING))
#endif
                                emx_enable_intr(sc);
                }
                break;

        case SIOCSIFMEDIA:
                /* Check SOL/IDER usage */
                if (e1000_check_reset_block(&sc->hw)) {
                        device_printf(sc->dev, "Media change is"
                            " blocked due to SOL/IDER session.\n");
                        break;
                }
                /* FALL THROUGH */

        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;

        case SIOCSIFCAP:
                reinit = 0;
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if (mask & IFCAP_RXCSUM) {
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                        reinit = 1;
                }
                if (mask & IFCAP_VLAN_HWTAGGING) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        reinit = 1;
                }
                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        if (ifp->if_capenable & IFCAP_TXCSUM)
                                ifp->if_hwassist |= EMX_CSUM_FEATURES;
                        else
                                ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
                }
                if (mask & IFCAP_TSO) {
                        ifp->if_capenable ^= IFCAP_TSO;
                        if (ifp->if_capenable & IFCAP_TSO)
                                ifp->if_hwassist |= CSUM_TSO;
                        else
                                ifp->if_hwassist &= ~CSUM_TSO;
                }
                if (mask & IFCAP_RSS)
                        ifp->if_capenable ^= IFCAP_RSS;
                if (reinit && (ifp->if_flags & IFF_RUNNING))
                        emx_init(sc);
                break;

        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }
        return error;
}
static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
        struct emx_txdata *tdata = ifsq_get_priv(ifsq);
        struct ifnet *ifp = ifsq_get_ifp(ifsq);
        struct emx_softc *sc = ifp->if_softc;
        int i;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        /*
         * The timer is set to 5 every time start queues a packet.
         * Then txeof keeps resetting it as long as it cleans at
         * least one descriptor.
         * Finally, anytime all descriptors are clean the timer is
         * set to 0.
         */
        if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
            E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
                /*
                 * If we reach here, all TX jobs are completed and
                 * the TX engine should have been idled for some time.
                 * We don't need to call ifsq_devstart_sched() here.
                 */
                ifsq_clr_oactive(ifsq);
                tdata->tx_watchdog.wd_timer = 0;
                return;
        }

        /*
         * If we are in this routine because of pause frames, then
         * don't reset the hardware.
         */
        if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
                tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
                return;
        }

        if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

        IFNET_STAT_INC(ifp, oerrors, 1);

        emx_init(sc);
        for (i = 0; i < sc->tx_ring_inuse; ++i)
                ifsq_devstart_sched(sc->tx_data[i].ifsq);
}
static void
emx_init(void *xsc)
{
        struct emx_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        device_t dev = sc->dev;
        boolean_t polling;
        int i;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        emx_stop(sc);

        /* Get the latest mac address; user can use a LAA */
        bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

        /* Put the address into the Receive Address Array */
        e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

        /*
         * With the 82571 sc, RAR[0] may be overwritten when the other
         * port is reset.  We make a duplicate in RAR[14] for that
         * eventuality; this assures the interface continues to function.
         */
        if (sc->hw.mac.type == e1000_82571) {
                e1000_set_laa_state_82571(&sc->hw, TRUE);
                e1000_rar_set(&sc->hw, sc->hw.mac.addr,
                    E1000_RAR_ENTRIES - 1);
        }

        /* Initialize the hardware */
        if (emx_reset(sc)) {
                device_printf(dev, "Unable to reset the hardware\n");
                /* XXX emx_stop()? */
                return;
        }
        emx_update_link_status(sc);

        /* Setup VLAN support, basic and offload if available */
        E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

        if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
                uint32_t ctrl;

                ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
                ctrl |= E1000_CTRL_VME;
                E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
        }

        /* Configure for OS presence */
        emx_get_mgmt(sc);

        polling = FALSE;
#ifdef IFPOLL_ENABLE
        if (ifp->if_flags & IFF_NPOLLING)
                polling = TRUE;
#endif
        sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
        ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

        /* Prepare transmit descriptors and buffers */
        for (i = 0; i < sc->tx_ring_inuse; ++i)
                emx_init_tx_ring(&sc->tx_data[i]);
        emx_init_tx_unit(sc);

        /* Setup Multicast table */
        emx_set_multi(sc);

        /* Prepare receive descriptors and buffers */
        for (i = 0; i < sc->rx_ring_cnt; ++i) {
                if (emx_init_rx_ring(&sc->rx_data[i])) {
                        device_printf(dev,
                            "Could not setup receive structures\n");
                        emx_stop(sc);
                        return;
                }
        }
        emx_init_rx_unit(sc);

        /* Don't lose promiscuous settings */
        emx_set_promisc(sc);

        ifp->if_flags |= IFF_RUNNING;
        for (i = 0; i < sc->tx_ring_inuse; ++i) {
                ifsq_clr_oactive(sc->tx_data[i].ifsq);
                ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog);
        }

        callout_reset(&sc->timer, hz, emx_timer, sc);
        e1000_clear_hw_cntrs_base_generic(&sc->hw);

        /* MSI/X configuration for 82574 */
        if (sc->hw.mac.type == e1000_82574) {
                uint32_t tmp;

                tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
                tmp |= E1000_CTRL_EXT_PBA_CLR;
                E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
                /*
                 * Set the IVAR - interrupt vector routing.
                 * Each nibble represents a vector, high bit
                 * is enable, other 3 bits are the MSIX table
                 * entry, we map RXQ0 to 0, TXQ0 to 1, and
                 * Link (other) to 2, hence the magic number.
                 */
                E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
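                /*
                 * A rough decode of that magic value, assuming the
                 * 82574's 4-bit IVAR fields with bit 3 as the enable
                 * bit: bits 3:0 = 0x8 (RXQ0 -> MSI-X entry 0), bits
                 * 11:8 = 0x9 (TXQ0 -> entry 1), bits 19:16 = 0xA
                 * (link/other -> entry 2); the top nibble's 0x8
                 * contributes bit 31 of the published magic value.
                 */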
        }

        /*
         * Only enable interrupts if we are not polling; make sure
         * they are off otherwise.
         */
        if (polling)
                emx_disable_intr(sc);
        else
                emx_enable_intr(sc);

        /* AMT based hardware can now take control from firmware */
        if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
            (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
                emx_get_hw_control(sc);
}
static void
emx_intr(void *xsc)
{
        emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t reg_icr;

        logif(intr_beg);
        ASSERT_SERIALIZED(&sc->main_serialize);

        reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

        if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
                logif(intr_end);
                return;
        }

        /*
         * XXX: some laptops trigger several spurious interrupts
         * on emx(4) when in the resume cycle.  The ICR register
         * reports all-ones value in this case.  Processing such
         * interrupts would lead to a freeze.  I don't know why.
         */
        if (reg_icr == 0xffffffff) {
                logif(intr_end);
                return;
        }

        if (ifp->if_flags & IFF_RUNNING) {
                if (reg_icr &
                    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
                        int i;

                        for (i = 0; i < sc->rx_ring_cnt; ++i) {
                                lwkt_serialize_enter(
                                    &sc->rx_data[i].rx_serialize);
                                emx_rxeof(&sc->rx_data[i], -1);
                                lwkt_serialize_exit(
                                    &sc->rx_data[i].rx_serialize);
                        }
                }
                if (reg_icr & E1000_ICR_TXDW) {
                        struct emx_txdata *tdata = &sc->tx_data[0];

                        lwkt_serialize_enter(&tdata->tx_serialize);
                        emx_txeof(tdata);
                        if (!ifsq_is_empty(tdata->ifsq))
                                ifsq_devstart(tdata->ifsq);
                        lwkt_serialize_exit(&tdata->tx_serialize);
                }
        }

        /* Link status change */
        if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                emx_serialize_skipmain(sc);

                callout_stop(&sc->timer);
                sc->hw.mac.get_link_status = 1;
                emx_update_link_status(sc);

                /* Deal with TX cruft when link lost */
                emx_tx_purge(sc);

                callout_reset(&sc->timer, hz, emx_timer, sc);

                emx_deserialize_skipmain(sc);
        }
        if (reg_icr & E1000_ICR_RXO)
                IFNET_STAT_INC(ifp, iqdrops, 1);

        logif(intr_end);
}
static void
emx_intr_mask(void *xsc)
{
        struct emx_softc *sc = xsc;

        E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
        /*
         * NOTE:
         * ICR.INT_ASSERTED bit will never be set if IMS is 0,
         * so don't check it.
         */
        emx_intr_body(sc, FALSE);
        E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}
static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
        struct emx_softc *sc = ifp->if_softc;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        emx_update_link_status(sc);

        ifmr->ifm_status = IFM_AVALID;
        ifmr->ifm_active = IFM_ETHER;

        if (!sc->link_active) {
                if (sc->hw.mac.autoneg)
                        ifmr->ifm_active |= IFM_NONE;
                else
                        ifmr->ifm_active |= sc->media.ifm_media;
                return;
        }

        ifmr->ifm_status |= IFM_ACTIVE;
        if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
                ifmr->ifm_active |= sc->ifm_flowctrl;

        if (sc->hw.phy.media_type == e1000_media_type_fiber ||
            sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
                ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
        } else {
                switch (sc->link_speed) {
                case 10:
                        ifmr->ifm_active |= IFM_10_T;
                        break;
                case 100:
                        ifmr->ifm_active |= IFM_100_TX;
                        break;
                case 1000:
                        ifmr->ifm_active |= IFM_1000_T;
                        break;
                }
                if (sc->link_duplex == FULL_DUPLEX)
                        ifmr->ifm_active |= IFM_FDX;
                else
                        ifmr->ifm_active |= IFM_HDX;
        }
        if (ifmr->ifm_active & IFM_FDX)
                ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}
static int
emx_media_change(struct ifnet *ifp)
{
        struct emx_softc *sc = ifp->if_softc;
        struct ifmedia *ifm = &sc->media;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
                return EINVAL;

        switch (IFM_SUBTYPE(ifm->ifm_media)) {
        case IFM_AUTO:
                sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
                sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
                break;

        case IFM_1000_SX:
        case IFM_1000_T:
                sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
                sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
                break;

        case IFM_100_TX:
                if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
                        sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
                } else {
                        if (IFM_OPTIONS(ifm->ifm_media) &
                            (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
                                if_printf(ifp, "Flow control is not "
                                    "allowed for half-duplex\n");
                                return EINVAL;
                        }
                        sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
                }
                sc->hw.mac.autoneg = FALSE;
                sc->hw.phy.autoneg_advertised = 0;
                break;

        case IFM_10_T:
                if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
                        sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
                } else {
                        if (IFM_OPTIONS(ifm->ifm_media) &
                            (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
                                if_printf(ifp, "Flow control is not "
                                    "allowed for half-duplex\n");
                                return EINVAL;
                        }
                        sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
                }
                sc->hw.mac.autoneg = FALSE;
                sc->hw.phy.autoneg_advertised = 0;
                break;

        default:
                if_printf(ifp, "Unsupported media type %d\n",
                    IFM_SUBTYPE(ifm->ifm_media));
                return EINVAL;
        }
        sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

        if (ifp->if_flags & IFF_RUNNING)
                emx_init(sc);

        return 0;
}
static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
        bus_dma_segment_t segs[EMX_MAX_SCATTER];
        bus_dmamap_t map;
        struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
        struct e1000_tx_desc *ctxd = NULL;
        struct mbuf *m_head = *m_headp;
        uint32_t txd_upper, txd_lower, cmd = 0;
        int maxsegs, nsegs, i, j, first, last = 0, error;

        if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
                error = emx_tso_pullup(tdata, m_headp);
                if (error)
                        return error;
                m_head = *m_headp;
        }

        txd_upper = txd_lower = 0;

        /*
         * Capture the first descriptor index; this descriptor
         * will have the index of the EOP, which is the only one
         * that now gets a DONE bit writeback.
         */
        first = tdata->next_avail_tx_desc;
        tx_buffer = &tdata->tx_buf[first];
        tx_buffer_mapped = tx_buffer;
        map = tx_buffer->map;

        maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
        KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
        if (maxsegs > EMX_MAX_SCATTER)
                maxsegs = EMX_MAX_SCATTER;

        error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
            segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
        if (error) {
                m_freem(*m_headp);
                *m_headp = NULL;
                return error;
        }
        bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

        tdata->tx_nsegs += nsegs;
        *segs_used += nsegs;

        if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
                /* TSO will consume one TX desc */
                i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
                tdata->tx_nsegs += i;
                *segs_used += i;
        } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
                /* TX csum offloading will consume one TX desc */
                i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
                tdata->tx_nsegs += i;
                *segs_used += i;
        }

        /* Handle VLAN tag */
        if (m_head->m_flags & M_VLANTAG) {
                /* Set the vlan id. */
                txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
                /* Tell hardware to add tag */
                txd_lower |= htole32(E1000_TXD_CMD_VLE);
        }

        i = tdata->next_avail_tx_desc;

        /* Set up our transmit descriptors */
        for (j = 0; j < nsegs; j++) {
                tx_buffer = &tdata->tx_buf[i];
                ctxd = &tdata->tx_desc_base[i];

                ctxd->buffer_addr = htole64(segs[j].ds_addr);
                ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
                    txd_lower | segs[j].ds_len);
                ctxd->upper.data = htole32(txd_upper);

                last = i;
                if (++i == tdata->num_tx_desc)
                        i = 0;
        }

        tdata->next_avail_tx_desc = i;

        KKASSERT(tdata->num_tx_desc_avail > nsegs);
        tdata->num_tx_desc_avail -= nsegs;

        tx_buffer->m_head = m_head;
        tx_buffer_mapped->map = tx_buffer->map;
        tx_buffer->map = map;

        if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
                tdata->tx_nsegs = 0;

                /*
                 * Report Status (RS) is turned on
                 * every tx_intr_nsegs descriptors.
                 */
                cmd = E1000_TXD_CMD_RS;

                /*
                 * Keep track of the descriptor, which will
                 * be written back by hardware.
                 */
                tdata->tx_dd[tdata->tx_dd_tail] = last;
                EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
                KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
        }

        /*
         * Last Descriptor of Packet needs End Of Packet (EOP)
         */
        ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);
        /*
         * Defer TDT updating, until enough descriptors are set up
         */
        *idx = i;
#ifdef EMX_TSS_DEBUG
        tdata->tx_pkts++;               /* per-queue TX statistics */
#endif

        return 0;
}
static void
emx_set_promisc(struct emx_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        uint32_t reg_rctl;

        reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

        if (ifp->if_flags & IFF_PROMISC) {
                reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
                /* Turn this on if you want to see bad packets */
                if (emx_debug_sbp)
                        reg_rctl |= E1000_RCTL_SBP;
                E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
        } else if (ifp->if_flags & IFF_ALLMULTI) {
                reg_rctl |= E1000_RCTL_MPE;
                reg_rctl &= ~E1000_RCTL_UPE;
                E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
        }
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
        uint32_t reg_rctl;

        reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

        reg_rctl &= ~E1000_RCTL_UPE;
        reg_rctl &= ~E1000_RCTL_MPE;
        reg_rctl &= ~E1000_RCTL_SBP;
        E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}
static void
emx_set_multi(struct emx_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        struct ifmultiaddr *ifma;
        uint32_t reg_rctl = 0;
        uint8_t *mta;
        int mcnt = 0;

        mta = sc->mta;
        bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;

                if (mcnt == EMX_MCAST_ADDR_MAX)
                        break;

                bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
                    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
                mcnt++;
        }

        if (mcnt >= EMX_MCAST_ADDR_MAX) {
                reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
                reg_rctl |= E1000_RCTL_MPE;
                E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
        } else {
                e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
        }
}
/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
        struct emx_softc *sc = xsc;
        struct ifnet *ifp = &sc->arpcom.ac_if;

        lwkt_serialize_enter(&sc->main_serialize);

        emx_update_link_status(sc);
        emx_update_stats(sc);

        /* Reset LAA into RAR[0] on 82571 */
        if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
                e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

        if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
                emx_print_hw_stats(sc);

        emx_smartspeed(sc);

        callout_reset(&sc->timer, hz, emx_timer, sc);

        lwkt_serialize_exit(&sc->main_serialize);
}
static void
emx_update_link_status(struct emx_softc *sc)
{
        struct e1000_hw *hw = &sc->hw;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        device_t dev = sc->dev;
        uint32_t link_check = 0;

        /* Get the cached link value or read phy for real */
        switch (hw->phy.media_type) {
        case e1000_media_type_copper:
                if (hw->mac.get_link_status) {
                        /* Do the work to read phy */
                        e1000_check_for_link(hw);
                        link_check = !hw->mac.get_link_status;
                        if (link_check) /* ESB2 fix */
                                e1000_cfg_on_link_up(hw);
                } else {
                        link_check = TRUE;
                }
                break;

        case e1000_media_type_fiber:
                e1000_check_for_link(hw);
                link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
                break;

        case e1000_media_type_internal_serdes:
                e1000_check_for_link(hw);
                link_check = sc->hw.mac.serdes_has_link;
                break;

        case e1000_media_type_unknown:
        default:
                break;
        }

        /* Now check for a transition */
        if (link_check && sc->link_active == 0) {
                e1000_get_speed_and_duplex(hw, &sc->link_speed,
                    &sc->link_duplex);

                /*
                 * Check if we should enable/disable SPEED_MODE bit on
                 * 82571/82572
                 */
                if (sc->link_speed != SPEED_1000 &&
                    (hw->mac.type == e1000_82571 ||
                     hw->mac.type == e1000_82572)) {
                        uint32_t tarc0;

                        tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
                        tarc0 &= ~EMX_TARC_SPEED_MODE;
                        E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
                }
                if (bootverbose) {
                        char flowctrl[IFM_ETH_FC_STRLEN];

                        e1000_fc2str(hw->fc.current_mode, flowctrl,
                            sizeof(flowctrl));
                        device_printf(dev, "Link is up %d Mbps %s, "
                            "Flow control: %s\n",
                            sc->link_speed,
                            (sc->link_duplex == FULL_DUPLEX) ?
                            "Full Duplex" : "Half Duplex",
                            flowctrl);
                }
                if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
                        e1000_force_flowctrl(hw, sc->ifm_flowctrl);
                sc->link_active = 1;
                sc->smartspeed = 0;
                ifp->if_baudrate = sc->link_speed * 1000000;
                ifp->if_link_state = LINK_STATE_UP;
                if_link_state_change(ifp);
        } else if (!link_check && sc->link_active == 1) {
                ifp->if_baudrate = sc->link_speed = 0;
                sc->link_duplex = 0;
                if (bootverbose)
                        device_printf(dev, "Link is Down\n");
                sc->link_active = 0;
                ifp->if_link_state = LINK_STATE_DOWN;
                if_link_state_change(ifp);
        }
}
static void
emx_stop(struct emx_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int i;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        emx_disable_intr(sc);

        callout_stop(&sc->timer);

        ifp->if_flags &= ~IFF_RUNNING;
        for (i = 0; i < sc->tx_ring_cnt; ++i) {
                struct emx_txdata *tdata = &sc->tx_data[i];

                ifsq_clr_oactive(tdata->ifsq);
                ifsq_watchdog_stop(&tdata->tx_watchdog);
                tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;
        }

        /*
         * Disable multiple receive queues.
         *
         * NOTE:
         * We should disable multiple receive queues before
         * resetting the hardware.
         */
        E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

        e1000_reset_hw(&sc->hw);
        E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

        for (i = 0; i < sc->tx_ring_cnt; ++i)
                emx_free_tx_ring(&sc->tx_data[i]);
        for (i = 0; i < sc->rx_ring_cnt; ++i)
                emx_free_rx_ring(&sc->rx_data[i]);
}
static int
emx_reset(struct emx_softc *sc)
{
        device_t dev = sc->dev;
        uint16_t rx_buffer_size;
        uint32_t pba;

        /* Set up smart power down as default off on newer adapters. */
        if (!emx_smart_pwr_down &&
            (sc->hw.mac.type == e1000_82571 ||
             sc->hw.mac.type == e1000_82572)) {
                uint16_t phy_tmp = 0;

                /* Speed up time to link by disabling smart power down. */
                e1000_read_phy_reg(&sc->hw,
                    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
                phy_tmp &= ~IGP02E1000_PM_SPD;
                e1000_write_phy_reg(&sc->hw,
                    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
        }

        /*
         * Packet Buffer Allocation (PBA)
         * Writing PBA sets the receive portion of the buffer;
         * the remainder is used for the transmit buffer.
         */
        switch (sc->hw.mac.type) {
        /* Total Packet Buffer on these is 48K */
        case e1000_82571:
        case e1000_82572:
        case e1000_80003es2lan:
                pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
                break;

        case e1000_82573: /* 82573: Total Packet Buffer is 32K */
                pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
                break;

        case e1000_82574:
                pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
                break;

        case e1000_pch_lpt:
        case e1000_pch_spt:
                pba = E1000_PBA_26K;
                break;

        default:
                /* Devices before 82547 had a Packet Buffer of 64K. */
                if (sc->hw.mac.max_frame_size > 8192)
                        pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
                else
                        pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
                break;
        }
        E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);

        /*
         * These parameters control the automatic generation (Tx) and
         * response (Rx) to Ethernet PAUSE frames.
         * - High water mark should allow for at least two frames to be
         *   received after sending an XOFF.
         * - Low water mark works best when it is very near the high water mark.
         *   This allows the receiver to restart by sending XON when it has
         *   drained a bit.  Here we use an arbitrary value of 1500 which will
         *   restart after one full frame is pulled from the buffer.  There
         *   could be several smaller frames in the buffer and if so they will
         *   not trigger the XON until their total number reduces the buffer
         *   by 1500.
         * - The pause time is fairly large at 1000 x 512ns = 512 usec.
         */
        rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;

        sc->hw.fc.high_water = rx_buffer_size -
            roundup2(sc->hw.mac.max_frame_size, 1024);
        sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;
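        /*
         * A worked example (a sketch): with a 32K RX packet buffer and
         * a standard 1518-byte max frame, high_water = 32768 -
         * roundup2(1518, 1024) = 32768 - 2048 = 30720 bytes and
         * low_water = 30720 - 1500 = 29220 bytes, so XOFF goes out
         * while there is still room for at least one more full frame,
         * and XON resumes once roughly one frame has drained.
         */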
        sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
        sc->hw.fc.send_xon = TRUE;
        sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);

        /*
         * Device specific overrides/settings
         */
        if (sc->hw.mac.type == e1000_pch_lpt ||
            sc->hw.mac.type == e1000_pch_spt) {
                sc->hw.fc.high_water = 0x5C20;
                sc->hw.fc.low_water = 0x5048;
                sc->hw.fc.pause_time = 0x0650;
                sc->hw.fc.refresh_time = 0x0400;
                /* Jumbos need adjusted PBA */
                if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
                        E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
                else
                        E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
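                /*
                 * NOTE: the PBA register takes KB units, so the raw
                 * 12/26 written above mean a 12KB or 26KB receive
                 * allocation, the same units as the E1000_PBA_12K/26K
                 * style constants used earlier in this function.
                 */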
        } else if (sc->hw.mac.type == e1000_80003es2lan) {
                sc->hw.fc.pause_time = 0xFFFF;
        }

        /* Issue a global reset */
        e1000_reset_hw(&sc->hw);
        E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
        emx_disable_aspm(sc);

        if (e1000_init_hw(&sc->hw) < 0) {
                device_printf(dev, "Hardware Initialization Failed\n");
                return EIO;
        }

        E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
        e1000_get_phy_info(&sc->hw);
        e1000_check_for_link(&sc->hw);

        return 0;
}
static void
emx_setup_ifp(struct emx_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int i;

        if_initname(ifp, device_get_name(sc->dev),
            device_get_unit(sc->dev));
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_init = emx_init;
        ifp->if_ioctl = emx_ioctl;
        ifp->if_start = emx_start;
#ifdef IFPOLL_ENABLE
        ifp->if_npoll = emx_npoll;
#endif
        ifp->if_serialize = emx_serialize;
        ifp->if_deserialize = emx_deserialize;
        ifp->if_tryserialize = emx_tryserialize;
#ifdef INVARIANTS
        ifp->if_serialize_assert = emx_serialize_assert;
#endif
        ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc;

        ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
        ifq_set_ready(&ifp->if_snd);
        ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

        ifp->if_mapsubq = ifq_mapsubq_mask;
        ifq_set_subq_mask(&ifp->if_snd, 0);

        ether_ifattach(ifp, sc->hw.mac.addr, NULL);

        ifp->if_capabilities = IFCAP_HWCSUM |
            IFCAP_VLAN_HWTAGGING |
            IFCAP_VLAN_MTU |
            IFCAP_TSO;
        if (sc->rx_ring_cnt > 1)
                ifp->if_capabilities |= IFCAP_RSS;
        ifp->if_capenable = ifp->if_capabilities;
        ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO;

        /*
         * Tell the upper layer(s) we support long frames.
         */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        for (i = 0; i < sc->tx_ring_cnt; ++i) {
                struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
                struct emx_txdata *tdata = &sc->tx_data[i];

                ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res));
                ifsq_set_priv(ifsq, tdata);
                ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize);
                tdata->ifsq = ifsq;

                ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog);
        }

        /*
         * Specify the media types supported by this sc and register
         * callbacks to update media and link information
         */
        if (sc->hw.phy.media_type == e1000_media_type_fiber ||
            sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
                ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
                    0, NULL);
        } else {
                ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
                ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
                    0, NULL);
                ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
                ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
                    0, NULL);
                if (sc->hw.phy.type != e1000_phy_ife) {
                        ifmedia_add(&sc->media,
                            IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
                }
        }
        ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}
/*
 * Workaround for SmartSpeed on 82541 and 82547 controllers
 */
static void
emx_smartspeed(struct emx_softc *sc)
{
        uint16_t phy_tmp;

        if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
            sc->hw.mac.autoneg == 0 ||
            (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
                return;

        if (sc->smartspeed == 0) {
                /*
                 * If Master/Slave config fault is asserted twice,
                 * we assume back-to-back
                 */
                e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
                if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
                        return;
                e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
                if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
                        e1000_read_phy_reg(&sc->hw,
                            PHY_1000T_CTRL, &phy_tmp);
                        if (phy_tmp & CR_1000T_MS_ENABLE) {
                                phy_tmp &= ~CR_1000T_MS_ENABLE;
                                e1000_write_phy_reg(&sc->hw,
                                    PHY_1000T_CTRL, phy_tmp);
                                sc->smartspeed++;
                                if (sc->hw.mac.autoneg &&
                                    !e1000_phy_setup_autoneg(&sc->hw) &&
                                    !e1000_read_phy_reg(&sc->hw,
                                    PHY_CONTROL, &phy_tmp)) {
                                        phy_tmp |= MII_CR_AUTO_NEG_EN |
                                            MII_CR_RESTART_AUTO_NEG;
                                        e1000_write_phy_reg(&sc->hw,
                                            PHY_CONTROL, phy_tmp);
                                }
                        }
                }
                return;
        } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) {
                /* If still no link, perhaps using 2/3 pair cable */
                e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
                phy_tmp |= CR_1000T_MS_ENABLE;
                e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
                if (sc->hw.mac.autoneg &&
                    !e1000_phy_setup_autoneg(&sc->hw) &&
                    !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
                        phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
                        e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
                }
        }

        /* Restart process after EMX_SMARTSPEED_MAX iterations */
        if (sc->smartspeed++ == EMX_SMARTSPEED_MAX)
                sc->smartspeed = 0;
}
2212 emx_create_tx_ring(struct emx_txdata *tdata)
2214 device_t dev = tdata->sc->dev;
2215 struct emx_txbuf *tx_buffer;
2216 int error, i, tsize, ntxd;
2219 * Validate number of transmit descriptors. It must not exceed
2220 * hardware maximum, and must be multiple of E1000_DBA_ALIGN.
2222 ntxd = device_getenv_int(dev, "txd", emx_txd);
2223 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
2224 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) {
2225 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
2226 EMX_DEFAULT_TXD, ntxd);
2227 tdata->num_tx_desc = EMX_DEFAULT_TXD;
2229 tdata->num_tx_desc = ntxd;
2233 * Allocate Transmit Descriptor ring
2235 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc),
2237 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag,
2238 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
2239 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap,
2240 &tdata->tx_desc_paddr);
2241 if (tdata->tx_desc_base == NULL) {
2242 device_printf(dev, "Unable to allocate tx_desc memory\n");
2246 tsize = __VM_CACHELINE_ALIGN(
2247 sizeof(struct emx_txbuf) * tdata->num_tx_desc);
2248 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);
2251 * Create DMA tags for tx buffers
2253 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */
2254 1, 0, /* alignment, bounds */
2255 BUS_SPACE_MAXADDR, /* lowaddr */
2256 BUS_SPACE_MAXADDR, /* highaddr */
2257 NULL, NULL, /* filter, filterarg */
2258 EMX_TSO_SIZE, /* maxsize */
2259 EMX_MAX_SCATTER, /* nsegments */
2260 EMX_MAX_SEGSIZE, /* maxsegsize */
2261 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2262 BUS_DMA_ONEBPAGE, /* flags */
2265 device_printf(dev, "Unable to allocate TX DMA tag\n");
2266 kfree(tdata->tx_buf, M_DEVBUF);
2267 tdata->tx_buf = NULL;
2272 * Create DMA maps for tx buffers
2274 for (i = 0; i < tdata->num_tx_desc; i++) {
2275 tx_buffer = &tdata->tx_buf[i];
2277 error = bus_dmamap_create(tdata->txtag,
2278 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2281 device_printf(dev, "Unable to create TX DMA map\n");
2282 emx_destroy_tx_ring(tdata, i);
2288 * Setup TX parameters
2290 tdata->spare_tx_desc = EMX_TX_SPARE;
2291 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG;
 * Keep the following relationship between spare_tx_desc, oact_tx_desc
2295 * and tx_intr_nsegs:
2296 * (spare_tx_desc + EMX_TX_RESERVED) <=
2297 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs
2299 tdata->oact_tx_desc = tdata->num_tx_desc / 8;
2300 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX)
2301 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX;
2302 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED)
2303 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED;
2305 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16;
2306 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc)
2307 tdata->tx_intr_nsegs = tdata->oact_tx_desc;
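/*
 * Worked example of the sizing above, assuming num_tx_desc = 512
 * and an EMX_TX_OACTIVE_MAX larger than 64 (both values assumed):
 * oact_tx_desc = 512 / 8 = 64 and tx_intr_nsegs = 512 / 16 = 32,
 * which is then raised to 64, preserving the documented invariant
 * oact_tx_desc <= tx_intr_nsegs.
 */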
 * Pull up an extra 4 bytes into the first data segment for TSO,
 * see: 82571/82572 specification update errata #7
 *
 * The same applies to I217 (and maybe I218 and I219).
 *
 * NOTE: 4 bytes instead of the 2 bytes mentioned in the errata
 * are pulled, mainly to keep the rest of the data properly
 * aligned.
 */
2319 if (tdata->sc->hw.mac.type == e1000_82571 ||
2320 tdata->sc->hw.mac.type == e1000_82572 ||
2321 tdata->sc->hw.mac.type == e1000_pch_lpt ||
2322 tdata->sc->hw.mac.type == e1000_pch_spt)
2323 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX;
2329 emx_init_tx_ring(struct emx_txdata *tdata)
2331 /* Clear the old ring contents */
2332 bzero(tdata->tx_desc_base,
2333 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc);
2336 tdata->next_avail_tx_desc = 0;
2337 tdata->next_tx_to_clean = 0;
2338 tdata->num_tx_desc_avail = tdata->num_tx_desc;
2340 tdata->tx_flags |= EMX_TXFLAG_ENABLED;
2341 if (tdata->sc->tx_ring_inuse > 1) {
2342 tdata->tx_flags |= EMX_TXFLAG_FORCECTX;
2344 if_printf(&tdata->sc->arpcom.ac_if,
2345 "TX %d force ctx setup\n", tdata->idx);
2351 emx_init_tx_unit(struct emx_softc *sc)
uint32_t tctl, tarc, tipg = 0, txdctl;
uint64_t bus_addr;
int i;
2356 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2357 struct emx_txdata *tdata = &sc->tx_data[i];
2360 /* Setup the Base and Length of the Tx Descriptor Ring */
2361 bus_addr = tdata->tx_desc_paddr;
2362 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i),
2363 tdata->num_tx_desc * sizeof(struct e1000_tx_desc));
2364 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i),
2365 (uint32_t)(bus_addr >> 32));
2366 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i),
2367 (uint32_t)bus_addr);
2368 /* Setup the HW Tx Head and Tail descriptor pointers */
2369 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0);
2370 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0);
2373 /* Set the default values for the Tx Inter Packet Gap timer */
2374 switch (sc->hw.mac.type) {
2375 case e1000_80003es2lan:
2376 tipg = DEFAULT_82543_TIPG_IPGR1;
2377 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
    E1000_TIPG_IPGR2_SHIFT;
break;

default:
2382 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
2383 sc->hw.phy.media_type == e1000_media_type_internal_serdes)
2384 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
else
	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2387 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
break;
}
2392 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg);
2394 /* NOTE: 0 is not allowed for TIDV */
2395 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1);
2396 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0);
2399 * Errata workaround (obtained from Linux). This is necessary
2400 * to make multiple TX queues work on 82574.
2401 * XXX can't find it in any published errata though.
2403 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0));
2404 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl);
2406 if (sc->hw.mac.type == e1000_82571 ||
2407 sc->hw.mac.type == e1000_82572) {
2408 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2409 tarc |= EMX_TARC_SPEED_MODE;
2410 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2411 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2412 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
tarc |= 1;
E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2415 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
tarc |= 1;
E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2420 /* Program the Transmit Control Register */
2421 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL);
2422 tctl &= ~E1000_TCTL_CT;
2423 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2424 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2425 tctl |= E1000_TCTL_MULR;
2427 /* This write will effectively turn on the transmit unit. */
2428 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);
2430 if (sc->hw.mac.type == e1000_82571 ||
2431 sc->hw.mac.type == e1000_82572 ||
2432 sc->hw.mac.type == e1000_80003es2lan) {
2433 /* Bit 28 of TARC1 must be cleared when MULR is enabled */
2434 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
tarc &= ~(1 << 28);
E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2439 if (sc->tx_ring_inuse > 1) {
2440 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2441 tarc &= ~EMX_TARC_COUNT_MASK;
2443 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2445 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2446 tarc &= ~EMX_TARC_COUNT_MASK;
2448 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2453 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc)
2455 struct emx_txbuf *tx_buffer;
2458 /* Free Transmit Descriptor ring */
2459 if (tdata->tx_desc_base) {
2460 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap);
2461 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base,
2462 tdata->tx_desc_dmap);
2463 bus_dma_tag_destroy(tdata->tx_desc_dtag);
2465 tdata->tx_desc_base = NULL;
2468 if (tdata->tx_buf == NULL)
2471 for (i = 0; i < ndesc; i++) {
2472 tx_buffer = &tdata->tx_buf[i];
2474 KKASSERT(tx_buffer->m_head == NULL);
2475 bus_dmamap_destroy(tdata->txtag, tx_buffer->map);
2477 bus_dma_tag_destroy(tdata->txtag);
2479 kfree(tdata->tx_buf, M_DEVBUF);
2480 tdata->tx_buf = NULL;
 * The offload context needs to be set when we transfer the first
 * packet of a particular protocol (TCP/UDP). This routine has been
 * enhanced to deal with inserted VLAN headers.
 *
 * If the new packet's ether header length, IP header length and
 * csum offloading type are the same as the previous packet's, we
 * should avoid allocating a new csum context descriptor, mainly to
 * take advantage of the pipeline effect of the TX data read request.
 *
 * This function returns the number of TX descriptors allocated for
 * the csum context (1 if a new context is set up, 0 if reused).
 */
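/*
 * Worked example (assumed values; untagged IPv4/TCP, 20-byte IP
 * header): ehdrlen = 14 and ip_hlen = 20 yield ipcss = 14,
 * ipcse = 33, ipcso = 14 + 10 = 24, tucss = 34 and
 * tucso = 34 + 16 = 50, where 10 and 16 are
 * offsetof(struct ip, ip_sum) and offsetof(struct tcphdr, th_sum).
 */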
2497 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2498 uint32_t *txd_upper, uint32_t *txd_lower)
2500 struct e1000_context_desc *TXD;
2501 int curr_txd, ehdrlen, csum_flags;
2502 uint32_t cmd, hdr_len, ip_hlen;
2504 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2505 ip_hlen = mp->m_pkthdr.csum_iphlen;
2506 ehdrlen = mp->m_pkthdr.csum_lhlen;
2508 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2509 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
2510 tdata->csum_flags == csum_flags) {
2512 * Same csum offload context as the previous packets;
2515 *txd_upper = tdata->csum_txd_upper;
*txd_lower = tdata->csum_txd_lower;
	return 0;
}

/*
2521 * Setup a new csum offload context.
2524 curr_txd = tdata->next_avail_tx_desc;
2525 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2529 /* Setup of IP header checksum. */
2530 if (csum_flags & CSUM_IP) {
2532 * Start offset for header checksum calculation.
2533 * End offset for header checksum calculation.
2534 * Offset of place to put the checksum.
2536 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2537 TXD->lower_setup.ip_fields.ipcse =
2538 htole16(ehdrlen + ip_hlen - 1);
2539 TXD->lower_setup.ip_fields.ipcso =
2540 ehdrlen + offsetof(struct ip, ip_sum);
2541 cmd |= E1000_TXD_CMD_IP;
2542 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2544 hdr_len = ehdrlen + ip_hlen;
2546 if (csum_flags & CSUM_TCP) {
2548 * Start offset for payload checksum calculation.
2549 * End offset for payload checksum calculation.
2550 * Offset of place to put the checksum.
2552 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2553 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2554 TXD->upper_setup.tcp_fields.tucso =
2555 hdr_len + offsetof(struct tcphdr, th_sum);
2556 cmd |= E1000_TXD_CMD_TCP;
2557 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2558 } else if (csum_flags & CSUM_UDP) {
2560 * Start offset for header checksum calculation.
2561 * End offset for header checksum calculation.
2562 * Offset of place to put the checksum.
2564 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2565 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2566 TXD->upper_setup.tcp_fields.tucso =
2567 hdr_len + offsetof(struct udphdr, uh_sum);
2568 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2571 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2572 E1000_TXD_DTYP_D; /* Data descr */
2574 /* Save the information for this csum offloading context */
2575 tdata->csum_lhlen = ehdrlen;
2576 tdata->csum_iphlen = ip_hlen;
2577 tdata->csum_flags = csum_flags;
2578 tdata->csum_txd_upper = *txd_upper;
2579 tdata->csum_txd_lower = *txd_lower;
2581 TXD->tcp_seg_setup.data = htole32(0);
2582 TXD->cmd_and_length =
2583 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
if (++curr_txd == tdata->num_tx_desc)
	curr_txd = 0;
2588 KKASSERT(tdata->num_tx_desc_avail > 0);
2589 tdata->num_tx_desc_avail--;
tdata->next_avail_tx_desc = curr_txd;
return 1;
2596 emx_txeof(struct emx_txdata *tdata)
2598 struct emx_txbuf *tx_buffer;
2599 int first, num_avail;
if (tdata->tx_dd_head == tdata->tx_dd_tail)
	return;

if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
	return;
2607 num_avail = tdata->num_tx_desc_avail;
2608 first = tdata->next_tx_to_clean;
2610 while (tdata->tx_dd_head != tdata->tx_dd_tail) {
2611 int dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2612 struct e1000_tx_desc *tx_desc;
2614 tx_desc = &tdata->tx_desc_base[dd_idx];
2615 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2616 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
if (++dd_idx == tdata->num_tx_desc)
	dd_idx = 0;
2621 while (first != dd_idx) {
2626 tx_buffer = &tdata->tx_buf[first];
2627 if (tx_buffer->m_head) {
bus_dmamap_unload(tdata->txtag,
    tx_buffer->map);
2630 m_freem(tx_buffer->m_head);
2631 tx_buffer->m_head = NULL;
if (++first == tdata->num_tx_desc)
	first = 0;
2641 tdata->next_tx_to_clean = first;
2642 tdata->num_tx_desc_avail = num_avail;
2644 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2645 tdata->tx_dd_head = 0;
2646 tdata->tx_dd_tail = 0;
2649 if (!EMX_IS_OACTIVE(tdata)) {
2650 ifsq_clr_oactive(tdata->ifsq);
2652 /* All clean, turn off the timer */
2653 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2654 tdata->tx_watchdog.wd_timer = 0;
2659 emx_tx_collect(struct emx_txdata *tdata)
2661 struct emx_txbuf *tx_buffer;
2662 int tdh, first, num_avail, dd_idx = -1;
if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
	return;

tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx));
if (tdh == tdata->next_tx_to_clean)
	return;
2671 if (tdata->tx_dd_head != tdata->tx_dd_tail)
2672 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2674 num_avail = tdata->num_tx_desc_avail;
2675 first = tdata->next_tx_to_clean;
2677 while (first != tdh) {
2682 tx_buffer = &tdata->tx_buf[first];
2683 if (tx_buffer->m_head) {
bus_dmamap_unload(tdata->txtag,
    tx_buffer->map);
2686 m_freem(tx_buffer->m_head);
2687 tx_buffer->m_head = NULL;
2690 if (first == dd_idx) {
2691 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2692 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2693 tdata->tx_dd_head = 0;
2694 tdata->tx_dd_tail = 0;
	dd_idx = -1;
} else {
	dd_idx = tdata->tx_dd[tdata->tx_dd_head];
}
if (++first == tdata->num_tx_desc)
	first = 0;
2704 tdata->next_tx_to_clean = first;
2705 tdata->num_tx_desc_avail = num_avail;
2707 if (!EMX_IS_OACTIVE(tdata)) {
2708 ifsq_clr_oactive(tdata->ifsq);
2710 /* All clean, turn off the timer */
2711 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2712 tdata->tx_watchdog.wd_timer = 0;
 * When link is lost, sometimes there is still work in the TX ring
 * which would result in a watchdog; rather than allow that, do an
 * attempted cleanup and then reinit here. Note that this has been
 * seen mostly with fiber adapters.
 */
2723 emx_tx_purge(struct emx_softc *sc)
if (sc->link_active)
	return;
2730 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2731 struct emx_txdata *tdata = &sc->tx_data[i];
2733 if (tdata->tx_watchdog.wd_timer) {
2734 emx_tx_collect(tdata);
2735 if (tdata->tx_watchdog.wd_timer) {
2736 if_printf(&sc->arpcom.ac_if,
2737 "Link lost, TX pending, reinit\n");
2746 emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2749 bus_dma_segment_t seg;
2751 struct emx_rxbuf *rx_buffer;
2754 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
2757 if_printf(&rdata->sc->arpcom.ac_if,
2758 "Unable to allocate RX mbuf\n");
2762 m->m_len = m->m_pkthdr.len = MCLBYTES;
2764 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN)
2765 m_adj(m, ETHER_ALIGN);
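/*
 * Sketch of the adjustment above: ETHER_ALIGN is 2, so skipping
 * 2 bytes places the 14-byte Ethernet header at offset 2 and the
 * IP header at offset 16, i.e. 32-bit aligned, whenever the
 * maximum frame fits in the cluster.
 */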
2767 error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
2768 rdata->rx_sparemap, m,
2769 &seg, 1, &nseg, BUS_DMA_NOWAIT);
2773 if_printf(&rdata->sc->arpcom.ac_if,
2774 "Unable to load RX mbuf\n");
2779 rx_buffer = &rdata->rx_buf[i];
2780 if (rx_buffer->m_head != NULL)
2781 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2783 map = rx_buffer->map;
2784 rx_buffer->map = rdata->rx_sparemap;
2785 rdata->rx_sparemap = map;
2787 rx_buffer->m_head = m;
2788 rx_buffer->paddr = seg.ds_addr;
2790 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
2795 emx_create_rx_ring(struct emx_rxdata *rdata)
2797 device_t dev = rdata->sc->dev;
2798 struct emx_rxbuf *rx_buffer;
2799 int i, error, rsize, nrxd;
 * Validate number of receive descriptors. It must not exceed
 * the hardware maximum, and must be a multiple of EMX_DBA_ALIGN.
 */
2805 nrxd = device_getenv_int(dev, "rxd", emx_rxd);
2806 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 ||
2807 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) {
2808 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
2809 EMX_DEFAULT_RXD, nrxd);
2810 rdata->num_rx_desc = EMX_DEFAULT_RXD;
2812 rdata->num_rx_desc = nrxd;
2816 * Allocate Receive Descriptor ring
2818 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t),
2820 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag,
2821 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
2822 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap,
2823 &rdata->rx_desc_paddr);
2824 if (rdata->rx_desc == NULL) {
2825 device_printf(dev, "Unable to allocate rx_desc memory\n");
2829 rsize = __VM_CACHELINE_ALIGN(
2830 sizeof(struct emx_rxbuf) * rdata->num_rx_desc);
2831 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);
2834 * Create DMA tag for rx buffers
2836 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */
2837 1, 0, /* alignment, bounds */
2838 BUS_SPACE_MAXADDR, /* lowaddr */
2839 BUS_SPACE_MAXADDR, /* highaddr */
2840 NULL, NULL, /* filter, filterarg */
2841 MCLBYTES, /* maxsize */
2843 MCLBYTES, /* maxsegsize */
2844 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
2847 device_printf(dev, "Unable to allocate RX DMA tag\n");
2848 kfree(rdata->rx_buf, M_DEVBUF);
2849 rdata->rx_buf = NULL;
2854 * Create spare DMA map for rx buffers
2856 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
2857 &rdata->rx_sparemap);
2859 device_printf(dev, "Unable to create spare RX DMA map\n");
2860 bus_dma_tag_destroy(rdata->rxtag);
2861 kfree(rdata->rx_buf, M_DEVBUF);
2862 rdata->rx_buf = NULL;
2867 * Create DMA maps for rx buffers
2869 for (i = 0; i < rdata->num_rx_desc; i++) {
2870 rx_buffer = &rdata->rx_buf[i];
2872 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
2875 device_printf(dev, "Unable to create RX DMA map\n");
2876 emx_destroy_rx_ring(rdata, i);
2884 emx_free_rx_ring(struct emx_rxdata *rdata)
2888 for (i = 0; i < rdata->num_rx_desc; i++) {
2889 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];
2891 if (rx_buffer->m_head != NULL) {
2892 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2893 m_freem(rx_buffer->m_head);
2894 rx_buffer->m_head = NULL;
2898 if (rdata->fmp != NULL)
m_freem(rdata->fmp);
rdata->fmp = NULL;
rdata->lmp = NULL;
2905 emx_free_tx_ring(struct emx_txdata *tdata)
2909 for (i = 0; i < tdata->num_tx_desc; i++) {
2910 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i];
2912 if (tx_buffer->m_head != NULL) {
2913 bus_dmamap_unload(tdata->txtag, tx_buffer->map);
2914 m_freem(tx_buffer->m_head);
2915 tx_buffer->m_head = NULL;
2919 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX;
2921 tdata->csum_flags = 0;
2922 tdata->csum_lhlen = 0;
2923 tdata->csum_iphlen = 0;
2924 tdata->csum_thlen = 0;
2925 tdata->csum_mss = 0;
2926 tdata->csum_pktlen = 0;
2928 tdata->tx_dd_head = 0;
2929 tdata->tx_dd_tail = 0;
2930 tdata->tx_nsegs = 0;
2934 emx_init_rx_ring(struct emx_rxdata *rdata)
2938 /* Reset descriptor ring */
2939 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
2941 /* Allocate new ones. */
2942 for (i = 0; i < rdata->num_rx_desc; i++) {
error = emx_newbuf(rdata, i, 1);
if (error)
	return error;
2948 /* Setup our descriptor pointers */
2949 rdata->next_rx_desc_to_check = 0;
2955 emx_init_rx_unit(struct emx_softc *sc)
2957 struct ifnet *ifp = &sc->arpcom.ac_if;
uint32_t rctl, itr, rfctl;
uint64_t bus_addr;
int i;
2963 * Make sure receives are disabled while setting
2964 * up the descriptor ring
2966 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
2967 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2970 * Set the interrupt throttling rate. Value is calculated
2971 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
2973 if (sc->int_throttle_ceil)
	itr = 1000000000 / 256 / sc->int_throttle_ceil;
else
	itr = 0;
2977 emx_set_itr(sc, itr);
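/*
 * Example of the formula above (ceiling value assumed): with
 * int_throttle_ceil = 10000 interrupts/sec,
 * itr = 1000000000 / 256 / 10000 = 390 units of 256ns, i.e. a
 * minimum inter-interrupt gap of about 99.8us.
 */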
2979 /* Use extended RX descriptor */
2980 rfctl = E1000_RFCTL_EXTEN;
/* Disable accelerated acknowledge */
2983 if (sc->hw.mac.type == e1000_82574)
2984 rfctl |= E1000_RFCTL_ACK_DIS;
2986 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
 * Receive Checksum Offload for TCP and UDP
 *
 * Checksum offloading is also enabled if multiple receive
 * queues are to be supported, since we need it to figure out
 * the RSS hash used to dispatch packets among the RX rings.
 */
2995 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
2996 sc->rx_ring_cnt > 1) {
uint32_t rxcsum;

rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
 * PCSD must be enabled to enable multiple
 * receive queues.
 */
rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
    E1000_RXCSUM_PCSD;
3008 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
3012 * Configure multiple receive queue (RSS)
3014 if (sc->rx_ring_cnt > 1) {
3015 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
3018 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
3019 ("invalid number of RX ring (%d)", sc->rx_ring_cnt));
3023 * When we reach here, RSS has already been disabled
 * in emx_stop(), so we can safely configure the RSS key
3025 * and redirect table.
3031 toeplitz_get_key(key, sizeof(key));
3032 for (i = 0; i < EMX_NRSSRK; ++i) {
3035 rssrk = EMX_RSSRK_VAL(key, i);
3036 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
3038 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
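/*
 * Sketch of the key packing above, assuming EMX_RSSRK_SIZE = 4
 * and EMX_NRSSRK = 10: each RSSRK register holds 4 consecutive
 * bytes of the 40-byte Toeplitz key, so 10 registers cover the
 * whole key.
 */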
 * Configure the RSS redirect table in the following fashion:
3043 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
3046 for (i = 0; i < EMX_RETA_SIZE; ++i) {
3049 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT;
3050 reta |= q << (8 * i);
3052 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
3054 for (i = 0; i < EMX_NRETA; ++i)
3055 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta);
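/*
 * Sketch of the resulting mapping for rx_ring_cnt = 2: the ring
 * index alternates 0,1,0,1,... across the redirect table entries,
 * so packets with even hash values land on ring 0 and those with
 * odd hash values on ring 1, matching the relation documented
 * above.
 */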
3058 * Enable multiple receive queues.
3059 * Enable IPv4 RSS standard hash functions.
3060 * Disable RSS interrupt.
3062 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
3063 E1000_MRQC_ENABLE_RSS_2Q |
3064 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3065 E1000_MRQC_RSS_FIELD_IPV4);
 * XXX TEMPORARY WORKAROUND: on some systems with 82573,
 * long latencies are observed, e.g. on the Lenovo X60. This
 * change eliminates the problem, but since having positive
 * values in RDTR is a known source of problems on other
 * platforms, another solution is being sought.
 */
3075 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
3076 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
3077 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
3080 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3081 struct emx_rxdata *rdata = &sc->rx_data[i];
3084 * Setup the Base and Length of the Rx Descriptor Ring
3086 bus_addr = rdata->rx_desc_paddr;
3087 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
3088 rdata->num_rx_desc * sizeof(emx_rxdesc_t));
3089 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
3090 (uint32_t)(bus_addr >> 32));
3091 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
3092 (uint32_t)bus_addr);
3095 * Setup the HW Rx Head and Tail Descriptor Pointers
3097 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
3098 E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
3099 sc->rx_data[i].num_rx_desc - 1);
3102 if (sc->hw.mac.type >= e1000_pch2lan) {
3103 if (ifp->if_mtu > ETHERMTU)
3104 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
else
	e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
3109 /* Setup the Receive Control Register */
3110 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3111 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3112 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
3113 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3115 /* Make sure VLAN Filters are off */
3116 rctl &= ~E1000_RCTL_VFE;
/* Don't store bad packets */
3119 rctl &= ~E1000_RCTL_SBP;
3122 rctl |= E1000_RCTL_SZ_2048;
3124 if (ifp->if_mtu > ETHERMTU)
3125 rctl |= E1000_RCTL_LPE;
else
	rctl &= ~E1000_RCTL_LPE;
3129 /* Enable Receives */
3130 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
3134 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
3136 struct emx_rxbuf *rx_buffer;
3139 /* Free Receive Descriptor ring */
3140 if (rdata->rx_desc) {
3141 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
3142 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
3143 rdata->rx_desc_dmap);
3144 bus_dma_tag_destroy(rdata->rx_desc_dtag);
3146 rdata->rx_desc = NULL;
3149 if (rdata->rx_buf == NULL)
3152 for (i = 0; i < ndesc; i++) {
3153 rx_buffer = &rdata->rx_buf[i];
3155 KKASSERT(rx_buffer->m_head == NULL);
3156 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
3158 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
3159 bus_dma_tag_destroy(rdata->rxtag);
3161 kfree(rdata->rx_buf, M_DEVBUF);
3162 rdata->rx_buf = NULL;
3166 emx_rxeof(struct emx_rxdata *rdata, int count)
3168 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
3170 emx_rxdesc_t *current_desc;
3172 int i, cpuid = mycpuid;
3174 i = rdata->next_rx_desc_to_check;
3175 current_desc = &rdata->rx_desc[i];
3176 staterr = le32toh(current_desc->rxd_staterr);
if (!(staterr & E1000_RXD_STAT_DD))
	return;
3181 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
3182 struct pktinfo *pi = NULL, pi0;
3183 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
3184 struct mbuf *m = NULL;
3189 mp = rx_buf->m_head;
3192 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3193 * needs to access the last received byte in the mbuf.
3195 bus_dmamap_sync(rdata->rxtag, rx_buf->map,
3196 BUS_DMASYNC_POSTREAD);
3198 len = le16toh(current_desc->rxd_length);
3199 if (staterr & E1000_RXD_STAT_EOP) {
3206 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3208 uint32_t mrq, rss_hash;
 * Save some necessary information
 * before emx_newbuf() destroys it.
 */
3214 if ((staterr & E1000_RXD_STAT_VP) && eop)
3215 vlan = le16toh(current_desc->rxd_vlan);
3217 mrq = le32toh(current_desc->rxd_mrq);
3218 rss_hash = le32toh(current_desc->rxd_rss);
3220 EMX_RSS_DPRINTF(rdata->sc, 10,
3221 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
3222 rdata->idx, mrq, rss_hash);
3224 if (emx_newbuf(rdata, i, 0) != 0) {
3225 IFNET_STAT_INC(ifp, iqdrops, 1);
3229 /* Assign correct length to the current fragment */
3232 if (rdata->fmp == NULL) {
3233 mp->m_pkthdr.len = len;
3234 rdata->fmp = mp; /* Store the first mbuf */
3238 * Chain mbuf's together
3240 rdata->lmp->m_next = mp;
3241 rdata->lmp = rdata->lmp->m_next;
3242 rdata->fmp->m_pkthdr.len += len;
3246 rdata->fmp->m_pkthdr.rcvif = ifp;
3247 IFNET_STAT_INC(ifp, ipackets, 1);
3249 if (ifp->if_capenable & IFCAP_RXCSUM)
3250 emx_rxcsum(staterr, rdata->fmp);
3252 if (staterr & E1000_RXD_STAT_VP) {
rdata->fmp->m_pkthdr.ether_vlantag = vlan;
3255 rdata->fmp->m_flags |= M_VLANTAG;
3261 if (ifp->if_capenable & IFCAP_RSS) {
pi = emx_rssinfo(m, &pi0, mrq, rss_hash);
#ifdef EMX_RSS_DEBUG
	rdata->rx_pkts++;
#endif
} else {
3270 IFNET_STAT_INC(ifp, ierrors, 1);
3272 emx_setup_rxdesc(current_desc, rx_buf);
3273 if (rdata->fmp != NULL) {
3274 m_freem(rdata->fmp);
3282 ifp->if_input(ifp, m, pi, cpuid);
3284 /* Advance our pointers to the next descriptor. */
if (++i == rdata->num_rx_desc)
	i = 0;
3288 current_desc = &rdata->rx_desc[i];
3289 staterr = le32toh(current_desc->rxd_staterr);
3291 rdata->next_rx_desc_to_check = i;
3293 /* Advance the E1000's Receive Queue "Tail Pointer". */
if (--i < 0)
	i = rdata->num_rx_desc - 1;
3296 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i);
3300 emx_enable_intr(struct emx_softc *sc)
3302 uint32_t ims_mask = IMS_ENABLE_MASK;
3304 lwkt_serialize_handler_enable(&sc->main_serialize);
3307 if (sc->hw.mac.type == e1000_82574) {
E1000_WRITE_REG(&sc->hw, EMX_EIAC, EM_MSIX_MASK);
3309 ims_mask |= EM_MSIX_MASK;
3312 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask);
3316 emx_disable_intr(struct emx_softc *sc)
3318 if (sc->hw.mac.type == e1000_82574)
3319 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0);
3320 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
3322 lwkt_serialize_handler_disable(&sc->main_serialize);
 * Bit of a misnomer: what this really means is
 * to enable OS management of the system, i.e.
 * to disable special hardware management features.
 */
3331 emx_get_mgmt(struct emx_softc *sc)
3333 /* A shared code workaround */
3334 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3335 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
3336 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3338 /* disable hardware interception of ARP */
3339 manc &= ~(E1000_MANC_ARP_EN);
3341 /* enable receiving management packets to the host */
3342 manc |= E1000_MANC_EN_MNG2HOST;
3343 #define E1000_MNG2HOST_PORT_623 (1 << 5)
3344 #define E1000_MNG2HOST_PORT_664 (1 << 6)
3345 manc2h |= E1000_MNG2HOST_PORT_623;
3346 manc2h |= E1000_MNG2HOST_PORT_664;
3347 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
3349 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3354 * Give control back to hardware management
3355 * controller if there is one.
3358 emx_rel_mgmt(struct emx_softc *sc)
3360 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3361 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3363 /* re-enable hardware interception of ARP */
3364 manc |= E1000_MANC_ARP_EN;
3365 manc &= ~E1000_MANC_EN_MNG2HOST;
3367 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3372 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3373 * For ASF and Pass Through versions of f/w this means that
3374 * the driver is loaded. For AMT version (only with 82573)
3375 * of the f/w this means that the network i/f is open.
3378 emx_get_hw_control(struct emx_softc *sc)
3380 /* Let firmware know the driver has taken over */
3381 if (sc->hw.mac.type == e1000_82573) {
3384 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3385 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3386 swsm | E1000_SWSM_DRV_LOAD);
3390 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3391 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3392 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3394 sc->flags |= EMX_FLAG_HW_CTRL;
3398 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3399 * For ASF and Pass Through versions of f/w this means that the
3400 * driver is no longer loaded. For AMT version (only with 82573)
3401 * of the f/w this means that the network i/f is closed.
3404 emx_rel_hw_control(struct emx_softc *sc)
3406 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
3408 sc->flags &= ~EMX_FLAG_HW_CTRL;
/* Let firmware take over control of h/w */
3411 if (sc->hw.mac.type == e1000_82573) {
3414 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3415 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3416 swsm & ~E1000_SWSM_DRV_LOAD);
3420 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3421 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3422 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3427 emx_is_valid_eaddr(const uint8_t *addr)
3429 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
	return 0;
return 1;
 * Enable PCI Wake On LAN capability
3441 emx_enable_wol(device_t dev)
3443 uint16_t cap, status;
/* First find the capabilities pointer */
3447 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3449 /* Read the PM Capabilities */
3450 id = pci_read_config(dev, cap, 1);
if (id != PCIY_PMG)	/* Something wrong */
	return;
3455 * OK, we have the power capabilities,
3456 * so now get the status register
3458 cap += PCIR_POWER_STATUS;
3459 status = pci_read_config(dev, cap, 2);
3460 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3461 pci_write_config(dev, cap, status, 2);
3465 emx_update_stats(struct emx_softc *sc)
3467 struct ifnet *ifp = &sc->arpcom.ac_if;
3469 if (sc->hw.phy.media_type == e1000_media_type_copper ||
3470 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3471 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
3472 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
3474 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
3475 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
3476 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
3477 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
3479 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
3480 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
3481 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
3482 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
3483 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
3484 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
3485 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
3486 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
3487 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
3488 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
3489 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
3490 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
3491 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
3492 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
3493 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
3494 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
3495 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
3496 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
3497 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
3498 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
/*
 * For the 64-bit byte counters the low dword must be read first;
 * both registers clear on the read of the high dword.
 */
3503 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH);
3504 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH);
3506 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
3507 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
3508 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
3509 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
3510 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
3512 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
3513 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
3515 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
3516 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
3517 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
3518 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
3519 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
3520 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
3521 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
3522 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
3523 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
3524 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
3526 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
3527 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC);
3528 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS);
3529 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR);
3530 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC);
3531 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC);
3533 IFNET_STAT_SET(ifp, collisions, sc->stats.colc);
3536 IFNET_STAT_SET(ifp, ierrors,
3537 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc +
3538 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr);
3541 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol);
3545 emx_print_debug_info(struct emx_softc *sc)
3547 device_t dev = sc->dev;
3548 uint8_t *hw_addr = sc->hw.hw_addr;
3551 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3552 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
3553 E1000_READ_REG(&sc->hw, E1000_CTRL),
3554 E1000_READ_REG(&sc->hw, E1000_RCTL));
3555 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),
3557 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) );
3558 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3559 sc->hw.fc.high_water, sc->hw.fc.low_water);
3560 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3561 E1000_READ_REG(&sc->hw, E1000_TIDV),
3562 E1000_READ_REG(&sc->hw, E1000_TADV));
3563 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3564 E1000_READ_REG(&sc->hw, E1000_RDTR),
3565 E1000_READ_REG(&sc->hw, E1000_RADV));
3567 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3568 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i,
3569 E1000_READ_REG(&sc->hw, E1000_TDH(i)),
3570 E1000_READ_REG(&sc->hw, E1000_TDT(i)));
3572 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3573 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i,
3574 E1000_READ_REG(&sc->hw, E1000_RDH(i)),
3575 E1000_READ_REG(&sc->hw, E1000_RDT(i)));
3578 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3579 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i,
3580 sc->tx_data[i].num_tx_desc_avail);
3581 device_printf(dev, "TX %d TSO segments = %lu\n", i,
3582 sc->tx_data[i].tso_segments);
3583 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i,
3584 sc->tx_data[i].tso_ctx_reused);
3589 emx_print_hw_stats(struct emx_softc *sc)
3591 device_t dev = sc->dev;
3593 device_printf(dev, "Excessive collisions = %lld\n",
3594 (long long)sc->stats.ecol);
#if (DEBUG_HW > 0)	/* Don't output these errors normally */
3596 device_printf(dev, "Symbol errors = %lld\n",
3597 (long long)sc->stats.symerrs);
3599 device_printf(dev, "Sequence errors = %lld\n",
3600 (long long)sc->stats.sec);
3601 device_printf(dev, "Defer count = %lld\n",
3602 (long long)sc->stats.dc);
3603 device_printf(dev, "Missed Packets = %lld\n",
3604 (long long)sc->stats.mpc);
3605 device_printf(dev, "Receive No Buffers = %lld\n",
3606 (long long)sc->stats.rnbc);
3607 /* RLEC is inaccurate on some hardware, calculate our own. */
3608 device_printf(dev, "Receive Length Errors = %lld\n",
3609 ((long long)sc->stats.roc + (long long)sc->stats.ruc));
3610 device_printf(dev, "Receive errors = %lld\n",
3611 (long long)sc->stats.rxerrc);
3612 device_printf(dev, "Crc errors = %lld\n",
3613 (long long)sc->stats.crcerrs);
3614 device_printf(dev, "Alignment errors = %lld\n",
3615 (long long)sc->stats.algnerrc);
3616 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
3617 (long long)sc->stats.cexterr);
3618 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns);
3619 device_printf(dev, "XON Rcvd = %lld\n",
3620 (long long)sc->stats.xonrxc);
3621 device_printf(dev, "XON Xmtd = %lld\n",
3622 (long long)sc->stats.xontxc);
3623 device_printf(dev, "XOFF Rcvd = %lld\n",
3624 (long long)sc->stats.xoffrxc);
3625 device_printf(dev, "XOFF Xmtd = %lld\n",
3626 (long long)sc->stats.xofftxc);
3627 device_printf(dev, "Good Packets Rcvd = %lld\n",
3628 (long long)sc->stats.gprc);
3629 device_printf(dev, "Good Packets Xmtd = %lld\n",
3630 (long long)sc->stats.gptc);
3634 emx_print_nvm_info(struct emx_softc *sc)
uint16_t eeprom_data;
int i, j, row = 0;
/* It's a bit crude, but it gets the job done */
3640 kprintf("\nInterface EEPROM Dump:\n");
3641 kprintf("Offset\n0x0000 ");
3642 for (i = 0, j = 0; i < 32; i++, j++) {
3643 if (j == 8) { /* Make the offset block */
	j = 0;
	++row;
	kprintf("\n0x00%x0 ", row);
}
3647 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data);
3648 kprintf("%04x ", eeprom_data);
3654 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3656 struct emx_softc *sc;
3661 error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
	return error;
3665 sc = (struct emx_softc *)arg1;
3666 ifp = &sc->arpcom.ac_if;
3668 ifnet_serialize_all(ifp);
3671 emx_print_debug_info(sc);
 * This value will cause a hex dump of the
 * first 32 16-bit words of the EEPROM to
 * the screen.
 */
3679 emx_print_nvm_info(sc);
3681 ifnet_deserialize_all(ifp);
3687 emx_sysctl_stats(SYSCTL_HANDLER_ARGS)
3692 error = sysctl_handle_int(oidp, &result, 0, req);
if (error || !req->newptr)
	return error;
3697 struct emx_softc *sc = (struct emx_softc *)arg1;
3698 struct ifnet *ifp = &sc->arpcom.ac_if;
3700 ifnet_serialize_all(ifp);
3701 emx_print_hw_stats(sc);
3702 ifnet_deserialize_all(ifp);
3708 emx_add_sysctl(struct emx_softc *sc)
3710 struct sysctl_ctx_list *ctx;
3711 struct sysctl_oid *tree;
#if defined(EMX_RSS_DEBUG) || defined(EMX_TSS_DEBUG)
char pkt_desc[32];
int i;
#endif
3717 ctx = device_get_sysctl_ctx(sc->dev);
3718 tree = device_get_sysctl_tree(sc->dev);
3719 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3720 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3721 emx_sysctl_debug_info, "I", "Debug Information");
3723 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3724 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3725 emx_sysctl_stats, "I", "Statistics");
3727 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3728 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0,
3730 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3731 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0,
3734 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3735 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3736 emx_sysctl_int_throttle, "I", "interrupt throttling rate");
3737 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3738 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3739 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt");
3740 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3741 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3742 emx_sysctl_tx_wreg_nsegs, "I",
3743 "# segments sent before write to hardware register");
3745 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3746 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0,
3748 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3749 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0,
3751 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3752 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
3753 "# of TX rings used");
3755 #ifdef IFPOLL_ENABLE
3756 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3757 OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW,
3758 sc, 0, emx_sysctl_npoll_rxoff, "I",
3759 "NPOLLING RX cpu offset");
3760 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3761 OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW,
3762 sc, 0, emx_sysctl_npoll_txoff, "I",
3763 "NPOLLING TX cpu offset");
3766 #ifdef EMX_RSS_DEBUG
3767 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3768 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug,
3769 0, "RSS debug level");
3770 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3771 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i);
3772 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
3773 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts,
3777 #ifdef EMX_TSS_DEBUG
3778 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3779 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i);
3780 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
3781 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts,
3788 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3790 struct emx_softc *sc = (void *)arg1;
3791 struct ifnet *ifp = &sc->arpcom.ac_if;
3792 int error, throttle;
3794 throttle = sc->int_throttle_ceil;
3795 error = sysctl_handle_int(oidp, &throttle, 0, req);
if (error || req->newptr == NULL)
	return error;
if (throttle < 0 || throttle > 1000000000 / 256)
	return EINVAL;
 * Set the interrupt throttling rate in 256ns increments;
 * recalculate the sysctl value assignment to get the exact
 * frequency.
 */
3806 throttle = 1000000000 / 256 / throttle;
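/*
 * Example of the round trip (input value assumed): a request of
 * 6000 interrupts/sec gives throttle = 3906250 / 6000 = 651 units
 * of 256ns; int_throttle_ceil below then becomes
 * 3906250 / 651 = 6000, so the reported ceiling matches the
 * achievable rate.
 */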
/* Upper 16 bits of ITR are reserved and should be zero */
if (throttle & 0xffff0000)
	return EINVAL;
3813 ifnet_serialize_all(ifp);
if (throttle)
	sc->int_throttle_ceil = 1000000000 / 256 / throttle;
else
	sc->int_throttle_ceil = 0;
3820 if (ifp->if_flags & IFF_RUNNING)
3821 emx_set_itr(sc, throttle);
3823 ifnet_deserialize_all(ifp);
3826 if_printf(ifp, "Interrupt moderation set to %d/sec\n",
3827 sc->int_throttle_ceil);
3833 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
3835 struct emx_softc *sc = (void *)arg1;
3836 struct ifnet *ifp = &sc->arpcom.ac_if;
3837 struct emx_txdata *tdata = &sc->tx_data[0];
3840 segs = tdata->tx_intr_nsegs;
3841 error = sysctl_handle_int(oidp, &segs, 0, req);
if (error || req->newptr == NULL)
	return error;
3847 ifnet_serialize_all(ifp);
 * Don't allow tx_intr_nsegs to become:
 * o  Less than oact_tx_desc
 * o  So large that no TX descriptor will cause a TX interrupt
 *    to be generated (OACTIVE would never recover)
 * o  So small that it will cause tx_dd[] overflow
 */
3856 if (segs < tdata->oact_tx_desc ||
3857 segs >= tdata->num_tx_desc - tdata->oact_tx_desc ||
3858 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) {
3864 for (i = 0; i < sc->tx_ring_cnt; ++i)
3865 sc->tx_data[i].tx_intr_nsegs = segs;
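/*
 * Illustration of the bounds above (values assumed): with
 * num_tx_desc = 512 and oact_tx_desc = 64, the accepted range is
 * 64 <= segs < 512 - 64 = 448, subject additionally to
 * segs >= 512 / EMX_TXDD_SAFE.
 */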
3868 ifnet_deserialize_all(ifp);
3874 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
3876 struct emx_softc *sc = (void *)arg1;
3877 struct ifnet *ifp = &sc->arpcom.ac_if;
3878 int error, nsegs, i;
3880 nsegs = sc->tx_data[0].tx_wreg_nsegs;
3881 error = sysctl_handle_int(oidp, &nsegs, 0, req);
if (error || req->newptr == NULL)
	return error;
3885 ifnet_serialize_all(ifp);
3886 for (i = 0; i < sc->tx_ring_cnt; ++i)
sc->tx_data[i].tx_wreg_nsegs = nsegs;
3888 ifnet_deserialize_all(ifp);
3893 #ifdef IFPOLL_ENABLE
3896 emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3898 struct emx_softc *sc = (void *)arg1;
3899 struct ifnet *ifp = &sc->arpcom.ac_if;
3902 off = sc->rx_npoll_off;
3903 error = sysctl_handle_int(oidp, &off, 0, req);
if (error || req->newptr == NULL)
	return error;
3909 ifnet_serialize_all(ifp);
if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) {
	error = EINVAL;
} else {
	error = 0;
	sc->rx_npoll_off = off;
}
3916 ifnet_deserialize_all(ifp);
3922 emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3924 struct emx_softc *sc = (void *)arg1;
3925 struct ifnet *ifp = &sc->arpcom.ac_if;
3928 off = sc->tx_npoll_off;
3929 error = sysctl_handle_int(oidp, &off, 0, req);
if (error || req->newptr == NULL)
	return error;
3935 ifnet_serialize_all(ifp);
if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) {
	error = EINVAL;
} else {
	error = 0;
	sc->tx_npoll_off = off;
}
3942 ifnet_deserialize_all(ifp);
3947 #endif /* IFPOLL_ENABLE */
3950 emx_dma_alloc(struct emx_softc *sc)
3955 * Create top level busdma tag
3957 error = bus_dma_tag_create(NULL, 1, 0,
3958 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3960 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
3961 0, &sc->parent_dtag);
3963 device_printf(sc->dev, "could not create top level DMA tag\n");
3968 * Allocate transmit descriptors ring and buffers
3970 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3971 error = emx_create_tx_ring(&sc->tx_data[i]);
3973 device_printf(sc->dev,
3974 "Could not setup transmit structures\n");
3980 * Allocate receive descriptors ring and buffers
3982 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3983 error = emx_create_rx_ring(&sc->rx_data[i]);
3985 device_printf(sc->dev,
3986 "Could not setup receive structures\n");
3994 emx_dma_free(struct emx_softc *sc)
3998 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3999 emx_destroy_tx_ring(&sc->tx_data[i],
4000 sc->tx_data[i].num_tx_desc);
4003 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4004 emx_destroy_rx_ring(&sc->rx_data[i],
4005 sc->rx_data[i].num_rx_desc);
4008 /* Free top level busdma tag */
4009 if (sc->parent_dtag != NULL)
4010 bus_dma_tag_destroy(sc->parent_dtag);
4014 emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
4016 struct emx_softc *sc = ifp->if_softc;
4018 ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
4022 emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
4024 struct emx_softc *sc = ifp->if_softc;
4026 ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
4030 emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
4032 struct emx_softc *sc = ifp->if_softc;
4034 return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
4038 emx_serialize_skipmain(struct emx_softc *sc)
4040 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
4044 emx_deserialize_skipmain(struct emx_softc *sc)
4046 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
4052 emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
4053 boolean_t serialized)
4055 struct emx_softc *sc = ifp->if_softc;
4057 ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
4061 #endif /* INVARIANTS */
4063 #ifdef IFPOLL_ENABLE
4066 emx_npoll_status(struct ifnet *ifp)
4068 struct emx_softc *sc = ifp->if_softc;
4071 ASSERT_SERIALIZED(&sc->main_serialize);
4073 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
4074 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4075 callout_stop(&sc->timer);
4076 sc->hw.mac.get_link_status = 1;
4077 emx_update_link_status(sc);
4078 callout_reset(&sc->timer, hz, emx_timer, sc);
4083 emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
4085 struct emx_txdata *tdata = arg;
4087 ASSERT_SERIALIZED(&tdata->tx_serialize);
4090 if (!ifsq_is_empty(tdata->ifsq))
4091 ifsq_devstart(tdata->ifsq);
4095 emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
4097 struct emx_rxdata *rdata = arg;
4099 ASSERT_SERIALIZED(&rdata->rx_serialize);
4101 emx_rxeof(rdata, cycle);
4105 emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
4107 struct emx_softc *sc = ifp->if_softc;
4110 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4115 info->ifpi_status.status_func = emx_npoll_status;
4116 info->ifpi_status.serializer = &sc->main_serialize;
4118 txr_cnt = emx_get_txring_inuse(sc, TRUE);
4119 off = sc->tx_npoll_off;
4120 for (i = 0; i < txr_cnt; ++i) {
4121 struct emx_txdata *tdata = &sc->tx_data[i];
int idx = i + off;

KKASSERT(idx < ncpus2);
4125 info->ifpi_tx[idx].poll_func = emx_npoll_tx;
4126 info->ifpi_tx[idx].arg = tdata;
4127 info->ifpi_tx[idx].serializer = &tdata->tx_serialize;
4128 ifsq_set_cpuid(tdata->ifsq, idx);
4131 off = sc->rx_npoll_off;
4132 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4133 struct emx_rxdata *rdata = &sc->rx_data[i];
int idx = i + off;

KKASSERT(idx < ncpus2);
4137 info->ifpi_rx[idx].poll_func = emx_npoll_rx;
4138 info->ifpi_rx[idx].arg = rdata;
4139 info->ifpi_rx[idx].serializer = &rdata->rx_serialize;
4142 if (ifp->if_flags & IFF_RUNNING) {
4143 if (txr_cnt == sc->tx_ring_inuse)
4144 emx_disable_intr(sc);
4149 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4150 struct emx_txdata *tdata = &sc->tx_data[i];
4152 ifsq_set_cpuid(tdata->ifsq,
4153 rman_get_cpuid(sc->intr_res));
4156 if (ifp->if_flags & IFF_RUNNING) {
4157 txr_cnt = emx_get_txring_inuse(sc, FALSE);
4158 if (txr_cnt == sc->tx_ring_inuse)
4159 emx_enable_intr(sc);
4166 #endif /* IFPOLL_ENABLE */
4169 emx_set_itr(struct emx_softc *sc, uint32_t itr)
4171 E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
4172 if (sc->hw.mac.type == e1000_82574) {
4176 * When using MSIX interrupts we need to
4177 * throttle using the EITR register
4179 for (i = 0; i < 4; ++i)
4180 E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
4185 * Disable the L0s, 82574L Errata #20
4188 emx_disable_aspm(struct emx_softc *sc)
4190 uint16_t link_cap, link_ctrl, disable;
4191 uint8_t pcie_ptr, reg;
4192 device_t dev = sc->dev;
4194 switch (sc->hw.mac.type) {
4199 * 82573 specification update
4200 * errata #8 disable L0s
4201 * errata #41 disable L1
4203 * 82571/82572 specification update
 * errata #13 disable L1
4205 * errata #68 disable L0s
4207 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
4212 * 82574 specification update errata #20
4214 * There is no need to disable L1
4216 disable = PCIEM_LNKCTL_ASPM_L0S;
pcie_ptr = pci_get_pciecap_ptr(dev);
if (pcie_ptr == 0)
	return;
4227 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
	return;
4232 if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);
4234 reg = pcie_ptr + PCIER_LINKCTRL;
4235 link_ctrl = pci_read_config(dev, reg, 2);
4236 link_ctrl &= ~disable;
4237 pci_write_config(dev, reg, link_ctrl, 2);
4241 emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
4243 int iphlen, hoff, thoff, ex = 0;
4248 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
4250 iphlen = m->m_pkthdr.csum_iphlen;
4251 thoff = m->m_pkthdr.csum_thlen;
4252 hoff = m->m_pkthdr.csum_lhlen;
4254 KASSERT(iphlen > 0, ("invalid ip hlen"));
4255 KASSERT(thoff > 0, ("invalid tcp hlen"));
4256 KASSERT(hoff > 0, ("invalid ether hlen"));
4258 if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
4261 if (m->m_len < hoff + iphlen + thoff + ex) {
4262 m = m_pullup(m, hoff + iphlen + thoff + ex);
4269 ip = mtodoff(m, struct ip *, hoff);
4276 emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
4277 uint32_t *txd_upper, uint32_t *txd_lower)
4279 struct e1000_context_desc *TXD;
4280 int hoff, iphlen, thoff, hlen;
4281 int mss, pktlen, curr_txd;
4283 #ifdef EMX_TSO_DEBUG
tdata->tso_segments++;
#endif
4287 iphlen = mp->m_pkthdr.csum_iphlen;
4288 thoff = mp->m_pkthdr.csum_thlen;
4289 hoff = mp->m_pkthdr.csum_lhlen;
4290 mss = mp->m_pkthdr.tso_segsz;
4291 pktlen = mp->m_pkthdr.len;
4293 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
4294 tdata->csum_flags == CSUM_TSO &&
4295 tdata->csum_iphlen == iphlen &&
4296 tdata->csum_lhlen == hoff &&
4297 tdata->csum_thlen == thoff &&
4298 tdata->csum_mss == mss &&
4299 tdata->csum_pktlen == pktlen) {
4300 *txd_upper = tdata->csum_txd_upper;
4301 *txd_lower = tdata->csum_txd_lower;
4302 #ifdef EMX_TSO_DEBUG
	tdata->tso_ctx_reused++;
#endif
	return 0;
}
4307 hlen = hoff + iphlen + thoff;
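/*
 * Typical values (assumed; untagged IPv4/TCP without options):
 * hoff = 14, iphlen = 20 and thoff = 20 give hlen = 54, so with
 * mss = 1460 each frame produced by TSO carries up to 1460
 * payload bytes after the 54 header bytes.
 */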
4310 * Setup a new TSO context.
4313 curr_txd = tdata->next_avail_tx_desc;
4314 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
4316 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
4317 E1000_TXD_DTYP_D | /* Data descr type */
4318 E1000_TXD_CMD_TSE; /* Do TSE on this packet */
4320 /* IP and/or TCP header checksum calculation and insertion. */
4321 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
4324 * Start offset for header checksum calculation.
4325 * End offset for header checksum calculation.
 * Offset of place to put the checksum.
4328 TXD->lower_setup.ip_fields.ipcss = hoff;
4329 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
4330 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);
4333 * Start offset for payload checksum calculation.
4334 * End offset for payload checksum calculation.
4335 * Offset of place to put the checksum.
4337 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
4338 TXD->upper_setup.tcp_fields.tucse = 0;
4339 TXD->upper_setup.tcp_fields.tucso =
4340 hoff + iphlen + offsetof(struct tcphdr, th_sum);
4343 * Payload size per packet w/o any headers.
4344 * Length of all headers up to payload.
4346 TXD->tcp_seg_setup.fields.mss = htole16(mss);
4347 TXD->tcp_seg_setup.fields.hdr_len = hlen;
4348 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
4349 E1000_TXD_CMD_DEXT | /* Extended descr */
4350 E1000_TXD_CMD_TSE | /* TSE context */
4351 E1000_TXD_CMD_IP | /* Do IP csum */
4352 E1000_TXD_CMD_TCP | /* Do TCP checksum */
4353 (pktlen - hlen)); /* Total len */
4355 /* Save the information for this TSO context */
4356 tdata->csum_flags = CSUM_TSO;
4357 tdata->csum_lhlen = hoff;
4358 tdata->csum_iphlen = iphlen;
4359 tdata->csum_thlen = thoff;
4360 tdata->csum_mss = mss;
4361 tdata->csum_pktlen = pktlen;
4362 tdata->csum_txd_upper = *txd_upper;
4363 tdata->csum_txd_lower = *txd_lower;
if (++curr_txd == tdata->num_tx_desc)
	curr_txd = 0;
4368 KKASSERT(tdata->num_tx_desc_avail > 0);
4369 tdata->num_tx_desc_avail--;
tdata->next_avail_tx_desc = curr_txd;
return 1;
4376 emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
if (polling)
	return sc->tx_ring_cnt;