/*
 * Copyright (c) 2001-2011, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/igb/if_igb.h>
#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */
#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id) \
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }
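
/*
 * For illustration, IGB_DEVICE(82575EB_COPPER) expands to
 * { IGB_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER,
 *   "Intel(R) PRO/1000 82575EB_COPPER" },
 * i.e. the device id is token-pasted onto the HAL's E1000_DEV_ID_
 * constants and stringized into the probe description.
 */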
static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};
static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static int	igb_txctx_pullup(struct igb_tx_ring *, struct mbuf **);
static boolean_t igb_txctx(struct igb_tx_ring *, struct mbuf *);
static void	igb_add_sysctl(struct igb_softc *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifnet *);
static void	igb_start(struct ifnet *);
#ifdef DEVICE_POLLING
static void	igb_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_shared_intr(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *);
static void	igb_set_eitr(struct igb_softc *);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_setup_tx_intr(struct igb_tx_ring *, int *, int);
static void	igb_setup_rx_intr(struct igb_rx_ring *, int *, int);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);

static void	igb_serialize_skipmain(struct igb_softc *);
static void	igb_deserialize_skipmain(struct igb_softc *);
static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	{ 0, 0 }
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);
static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */
static int	igb_fc_setting = e1000_fc_full;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
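
/*
 * Note: the above are boot-time tunables, e.g. setting
 * hw.igb.rxd="2048" in loader.conf(5) overrides IGB_DEFAULT_RXD
 * for every igb(4) unit; they are not changeable at runtime.
 */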
static void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}
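
/*
 * Map the RSS hash type reported in the advanced RX descriptor to the
 * pktinfo used by the network stack for packet dispatch; returns NULL
 * when the packet cannot be classified reliably.
 */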
static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
		} else {
			return NULL;
		}
		break;

	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);

	return pi;
}
static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}
static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, i, j, ring_max;
	u_int intr_flags;

#if 0	/* unported FreeBSD sysctl bits */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
	    &igb_enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
	    adapter, 0, igb_set_flowcntl, "I", "Flow Control");
#endif
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, igb_msi_enable,
	    &sc->intr_rid, &intr_flags);

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		error = ENXIO;
		goto failed;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	switch (sc->hw.mac.type) {
	case e1000_82575:
		ring_max = IGB_MAX_RING_82575;
		break;

	case e1000_82580:
		ring_max = IGB_MAX_RING_82580;
		break;

	case e1000_i350:
		ring_max = IGB_MAX_RING_I350;
		break;

	case e1000_82576:
		ring_max = IGB_MAX_RING_82576;
		break;

	default:
		ring_max = IGB_MIN_RING;
		break;
	}
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max);
	sc->tx_ring_cnt = 1; /* XXX */

	sc->intr_rate = IGB_INTR_RATE;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

	/* Setup serializers */
	lwkt_serialize_init(&sc->main_serialize);
	i = 0;
	sc->serializes[i++] = &sc->main_serialize;

	sc->tx_serialize = i;
	for (j = 0; j < sc->tx_ring_cnt; ++j)
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;

	sc->rx_serialize = i;
	for (j = 0; j < sc->rx_ring_cnt; ++j)
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;

	sc->serialize_cnt = i;
	KKASSERT(sc->serialize_cnt <= IGB_NSERIALIZE);
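
	/*
	 * The serializer array is now laid out as:
	 *   [0]                   main_serialize
	 *   [tx_serialize ...]    one serializer per TX ring
	 *   [rx_serialize ...]    one serializer per RX ring
	 * tx_serialize/rx_serialize double as the start indices of the
	 * TX and RX portions, which the ifnet serialize callbacks use.
	 */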
	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#if 0	/* unported FreeBSD sysctl bits */
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		e1000_set_eee_i350(&sc->hw);
	}

	/*
	 * Start from a known state, this is important in reading the nvm and
	 * mac from that.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

#if 0	/* unported FreeBSD MSI-X bits */
	/*
	** Configure Interrupts
	*/
	if ((adapter->msix > 1) && (igb_enable_msix))
		error = igb_allocate_msix(adapter);
	else /* MSI or Legacy */
		error = igb_allocate_legacy(adapter);
	if (error)
		goto err_late;
#endif

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree, must after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#if 0	/* unported FreeBSD VLAN/stats bits */
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	igb_add_hw_stats(adapter);
#endif

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}
static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}
	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);

	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return 0;
}
static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}
static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	if_devstart(ifp);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}
static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef DEVICE_POLLING
			if (!(ifp->if_flags & IFF_POLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/*
		 * As the speed/duplex settings are being
		 * changed, we need to reset the PHY.
		 */
		sc->hw.phy.reset_disable = FALSE;

		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IGB_CSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0	/* unported FreeBSD mbuf pool sizing */
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->timer, hz, igb_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

#if 0	/* unported FreeBSD MSI-X bits */
	if (adapter->msix > 1) /* Set up queue routing */
		igb_configure_queues(adapter);
#endif

	/* this clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);
#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		igb_disable_intr(sc);
	else
#endif /* DEVICE_POLLING */
		igb_enable_intr(sc);

	E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);

	/* Set Energy Efficient Ethernet */
	e1000_set_eee_i350(&sc->hw);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;
}
static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;

		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}
static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	igb_init(sc);

	return 0;
}
static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}
static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}
static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset(&sc->timer, hz, igb_timer, sc);

	ifnet_deserialize_all(ifp);
}
static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			if_printf(ifp, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex");
		}
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}
static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}
static void
igb_reset(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;
		/* XXX pba = E1000_PBA_35K; */

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
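
	/*
	 * Worked example (values purely illustrative): with pba = 34 (KB)
	 * and max_frame_size = 1518, hwm = min(34816 * 9 / 10,
	 * 34816 - 2 * 1518) = min(31334, 31780) = 31334; on pre-82576
	 * parts high_water = 31334 & 0xFFF8 = 31328 and low_water = 31320,
	 * so XOFF is sent while at least two more full-sized frames still
	 * fit in the receive packet buffer.
	 */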
	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	if (hw->mac.type == e1000_i350 && sc->dma_coalesce) {
		uint32_t reg;

		hwm = (pba - 4) << 10;
		reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK;

		/* transition to L0x or L1 if available..*/
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/* timer = +-1000 usec in 32usec intervals */
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* No lower threshold */
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* set hwm to PBA - 2 * max frame size */
		E1000_WRITE_REG(hw, E1000_FCRTC, hwm);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x800000FF; /* 255 usec */
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * sc->max_frame_size)) >> 6);

		/* make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg | E1000_PCIEMISC_LX_DECISION);
		if_printf(ifp, "DMA Coalescing enabled\n");
	}

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}
static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef DEVICE_POLLING
	ifp->if_poll = igb_poll;
#endif
	ifp->if_watchdog = igb_watchdog;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, igb_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}
static void
igb_add_sysctl(struct igb_softc *sc)
{
#ifdef IGB_RSS_DEBUG
	char rx_pkt[32];
	int i;
#endif

	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		device_printf(sc->dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(rx_pkt, sizeof(rx_pkt), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, rx_pkt,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
	}
#endif
}
static int
igb_alloc_rings(struct igb_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc(sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;
		lwkt_serialize_init(&txr->tx_serialize);

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc(sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	return 0;
}
static void
igb_free_rings(struct igb_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}
}
static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i;

	/*
	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 ||
	    (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, igb_txd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = igb_txd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	txr->tx_buf = kmalloc(sizeof(struct igb_tx_buf) * txr->num_tx_desc,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermark
	 */
	txr->spare_desc = IGB_TX_SPARE;
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->oact_hi_desc = txr->num_tx_desc / 2;
	txr->oact_lo_desc = txr->num_tx_desc / 8;
	if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
		txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
	if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
		txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;

	return 0;
}
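
/*
 * For instance, with 1024 TX descriptors the watermarks above come out
 * as intr_nsegs = 64 (a TX interrupt is requested roughly every 64
 * descriptors), oact_hi_desc = 512 and oact_lo_desc = 128, subject to
 * the IGB_TX_OACTIVE_MAX and spare_desc + IGB_TX_RESERVED clamps.
 */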
static void
igb_free_tx_ring(struct igb_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}
static void
igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
{
	int i;

	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;
	}

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		    txr->tx_hdr_dmap);
		bus_dma_tag_destroy(txr->tx_hdr_dtag);
		txr->tx_hdr = NULL;
	}

	if (txr->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	}
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);
	txr->tx_buf = NULL;
}
static void
igb_init_tx_ring(struct igb_tx_ring *txr)
{
	/* Clear the old descriptor contents */
	bzero(txr->tx_base,
	    sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);

	/* Clear TX head write-back buffer */
	*(txr->tx_hdr) = 0;

	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->num_tx_desc;
}
static void
igb_init_tx_unit(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    txr->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		/*
		 * Enable TX head write-back: the hardware DMAs its
		 * head (consumer) index into tx_hdr as it completes
		 * descriptors.
		 */
		E1000_WRITE_REG(hw, E1000_TDWBAH(i),
		    (uint32_t)(hdr_paddr >> 32));
		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
		    ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);
	}

	if (sc->vf_ifp)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
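
/*
 * igb_txctx() below programs an "advanced context descriptor": for
 * checksum offload the hardware needs the ethernet/IP header lengths,
 * the L4 protocol and the VLAN tag up front, so one extra descriptor
 * carrying that layout is queued ahead of the packet's data descriptors
 * and consumes a ring slot of its own.
 */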
static boolean_t
igb_txctx(struct igb_tx_ring *txr, struct mbuf *mp)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct igb_tx_buf *txbuf;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	int ehdrlen, ctxd, ip_hlen = 0;
	uint16_t etype, vlantag = 0;
	boolean_t offload = TRUE;

	if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)
		offload = FALSE;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
	ctxd = txr->next_avail_desc;
	txbuf = &txr->tx_buf[ctxd];
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor, thus
	 * we need to be here just for that setup.
	 */
	if (mp->m_flags & M_VLANTAG) {
		vlantag = htole16(mp->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (!offload) {
		return FALSE;
	}

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	KASSERT(mp->m_len >= ETHER_HDR_LEN,
	    ("igb_txctx_pullup is not called (eh)?\n"));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN,
		    ("igb_txctx_pullup is not called (evh)?\n"));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		KASSERT(mp->m_len >= ehdrlen + IGB_IPVHL_SIZE,
		    ("igb_txctx_pullup is not called (eh+ip_vhl)?\n"));

		/* NOTE: We could only safely access ip.ip_vhl part */
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		if (mp->m_pkthdr.csum_flags & CSUM_IP)
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;

#ifdef notyet
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
#endif

	default:
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	if (mp->m_pkthdr.csum_flags & CSUM_TCP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;

	/* 82575 needs the queue index added */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	txbuf->m_head = NULL;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return offload;
}
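
/*
 * TX cleanup is driven by head write-back: instead of polling a DD bit
 * in each descriptor, the NIC DMAs its current head (consumer) index
 * into tx_hdr (see the TDWBAL/TDWBAH setup in igb_init_tx_unit()), so
 * igb_txeof() only has to walk from next_to_clean up to *tx_hdr to
 * find the descriptors that have completed.
 */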
static void
igb_txeof(struct igb_tx_ring *txr)
{
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
	int first, hdr, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	first = txr->next_to_clean;
	hdr = *(txr->tx_hdr);

	if (first == hdr)
		return;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		++avail;
		if (txbuf->m_head) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
			++ifp->if_opackets;
		}
		if (++first == txr->num_tx_desc)
			first = 0;
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;

	/*
	 * If we have a minimum free, clear IFF_OACTIVE
	 * to tell the stack that it is OK to send packets.
	 */
	if (IGB_IS_NOT_OACTIVE(txr)) {
		ifp->if_flags &= ~IFF_OACTIVE;

		/*
		 * We have enough TX descriptors, turn off
		 * the watchdog. We allow small amount of
		 * packets (roughly intr_nsegs) pending on
		 * the transmit ring.
		 */
		ifp->if_timer = 0;
	}
}
static int
igb_create_rx_ring(struct igb_rx_ring *rxr)
{
	int rsize, i, error;

	/*
	 * Validate number of receive descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 ||
	    (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, igb_rxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->num_rx_desc = igb_rxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RxDescriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rxr->rx_buf = kmalloc(sizeof(struct igb_rx_buf) * rxr->num_rx_desc,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    1,			/* nsegments */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA maps\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}
	return 0;
}
static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}
static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		KKASSERT(rxbuf->m_head == NULL);
		bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
	}
	bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
	bus_dma_tag_destroy(rxr->rx_tag);

	kfree(rxr->rx_buf, M_DEVBUF);
	rxr->rx_buf = NULL;
}
static __inline void
igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
{
	rxd->read.pkt_addr = htole64(rxbuf->paddr);
	rxd->wb.upper.status_error = 0;
}
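
/*
 * Replenish ring slot i with a freshly allocated mbuf cluster.  The new
 * mbuf is always loaded into rx_sparemap first and the maps are swapped
 * only after the load succeeds, so a failed allocation or DMA load
 * leaves the previously installed buffer untouched.
 */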
static int
igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct igb_rx_buf *rxbuf;
	int error, nseg;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
	    rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return error;
	}

	rxbuf = &rxr->rx_buf[i];
	if (rxbuf->m_head != NULL)
		bus_dmamap_unload(rxr->rx_tag, rxbuf->map);

	map = rxbuf->map;
	rxbuf->map = rxr->rx_sparemap;
	rxr->rx_sparemap = map;

	rxbuf->m_head = m;
	rxbuf->paddr = seg.ds_addr;

	igb_setup_rxdesc(&rxr->rx_base[i], rxbuf);
	return 0;
}
static int
igb_init_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	/* Clear the ring contents */
	bzero(rxr->rx_base,
	    rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc));

	/* Now replenish the ring mbufs */
	for (i = 0; i < rxr->num_rx_desc; ++i) {
		int error;

		error = igb_newbuf(rxr, i, TRUE);
		if (error)
			return error;
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;

	rxr->fmp = NULL;
	rxr->lmp = NULL;
	rxr->discard = FALSE;

	return 0;
}
static void
igb_init_rx_unit(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl, rxcsum, srrctl = 0;
	int i;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

#if 0	/* unported FreeBSD header split */
	/*
	** Set up for header split
	*/
	if (igb_header_split) {
		/* Use a standard mbuf for the header */
		srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

	/*
	** Set up for jumbo frames
	*/
	if (ifp->if_mtu > ETHERMTU) {
		rctl |= E1000_RCTL_LPE;
#if 0	/* unported FreeBSD jumbo buffer sizing */
		if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
			srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
			srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		}
		/* Set maximum packet len */
		psize = adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
#else
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
#endif
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t bus_addr = rxr->rxdma.dma_paddr;
		uint32_t rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
	rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queue is to be supported, since we need it to figure out
	 * the RSS hash.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
	} else {
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD);
	}
	E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);

	if (IGB_ENABLE_HWRSS(sc)) {
		uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
		uint32_t reta, reta_shift;

		/*
		 * NOTE:
		 * When we reach here, RSS has already been disabled
		 * in igb_stop(), so we could safely configure RSS key
		 * and redirect table.
		 */

		/*
		 * Configure RSS key
		 */
		toeplitz_get_key(key, sizeof(key));
		for (i = 0; i < IGB_NRSSRK; ++i) {
			uint32_t rssrk;

			rssrk = IGB_RSSRK_VAL(key, i);
			IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);

			E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
		}

		/*
		 * Configure RSS redirect table in following fashion:
		 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
		 */
		reta_shift = IGB_RETA_SHIFT;
		if (hw->mac.type == e1000_82575)
			reta_shift = IGB_RETA_SHIFT_82575;

		reta = 0;
		for (i = 0; i < IGB_RETA_SIZE; ++i) {
			uint32_t q;

			q = (i % sc->rx_ring_cnt) << reta_shift;
			reta |= q << (8 * i);
		}
		IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);

		for (i = 0; i < IGB_NRETA; ++i)
			E1000_WRITE_REG(hw, E1000_RETA(i), reta);
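
		/*
		 * Each RETA register packs IGB_RETA_SIZE one-byte
		 * entries.  For instance, with rx_ring_cnt = 2 the
		 * entries alternate between ring 0 and ring 1
		 * (shifted by reta_shift), and the same pattern is
		 * replicated into all IGB_NRETA registers.
		 */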
		/*
		 * Enable multiple receive queues.
		 * Enable IPv4 RSS standard hash functions.
		 * Disable RSS interrupt on 82575
		 */
		E1000_WRITE_REG(&sc->hw, E1000_MRQC,
		    E1000_MRQC_ENABLE_RSS_4Q |
		    E1000_MRQC_RSS_FIELD_IPV4_TCP |
		    E1000_MRQC_RSS_FIELD_IPV4);
	}

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	/* Strip CRC bytes. */
	rctl |= E1000_RCTL_SECRC;
	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers
	 *   - needs to be after enable
	 */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1);
	}
}
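
/*
 * RX completion loop: descriptors whose DD (descriptor done) bit is set
 * are harvested until either `count' packets have been received or an
 * unfinished descriptor is hit.  Frames larger than one buffer arrive
 * as a chain of descriptors and are reassembled via fmp/lmp until the
 * EOP (end of packet) descriptor is seen.
 */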
static void
igb_rxeof(struct igb_rx_ring *rxr, int count)
{
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	union e1000_adv_rx_desc *cur;
	uint32_t staterr;
	int i;

	i = rxr->next_to_check;
	cur = &rxr->rx_base[i];
	staterr = le32toh(cur->wb.upper.status_error);

	if ((staterr & E1000_RXD_STAT_DD) == 0)
		return;

	while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
		struct pktinfo *pi = NULL, pi0;
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
		struct mbuf *m = NULL;
		boolean_t eop;

		eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE;
		if (eop)
			--count;

		if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 &&
		    !rxr->discard) {
			struct mbuf *mp = rxbuf->m_head;
			uint32_t hash, hashtype;
			uint16_t vlan;
			int len;

			len = le16toh(cur->wb.upper.length);
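			/*
			 * NOTE:
			 * On i350 loop-back packets (LB set in the status
			 * field) the hardware is reported to deliver the
			 * VLAN tag in network byte order, hence the
			 * big-endian conversion on that path below.
			 */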
			if (rxr->sc->hw.mac.type == e1000_i350 &&
			    (staterr & E1000_RXDEXT_STATERR_LB))
				vlan = be16toh(cur->wb.upper.vlan);
			else
				vlan = le16toh(cur->wb.upper.vlan);

			hash = le32toh(cur->wb.lower.hi_dword.rss);
			hashtype = le32toh(cur->wb.lower.lo_dword.data) &
			    E1000_RXDADV_RSSTYPE_MASK;

			IGB_RSS_DPRINTF(rxr->sc, 10,
			    "ring%d, hash 0x%08x, hashtype %u\n",
			    rxr->me, hash, hashtype);

			bus_dmamap_sync(rxr->rx_tag, rxbuf->map,
			    BUS_DMASYNC_POSTREAD);

			if (igb_newbuf(rxr, i, FALSE) != 0) {
				ifp->if_iqdrops++;
				goto discard;
			}

			if (rxr->fmp == NULL) {
				mp->m_pkthdr.len = len;
				rxr->fmp = mp; /* Store the first mbuf */
				rxr->lmp = mp;
			} else {
				/* Chain mbufs together */
				rxr->lmp->m_next = mp;
				rxr->lmp = rxr->lmp->m_next;
				rxr->fmp->m_pkthdr.len += len;
			}
			if (eop) {
				m = rxr->fmp;
				rxr->fmp = NULL;
				rxr->lmp = NULL;

				m->m_pkthdr.rcvif = ifp;
				ifp->if_ipackets++;

				if (ifp->if_capenable & IFCAP_RXCSUM)
					igb_rxcsum(staterr, m);

				if (staterr & E1000_RXD_STAT_VP) {
					m->m_pkthdr.ether_vlantag = vlan;
					m->m_flags |= M_VLANTAG;
				}

				if (ifp->if_capenable & IFCAP_RSS) {
					pi = igb_rssinfo(m, &pi0,
					    hash, hashtype, staterr);
				}
#ifdef IGB_RSS_DEBUG
				rxr->rx_packets++;
#endif
			}
		} else {
			ifp->if_ierrors++;
discard:
			igb_setup_rxdesc(cur, rxbuf);
			if (!eop)
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			if (rxr->fmp != NULL) {
				m_freem(rxr->fmp);
				rxr->fmp = NULL;
				rxr->lmp = NULL;
			}
			m = NULL;
		}

		if (m != NULL)
			ether_input_pkt(ifp, m, pi);

		/* Advance our pointers to the next descriptor. */
		if (++i == rxr->num_rx_desc)
			i = 0;

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);
	}
	rxr->next_to_check = i;

	if (--i < 0)
		i = rxr->num_rx_desc - 1;
	E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i);
}
static void
igb_set_vlan(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg &= ~E1000_RCTL_CFIEN;
		reg |= E1000_RCTL_VFE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
	/* Update the frame size */
	E1000_WRITE_REG(&sc->hw, E1000_RLPML,
	    sc->max_frame_size + VLAN_TAG_SIZE);

	/* Don't bother with the table if there are no VLANs */
	if ((sc->num_vlans == 0) ||
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
		return;

	/*
	 * A soft reset zeroes out the VFTA, so
	 * we need to repopulate it now.
	 */
	for (int i = 0; i < IGB_VFTA_SIZE; i++) {
		if (sc->shadow_vfta[i] != 0) {
			if (sc->vf_ifp)
				e1000_vfta_set_vf(hw,
				    sc->shadow_vfta[i], TRUE);
			else
				E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
				    i, sc->shadow_vfta[i]);
		}
	}
}
static void
igb_enable_intr(struct igb_softc *sc)
{
	lwkt_serialize_handler_enable(&sc->main_serialize);
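	/*
	 * NOTE:
	 * In the non-shared interrupt case the extended registers are
	 * used: EIAC selects which EICR bits auto-clear, EIAM selects
	 * which bits auto-mask, and EIMS unmasks the queue vectors;
	 * plain IMS is then only needed for link-state changes (LSC).
	 */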
	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
		/* XXX MSI-X should use sc->intr_mask */
		E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
		E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask);
		E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
		E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
	} else {
		E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
	}
	E1000_WRITE_FLUSH(&sc->hw);
}
static void
igb_disable_intr(struct igb_softc *sc)
{
	if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
		E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff);
		E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
	}
	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	E1000_WRITE_FLUSH(&sc->hw);

	lwkt_serialize_handler_disable(&sc->main_serialize);
}
/*
 * Bit of a misnomer: what this really means is
 * to enable OS management of the system, i.e.
 * to disable special hardware management features.
 */
static void
igb_get_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~E1000_MANC_ARP_EN;

		/* enable receiving management packets to the host */
		manc |= E1000_MANC_EN_MNG2HOST;
		manc2h |= 1 << 5; /* Mng Port 623 */
		manc2h |= 1 << 6; /* Mng Port 664 */
		E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}
/*
 * Give control back to the hardware management controller.
 */
static void
igb_rel_mgmt(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_HAS_MGMT) {
		int manc = E1000_READ_REG(&sc->hw, E1000_MANC);

		/* Re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;
		manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
	}
}
/*
 * Sets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void
igb_get_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware know the driver has taken over */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/*
 * Resets CTRL_EXT:DRV_LOAD bit.
 *
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igb_rel_hw_control(struct igb_softc *sc)
{
	uint32_t ctrl_ext;

	if (sc->vf_ifp)
		return;

	/* Let firmware take over control of the hardware */
	ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
static boolean_t
igb_is_valid_ether_addr(const uint8_t *addr)
{
	uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
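	/*
	 * Reject multicast addresses (low bit of the first octet set)
	 * and the all-zero address; neither is a usable station address.
	 */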
	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return FALSE;
	return TRUE;
}
/*
 * Enable PCI Wake On Lan capability
 */
static void
igb_enable_wol(device_t dev)
{
	uint16_t cap, status;
	uint8_t id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;

	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
}
static void
igb_update_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_hw_stats *stats;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * The virtual function adapter has only a
	 * small controlled set of stats; do only
	 * those and return.
	 */
	if (sc->vf_ifp) {
		igb_update_vf_stats_counters(sc);
		return;
	}
	stats = sc->stats;

	if (sc->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs +=
		    E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
	stats->xoffrxc += sc->pause_frames;
	stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
	stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
	stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
	stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
	stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
	stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
	stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
	stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
	stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
	stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
	stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
	stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
	/*
	 * For the 64-bit byte counters the low dword must be read first.
	 * Both registers clear on the read of the high dword.
	 */
	stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
	stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
	stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
	stats->ruc += E1000_READ_REG(hw, E1000_RUC);
	stats->rfc += E1000_READ_REG(hw, E1000_RFC);
	stats->roc += E1000_READ_REG(hw, E1000_ROC);
	stats->rjc += E1000_READ_REG(hw, E1000_RJC);

	stats->tor += E1000_READ_REG(hw, E1000_TORH);
	stats->tot += E1000_READ_REG(hw, E1000_TOTH);

	stats->tpr += E1000_READ_REG(hw, E1000_TPR);
	stats->tpt += E1000_READ_REG(hw, E1000_TPT);
	stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
	stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
	stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
	stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
	stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
	stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
	stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
	stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
	/* Interrupt Counts */
	stats->iac += E1000_READ_REG(hw, E1000_IAC);
	stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
	stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
	stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
	stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
	stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
	stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
	stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
	stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
	/* Host to Card Statistics */
	stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
	stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
	stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
	stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
	stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
	stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
	stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
	stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32));
	stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
	stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
	stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
	stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
	stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
	stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
	stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
	stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
	stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
	stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);

	ifp->if_collisions = stats->colc;

	/* Rx Errors */
	ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
	    stats->ruc + stats->roc + stats->mpc + stats->cexterr;

	/* Tx Errors */
	ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events;
	/* Driver specific counters */
	sc->device_control = E1000_READ_REG(hw, E1000_CTRL);
	sc->rx_control = E1000_READ_REG(hw, E1000_RCTL);
	sc->int_mask = E1000_READ_REG(hw, E1000_IMS);
	sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
	sc->packet_buf_alloc_tx =
	    ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
	sc->packet_buf_alloc_rx =
	    (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
}
static void
igb_vf_init_stats(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_vf_stats *stats;

	stats = sc->stats;

	stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
	stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
	stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
	stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
	stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
}
static void
igb_update_vf_stats_counters(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct e1000_vf_stats *stats;

	if (sc->link_speed == 0)
		return;

	stats = sc->stats;
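	/*
	 * NOTE:
	 * UPDATE_VF_REG (defined elsewhere in this driver) is assumed to
	 * fold the 32-bit rolling hardware counter into the 64-bit soft
	 * counter while coping with wraparound, using the "last" value
	 * as the previous snapshot.
	 */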
	UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
	UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
	UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc);
	UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc);
	UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc);
}
#ifdef DEVICE_POLLING

static void
igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct igb_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	switch (cmd) {
	case POLL_REGISTER:
		igb_disable_intr(sc);
		break;

	case POLL_DEREGISTER:
		igb_enable_intr(sc);
		break;

	case POLL_AND_CHECK_STATUS:
		reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			igb_serialize_skipmain(sc);
			sc->hw.mac.get_link_status = 1;
			igb_update_link_status(sc);
			igb_deserialize_skipmain(sc);
		}
		/* FALL THROUGH */
	default:
		if (ifp->if_flags & IFF_RUNNING) {
			struct igb_tx_ring *txr;
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				struct igb_rx_ring *rxr = &sc->rx_rings[i];

				lwkt_serialize_enter(&rxr->rx_serialize);
				igb_rxeof(rxr, count);
				lwkt_serialize_exit(&rxr->rx_serialize);
			}

			txr = &sc->tx_rings[0];
			lwkt_serialize_enter(&txr->tx_serialize);
			igb_txeof(txr);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&txr->tx_serialize);
		}
		break;
	}
}

#endif	/* DEVICE_POLLING */
static void
igb_intr(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t eicr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	eicr = E1000_READ_REG(&sc->hw, E1000_EICR);
	if (eicr == 0)
		return;
	if (ifp->if_flags & IFF_RUNNING) {
		struct igb_tx_ring *txr;
		int i;

		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			if (eicr & rxr->rx_intr_mask) {
				lwkt_serialize_enter(&rxr->rx_serialize);
				igb_rxeof(rxr, -1);
				lwkt_serialize_exit(&rxr->rx_serialize);
			}
		}

		txr = &sc->tx_rings[0];
		if (eicr & txr->tx_intr_mask) {
			lwkt_serialize_enter(&txr->tx_serialize);
			igb_txeof(txr);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&txr->tx_serialize);
		}
	}

	if (eicr & E1000_EICR_OTHER) {
		uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);

		/* Link status change */
		if (icr & E1000_ICR_LSC) {
			igb_serialize_skipmain(sc);
			sc->hw.mac.get_link_status = 1;
			igb_update_link_status(sc);
			igb_deserialize_skipmain(sc);
		}
	}

	/*
	 * Reading EICR has the side effect of clearing the interrupt
	 * mask, so all interrupts need to be re-enabled here.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
}
static void
igb_shared_intr(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	/* Hot eject? */
	if (reg_icr == 0xffffffff)
		return;

	/* Definitely not our interrupt. */
	if (reg_icr == 0x0)
		return;

	if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
		return;
	if (ifp->if_flags & IFF_RUNNING) {
		struct igb_tx_ring *txr;
		int i;

		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			lwkt_serialize_enter(&rxr->rx_serialize);
			igb_rxeof(rxr, -1);
			lwkt_serialize_exit(&rxr->rx_serialize);
		}

		txr = &sc->tx_rings[0];
		lwkt_serialize_enter(&txr->tx_serialize);
		igb_txeof(txr);
		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);
		lwkt_serialize_exit(&txr->tx_serialize);
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		igb_serialize_skipmain(sc);
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
		igb_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;
}
static int
igb_txctx_pullup(struct igb_tx_ring *txr, struct mbuf **m0)
{
	struct mbuf *m = *m0;
	struct ether_header *eh;
	int len;

	txr->ctx_try_pullup++;

	len = ETHER_HDR_LEN + IGB_IPVHL_SIZE;
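	/*
	 * NOTE:
	 * IGB_IPVHL_SIZE is presumably the single version/header-length
	 * byte at the start of the IP header; the TX context setup only
	 * needs the headers up to and including ip.ip_hl to be
	 * contiguous, so that is all we pull up here.
	 */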
	if (__predict_false(!M_WRITABLE(m))) {
		if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
			txr->ctx_drop1++;
			m_freem(m);
			*m0 = NULL;
			return ENOBUFS;
		}
		eh = mtod(m, struct ether_header *);

		if (eh->ether_type == htons(ETHERTYPE_VLAN))
			len += EVL_ENCAPLEN;

		if (m->m_len < len) {
			txr->ctx_drop2++;
			m_freem(m);
			*m0 = NULL;
			return ENOBUFS;
		}
		return 0;
	}

	if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
		txr->ctx_pullup1++;
		m = m_pullup(m, ETHER_HDR_LEN);
		if (m == NULL) {
			txr->ctx_pullup1_failed++;
			*m0 = NULL;
			return ENOBUFS;
		}
		*m0 = m;
	}
	eh = mtod(m, struct ether_header *);

	if (eh->ether_type == htons(ETHERTYPE_VLAN))
		len += EVL_ENCAPLEN;

	if (m->m_len < len) {
		txr->ctx_pullup2++;
		m = m_pullup(m, len);
		if (m == NULL) {
			txr->ctx_pullup2_failed++;
			*m0 = NULL;
			return ENOBUFS;
		}
		*m0 = m;
	}
	return 0;
}
static int
igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[IGB_MAX_SCATTER];
	bus_dmamap_t map;
	struct igb_tx_buf *tx_buf, *tx_buf_mapped;
	union e1000_adv_tx_desc *txd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
	int maxsegs, nsegs, i, j, error, last = 0;
	uint32_t hdrlen = 0;

	if (m_head->m_len < IGB_TXCSUM_MINHL &&
	    ((m_head->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) ||
	     (m_head->m_flags & M_VLANTAG))) {
		/*
		 * Make sure that the ethernet header and ip.ip_hl are in
		 * contiguous memory, since, if TXCSUM or VLANTAG is
		 * enabled, the later TX context descriptor setup needs
		 * to access ip.ip_hl.
		 */
		error = igb_txctx_pullup(txr, m_headp);
		if (error) {
			KKASSERT(*m_headp == NULL);
			return error;
		}
		m_head = *m_headp;
	}
	/* Set basic descriptor constants */
	cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
	cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	/*
	 * Map the packet for DMA.
	 */
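	/*
	 * NOTE:
	 * bus_dmamap_load_mbuf_defrag() is relied upon to defragment
	 * the mbuf chain into at most maxsegs segments if the chain is
	 * too scattered, replacing *m_headp in the process; the ENOBUFS
	 * counter below distinguishes that failure from a plain DMA
	 * setup failure.
	 */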
	tx_buf = &txr->tx_buf[txr->next_avail_desc];
	tx_buf_mapped = tx_buf;
	map = tx_buf->map;

	maxsegs = txr->tx_avail - IGB_TX_RESERVED;
	KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n"));
	if (maxsegs > IGB_MAX_SCATTER)
		maxsegs = IGB_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			txr->sc->mbuf_defrag_failed++;
		else
			txr->sc->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	/*
	 * Set up the context descriptor; it is used when any hardware
	 * offload is done.  This includes CSUM, VLAN, and TSO.  It will
	 * use the first descriptor.
	 */
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (igb_tso_setup(txr, m_head, &hdrlen)) {
			cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
			olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
	} else if (igb_txctx(txr, m_head)) {
		olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
		if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
			olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
	}
	txr->tx_nsegs += nsegs;
	if (txr->tx_nsegs >= txr->intr_nsegs) {
		/*
		 * Report Status (RS) is turned on every intr_nsegs
		 * descriptors (roughly).
		 */
		txr->tx_nsegs = 0;
		cmd_rs = E1000_ADVTXD_DCMD_RS;
	}

	/* Calculate payload length */
	olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
	    << E1000_ADVTXD_PAYLEN_SHIFT);
	/* 82575 needs the queue index added */
	if (txr->sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	/* Set up our transmit descriptors */
	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		uint32_t seg_len;
		bus_addr_t seg_addr;

		tx_buf = &txr->tx_buf[i];
		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seg_addr = segs[j].ds_addr;
		seg_len = segs[j].ds_len;

		txd->read.buffer_addr = htole64(seg_addr);
		txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
		txd->read.olinfo_status = htole32(olinfo_status);

		last = i;
		if (++i == txr->num_tx_desc)
			i = 0;
		tx_buf->m_head = NULL;
	}

	KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
	txr->next_avail_desc = i;
	txr->tx_avail -= nsegs;

	tx_buf->m_head = m_head;
	tx_buf_mapped->map = tx_buf->map;
	tx_buf->map = map;

	/*
	 * The last descriptor of the packet needs End Of Packet (EOP),
	 * plus RS when it was requested above.
	 */
	txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);

	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the
	 * E1000 that this frame is available to transmit.
	 */
	E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), i);

	return 0;
}
static void
igb_start(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct igb_tx_ring *txr = &sc->tx_rings[0];
	struct mbuf *m_head;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (!sc->link_active) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if (!IGB_IS_NOT_OACTIVE(txr))
		igb_txeof(txr);

	while (!ifq_is_empty(&ifp->if_snd)) {
		if (IGB_IS_OACTIVE(txr)) {
			ifp->if_flags |= IFF_OACTIVE;
			/* Set watchdog on */
			ifp->if_timer = 5;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (igb_encap(txr, &m_head)) {
			ifp->if_oerrors++;
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}
static void
igb_watchdog(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct igb_tx_ring *txr = &sc->tx_rings[0];

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * If flow control has paused us since we last checked,
	 * it invalidates the watchdog timing, so don't run it.
	 */
	if (sc->pause_frames) {
		sc->pause_frames = 0;
		ifp->if_timer = 5;
		return;
	}

	if_printf(ifp, "Watchdog timeout -- resetting\n");
	if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
	    E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
	if_printf(ifp, "TX(%d) desc avail = %d, "
	    "Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);

	ifp->if_oerrors++;
	sc->watchdog_events++;

	igb_init(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
static void
igb_set_eitr(struct igb_softc *sc)
{
	uint32_t itr = 0;

	if (sc->intr_rate > 0) {
		if (sc->hw.mac.type == e1000_82575) {
			itr = 1000000000 / 256 / sc->intr_rate;
			/*
			 * NOTE:
			 * The datasheet is wrong about the 2-bit left shift.
			 */
		} else {
			itr = 1000000 / sc->intr_rate;
			itr <<= 2;
		}
	}
	if (sc->hw.mac.type == e1000_82575)
		itr |= itr << 16;
	else
		itr |= E1000_EITR_CNT_IGNR;
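	/*
	 * Worked example (assuming the interval field of non-82575
	 * parts sits at bits 15:2 and counts microseconds): with
	 * sc->intr_rate = 8000, itr becomes 1000000 / 8000 = 125,
	 * shifted left by 2 into the interval field, capping the
	 * device at roughly 8000 interrupts per second.
	 */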
	E1000_WRITE_REG(&sc->hw, E1000_EITR(0), itr);
}
static int
igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, intr_rate;

	intr_rate = sc->intr_rate;
	error = sysctl_handle_int(oidp, &intr_rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (intr_rate < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	sc->intr_rate = intr_rate;
	if (ifp->if_flags & IFF_RUNNING)
		igb_set_eitr(sc);

	ifnet_deserialize_all(ifp);

	if (bootverbose)
		if_printf(ifp, "Interrupt rate set to %d/sec\n", sc->intr_rate);
	return 0;
}
static int
igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct igb_softc *sc = (void *)arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct igb_tx_ring *txr = &sc->tx_rings[0];
	int error, nsegs;

	nsegs = txr->intr_nsegs;
	error = sysctl_handle_int(oidp, &nsegs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (nsegs <= 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc ||
	    nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) {
		error = EINVAL;
	} else {
		error = 0;
		txr->intr_nsegs = nsegs;
	}

	ifnet_deserialize_all(ifp);

	return error;
}
static void
igb_init_intr(struct igb_softc *sc)
{
	if (sc->flags & IGB_FLAG_SHARED_INTR)
		igb_set_eitr(sc);
	else
		igb_init_unshared_intr(sc);
}
static void
igb_init_unshared_intr(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	const struct igb_rx_ring *rxr;
	const struct igb_tx_ring *txr;
	uint32_t ivar, index;
	int i;

	/*
	 * Enable extended mode
	 */
	if (sc->hw.mac.type != e1000_82575) {
		E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_NSICR);
	} else {
		uint32_t tmp;

		tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_IRCA;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
	}

	/*
	 * Map TX/RX interrupts to EICR
	 */
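	/*
	 * NOTE:
	 * Each 32-bit IVAR register holds four 8-bit vector entries
	 * (the low bit of each entry being E1000_IVAR_VALID).  The
	 * mask/shift pairs below are assumed from the 82576/82580
	 * datasheets: on 82580/i350 an IVAR covers a queue pair, with
	 * RX in bytes 0 and 2 and TX in bytes 1 and 3; on 82576 the
	 * byte lane is instead selected by whether the queue index is
	 * below 8.
	 */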
	switch (sc->hw.mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_vfadapt:
	case e1000_vfadapt_i350:
		/* RX entries */
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			rxr = &sc->rx_rings[i];

			index = i >> 1;
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

			if (i & 1) {
				ivar &= 0xff00ffff;
				ivar |=
				(rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
			} else {
				ivar &= 0xffffff00;
				ivar |=
				(rxr->rx_intr_bit | E1000_IVAR_VALID);
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		/* TX entries */
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			txr = &sc->tx_rings[i];

			index = i >> 1;
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

			if (i & 1) {
				ivar &= 0x00ffffff;
				ivar |=
				(txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
			} else {
				ivar &= 0xffff00ff;
				ivar |=
				(txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		/* Clear unused IVAR_MISC */
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
		break;
	case e1000_82576:
		/* RX entries */
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			rxr = &sc->rx_rings[i];

			index = i & 0x7; /* Each IVAR has two entries */
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

			if (i < 8) {
				ivar &= 0xffffff00;
				ivar |=
				(rxr->rx_intr_bit | E1000_IVAR_VALID);
			} else {
				ivar &= 0xff00ffff;
				ivar |=
				(rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		/* TX entries */
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			txr = &sc->tx_rings[i];

			index = i & 0x7; /* Each IVAR has two entries */
			ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

			if (i < 8) {
				ivar &= 0xffff00ff;
				ivar |=
				(txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
			} else {
				ivar &= 0x00ffffff;
				ivar |=
				(txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
			}
			E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
		}
		/* Clear unused IVAR_MISC */
		E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
		break;
	case e1000_82575:
		/*
		 * Enable necessary interrupt bits.
		 *
		 * The name of the register is confusing; in addition to
		 * configuring the first vector of MSI-X, it also configures
		 * which bits of EICR can be set by the hardware even when
		 * MSI or line interrupt is used; it thus controls interrupt
		 * generation.  It MUST be configured explicitly; the default
		 * value mentioned in the datasheet is wrong: RX queue0 and
		 * TX queue0 are NOT enabled by default.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask);
		break;

	default:
		break;
	}

	/*
	 * Configure interrupt moderation
	 */
	igb_set_eitr(sc);
}
static int
igb_setup_intr(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, i, intr_bit, intr_bitmax;

	/*
	 * Setup interrupt mask
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		intr_bitmax = IGB_MAX_TXRXINT_82575;
		break;
	case e1000_82580:
		intr_bitmax = IGB_MAX_TXRXINT_82580;
		break;
	case e1000_i350:
		intr_bitmax = IGB_MAX_TXRXINT_I350;
		break;
	case e1000_82576:
		intr_bitmax = IGB_MAX_TXRXINT_82576;
		break;
	default:
		intr_bitmax = IGB_MIN_TXRXINT;
		break;
	}
	intr_bit = 0;
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_setup_tx_intr(&sc->tx_rings[i], &intr_bit, intr_bitmax);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_setup_rx_intr(&sc->rx_rings[i], &intr_bit, intr_bitmax);

	sc->intr_mask = E1000_EICR_OTHER;
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= IGB_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(sc->dev, "IRQ shared\n");
		} else if (bootverbose) {
			device_printf(sc->dev, "IRQ unshared\n");
		}
	}

	error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE,
	    (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_shared_intr : igb_intr,
	    sc, &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(sc->dev,
		    "Failed to register interrupt handler\n");
		return error;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->intr_res);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
}
static void
igb_setup_tx_intr(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax)
{
	if (txr->sc->hw.mac.type == e1000_82575) {
		txr->tx_intr_bit = 0;	/* unused */
		switch (txr->me) {
		case 0:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
			break;
		case 1:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
			break;
		case 2:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
			break;
		case 3:
			txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
			break;
		default:
			panic("unsupported # of TX rings, %d\n", txr->me);
		}
	} else {
		int intr_bit = *intr_bit0;

		txr->tx_intr_bit = intr_bit % intr_bitmax;
		txr->tx_intr_mask = 1 << txr->tx_intr_bit;

		*intr_bit0 = intr_bit + 1;
	}
}
static void
igb_setup_rx_intr(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax)
{
	if (rxr->sc->hw.mac.type == e1000_82575) {
		rxr->rx_intr_bit = 0;	/* unused */
		switch (rxr->me) {
		case 0:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
			break;
		case 1:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
			break;
		case 2:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
			break;
		case 3:
			rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
			break;
		default:
			panic("unsupported # of RX rings, %d\n", rxr->me);
		}
	} else {
		int intr_bit = *intr_bit0;

		rxr->rx_intr_bit = intr_bit % intr_bitmax;
		rxr->rx_intr_mask = 1 << rxr->rx_intr_bit;

		*intr_bit0 = intr_bit + 1;
	}
}
static void
igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt,
	    sc->tx_serialize, sc->rx_serialize, slz);
}

static void
igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt,
	    sc->tx_serialize, sc->rx_serialize, slz);
}

static boolean_t
igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct igb_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
	    sc->tx_serialize, sc->rx_serialize, slz);
}
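/*
 * NOTE:
 * The "skipmain" helpers below operate on the serializer array starting
 * at index 1, i.e. they take or release every serializer except the main
 * one.  They are used on paths, such as the link-state handling in the
 * interrupt handlers above, where the main serializer is already held.
 */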
static void
igb_serialize_skipmain(struct igb_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
}

static void
igb_deserialize_skipmain(struct igb_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
}
#ifdef INVARIANTS

static void
igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct igb_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
	    sc->tx_serialize, sc->rx_serialize, slz, serialized);
}

#endif	/* INVARIANTS */