/*
 * Copyright (c) 2001-2011, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/igb/if_igb.h>
#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */
#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }
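/*
 * For illustration: IGB_DEVICE(82575EB_COPPER) expands (by token pasting
 * and string literal concatenation) to
 *
 *   { IGB_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER,
 *     "Intel(R) PRO/1000 82575EB_COPPER" }
 *
 * so each table entry below carries its matching PCI IDs plus a
 * human-readable description.
 */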
static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};
static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
#ifdef IFPOLL_ENABLE
static int	igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);
static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifnet *);
static void	igb_start(struct ifnet *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif
static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *);
static void	igb_msix_try_alloc(struct igb_softc *);
static void	igb_msix_free(struct igb_softc *, boolean_t);
static int	igb_msix_setup(struct igb_softc *);
static void	igb_msix_teardown(struct igb_softc *, int);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);
static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	{ 0, 0 }
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);
static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */
static int	igb_fc_setting = e1000_fc_full;

/*
 * DMA Coalescing, only available on i350.
 * A power-saving feature; default to off.
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* The "Ignore Checksum Indication" bit is set; do nothing */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}
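/*
 * Illustrative note: csum_data = 0xffff together with CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR tells the stack that the L4 checksum (including the
 * pseudo header) already verified OK, so e.g. TCP input can skip
 * software checksumming; 0xffff is the value a correct one's-complement
 * checksum folds to.
 */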
static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
		} else {
			return NULL;
		}
		break;

	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);

	return pi;
}
static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}
static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, i, j, ring_max;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

#ifdef notyet
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
	    adapter, 0, igb_set_flowcntl, "I", "Flow Control");
#endif
	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		ring_max = IGB_MAX_RING_82575;
		break;
	case e1000_82580:
		ring_max = IGB_MAX_RING_82580;
		break;
	case e1000_i350:
		ring_max = IGB_MAX_RING_I350;
		break;
	case e1000_82576:
		ring_max = IGB_MAX_RING_82576;
		break;
	default:
		ring_max = IGB_MIN_RING;
		break;
	}
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max);
#ifdef IGB_RSS_DEBUG
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt);
#endif
	sc->rx_ring_inuse = sc->rx_ring_cnt;
	sc->tx_ring_cnt = 1; /* XXX */

	if (sc->hw.mac.type == e1000_82575)
		sc->flags |= IGB_FLAG_TSO_IPLEN0;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);
	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	offset_def = sc->rx_npoll_off;
	offset = device_getenv_int(dev, "npoll.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid npoll.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}
	sc->tx_npoll_off = offset;
#endif
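	/*
	 * Worked example of the offset math above (illustrative): with
	 * ncpus2 = 8, rx_ring_cnt = 4 and device unit 1, offset_def =
	 * (4 * 1) % 8 = 4, so unit 0 polls its RX rings on CPUs 0-3 and
	 * unit 1 on CPUs 4-7; a tunable offset is rejected unless it is
	 * a multiple of the ring count and below ncpus2.
	 */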
	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Setup serializer array */
	i = 0;
	sc->serializes[i++] = &sc->main_serialize;

	sc->tx_serialize = i;
	for (j = 0; j < sc->tx_ring_cnt; ++j)
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;

	sc->rx_serialize = i;
	for (j = 0; j < sc->rx_ring_cnt; ++j)
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;

	sc->serialize_cnt = i;
	KKASSERT(sc->serialize_cnt <= IGB_NSERIALIZE);
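	/*
	 * Illustrative layout of the array built above: with 1 TX ring
	 * and 4 RX rings it holds
	 *
	 *   serializes[] = { main, tx0, rx0, rx1, rx2, rx3 }
	 *
	 * with tx_serialize = 1, rx_serialize = 2 and serialize_cnt = 6;
	 * the ifnet serialize hooks walk this array.
	 */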
	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);
	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		e1000_set_eee_i350(&sc->hw);
	}
	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}
	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; must be after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}

	sc->arpcom.ac_if.if_cpuid = sc->tx_rings[0].tx_intr_cpuid;

	return 0;

failed:
	igb_detach(dev);
	return error;
}
static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		igb_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}
	bus_generic_detach(dev);

	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);

	return 0;
}
static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);
	if_devstart(ifp);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}
static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/*
		 * As the speed/duplex settings are being
		 * changed, we need to reset the PHY.
		 */
		sc->hw.phy.reset_disable = FALSE;

		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configured used RX/TX rings */
	igb_set_ring_inuse(sc, polling);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif
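	/*
	 * Worked example for the (disabled) pool sizing above: a standard
	 * 1500-byte MTU gives max_frame_size = 1518, which fits a 2 KB
	 * MCLBYTES cluster; a 4000-byte MTU would need a page-sized
	 * MJUMPAGESIZE cluster, and 9000-byte jumbos a 9 KB MJUM9BYTES
	 * cluster.  This driver currently always uses 2 KB clusters
	 * (see igb_newbuf() and igb_init_rx_unit()).
	 */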
	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0; /* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_res);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	e1000_set_eee_i350(&sc->hw);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;
}
static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;

		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}
static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	igb_init(sc);

	return 0;
}
static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}
static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}
static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			if_printf(ifp, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex");
		}
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}
static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}
static void
igb_reset(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;
		/* XXX pba = E1000_PBA_35K; */

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;
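	/*
	 * Worked example (illustrative): with pba = 34 (KB),
	 * pba << 10 = 34816 bytes, so hwm = min(34816 * 9 / 10,
	 * 34816 - 2 * 1518) = min(31334, 31780) = 31334; on an 82575
	 * that rounds down to 8-byte granularity, giving
	 * high_water = 31328 and low_water = 31320.
	 */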
	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	if (hw->mac.type == e1000_i350 && sc->dma_coalesce) {
		uint32_t reg;

		hwm = (pba - 4) << 10;
		reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK;

		/* transition to L0x or L1 if available..*/
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/* timer = +-1000 usec in 32usec intervals */
		reg |= (1000 >> 5);
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* No lower threshold */
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* set hwm to PBA - 2 * max frame size */
		E1000_WRITE_REG(hw, E1000_FCRTC, hwm);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x800000FF; /* 255 usec */
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * sc->max_frame_size)) >> 6);

		/* make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg | E1000_PCIEMISC_LX_DECISION);
		if_printf(ifp, "DMA Coalescing enabled\n");
	}

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}
static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif
	ifp->if_watchdog = igb_watchdog;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, igb_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}
static void
igb_add_sysctl(struct igb_softc *sc)
{
	char node[32];
	int i;

	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		device_printf(sc->dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree),
		    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");
	} else {
		for (i = 0; i < sc->msix_cnt; ++i) {
			struct igb_msix_data *msix = &sc->msix_data[i];

			ksnprintf(node, sizeof(node), "msix%d_rate", i);
			SYSCTL_ADD_PROC(&sc->sysctl_ctx,
			    SYSCTL_CHILDREN(sc->sysctl_tree),
			    OID_AUTO, node, CTLTYPE_INT | CTLFLAG_RW,
			    msix, 0, igb_sysctl_msix_rate, "I",
			    msix->msix_rate_desc);
		}
	}

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLFLAG_RW,
	    &sc->tx_rings[0].wreg_nsegs, 0,
	    "# of segments before write to hardware register");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
#endif
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
#ifdef IGB_RSS_DEBUG
		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
#endif
		ksnprintf(node, sizeof(node), "rx%d_wreg", i);
		SYSCTL_ADD_INT(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_wreg, 0,
		    "# of segments before write to hardware register");
	}
}
static int
igb_alloc_rings(struct igb_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc_cachealign(
	    sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;
		lwkt_serialize_init(&txr->tx_serialize);

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc_cachealign(
	    sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	return 0;
}
static void
igb_free_rings(struct igb_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}
}
static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	txr->spare_desc = IGB_TX_SPARE;
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->wreg_nsegs = 8;
	txr->oact_hi_desc = txr->num_tx_desc / 2;
	txr->oact_lo_desc = txr->num_tx_desc / 8;
	if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
		txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
	if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
		txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;

	return 0;
}
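/*
 * Worked example (illustrative): with 1024 TX descriptors the
 * watermarks above come out as intr_nsegs = 1024/16 = 64,
 * oact_hi_desc = 512 and oact_lo_desc = 128, the latter then clamped
 * into [spare_desc + IGB_TX_RESERVED, IGB_TX_OACTIVE_MAX]; these bound
 * when OACTIVE is asserted and cleared in igb_encap()/igb_txeof().
 */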
static void
igb_free_tx_ring(struct igb_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}
static void
igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
{
	int i;

	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;
	}

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		    txr->tx_hdr_dmap);
		bus_dma_tag_destroy(txr->tx_hdr_dtag);
		txr->tx_hdr = NULL;
	}

	if (txr->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	}
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);
	txr->tx_buf = NULL;
}
static void
igb_init_tx_ring(struct igb_tx_ring *txr)
{
	/* Clear the old descriptor contents */
	bzero(txr->tx_base,
	    sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);

	/* Clear TX head write-back buffer */
	*(txr->tx_hdr) = 0;

	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->num_tx_desc;
}
static void
igb_init_tx_unit(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    txr->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		/*
		 * Don't set WB_on_EITR:
		 * - 82575 does not have it
		 * - It almost has no effect on 82576, see:
		 *   82576 specification update errata #26
		 * - It causes unnecessary bus traffic
		 */
		E1000_WRITE_REG(hw, E1000_TDWBAH(i),
		    (uint32_t)(hdr_paddr >> 32));
		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
		    ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);

		/*
		 * WTHRESH is ignored by the hardware, since header
		 * write back mode is used.
		 */
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
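	/*
	 * Bit layout written to TXDCTL above (illustrative): the prefetch
	 * threshold lives in the low byte, the host threshold in bits
	 * 13:8 and the write-back threshold in bits 21:16, i.e. the
	 * value is PTHRESH | (HTHRESH << 8) | (WTHRESH << 16) | ENABLE.
	 */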
	if (sc->vf_ifp)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
static boolean_t
igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int ehdrlen, ctxd, ip_hlen = 0;
	boolean_t offload = TRUE;

	if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)
		offload = FALSE;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor, thus
	 * we need to be here just for that setup.
	 */
	if (mp->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(mp->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (!offload) {
		return FALSE;
	}

	ehdrlen = mp->m_pkthdr.csum_lhlen;
	KASSERT(ehdrlen > 0, ("invalid ether hlen"));

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	if (mp->m_pkthdr.csum_flags & CSUM_IP) {
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		ip_hlen = mp->m_pkthdr.csum_iphlen;
		KASSERT(ip_hlen > 0, ("invalid ip hlen"));
	}
	vlan_macip_lens |= ip_hlen;

	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	if (mp->m_pkthdr.csum_flags & CSUM_TCP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;

	/* 82575 needs the queue index added */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;

	return TRUE;
}
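/*
 * Worked example of the context-descriptor packing above
 * (illustrative): for an untagged TCP/IPv4 packet with a 14-byte
 * Ethernet header and a 20-byte IP header, vlan_macip_lens =
 * (14 << E1000_ADVTXD_MACLEN_SHIFT) | 20, and type_tucmd_mlhl
 * carries TUCMD_IPV4 | DCMD_DEXT | DTYP_CTXT | L4T_TCP.
 */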
static void
igb_txeof(struct igb_tx_ring *txr)
{
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
	int first, hdr, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	first = txr->next_to_clean;
	hdr = *(txr->tx_hdr);

	if (first == hdr)
		return;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		++avail;
		if (txbuf->m_head) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
			++ifp->if_opackets;
		}
		if (++first == txr->num_tx_desc)
			first = 0;
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;
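	/*
	 * Illustrative note: "hdr" above is the ring index the hardware
	 * last reported through the head write-back buffer (programmed
	 * via TDWBAL/TDWBAH in igb_init_tx_unit()), so the number of
	 * descriptors reclaimed in this pass is
	 * (hdr - first + num_tx_desc) % num_tx_desc.
	 */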
	/*
	 * If we have a minimum free, clear OACTIVE
	 * to tell the stack that it is OK to send packets.
	 */
	if (IGB_IS_NOT_OACTIVE(txr)) {
		ifq_clr_oactive(&ifp->if_snd);

		/*
		 * We have enough TX descriptors, turn off
		 * the watchdog.  We allow a small amount of
		 * packets (roughly intr_nsegs) pending on
		 * the transmit ring.
		 */
		ifp->if_timer = 0;
	}
}
static int
igb_create_rx_ring(struct igb_rx_ring *rxr)
{
	int rsize, i, error, nrxd;

	/*
	 * Validate number of receive descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
	if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
	    nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, nrxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->num_rx_desc = nrxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RxDescriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
	rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    1,			/* nsegments */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA maps\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	rxr->rx_wreg = 32;

	return 0;
}
static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}
static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		KKASSERT(rxbuf->m_head == NULL);
		bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
	}
	bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
	bus_dma_tag_destroy(rxr->rx_tag);

	kfree(rxr->rx_buf, M_DEVBUF);
	rxr->rx_buf = NULL;
}
static __inline void
igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
{
	rxd->read.pkt_addr = htole64(rxbuf->paddr);
	rxd->wb.upper.status_error = 0;
}
static int
igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct igb_rx_buf *rxbuf;
	int error, nseg;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
	    rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return error;
	}

	rxbuf = &rxr->rx_buf[i];
	if (rxbuf->m_head != NULL)
		bus_dmamap_unload(rxr->rx_tag, rxbuf->map);

	map = rxbuf->map;
	rxbuf->map = rxr->rx_sparemap;
	rxr->rx_sparemap = map;

	rxbuf->m_head = m;
	rxbuf->paddr = seg.ds_addr;

	igb_setup_rxdesc(&rxr->rx_base[i], rxbuf);
	return 0;
}
static int
igb_init_rx_ring(struct igb_rx_ring *rxr)
{
	int i, error;

	/* Clear the ring contents */
	bzero(rxr->rx_base,
	    rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc));

	/* Now replenish the ring mbufs */
	for (i = 0; i < rxr->num_rx_desc; ++i) {
		error = igb_newbuf(rxr, i, TRUE);
		if (error)
			return error;
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;

	rxr->fmp = NULL;
	rxr->lmp = NULL;
	rxr->discard = FALSE;

	return 0;
}
static void
igb_init_rx_unit(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl, rxcsum, srrctl = 0;
	int i;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

#if 0
	/*
	** Set up for header split
	*/
	if (igb_header_split) {
		/* Use a standard mbuf for the header */
		srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else
#endif
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

	/*
	** Set up for jumbo frames
	*/
	if (ifp->if_mtu > ETHERMTU) {
		rctl |= E1000_RCTL_LPE;
#if 0
		if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
			srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
			srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		}
		/* Set maximum packet len */
		psize = adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
#else
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
#endif
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}
	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t bus_addr = rxr->rxdma.dma_paddr;
		uint32_t rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		/*
		 * Don't set WTHRESH to a value above 1 on 82576, see:
		 * 82576 specification update errata #26
		 */
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}
	rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
	rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);

	/*
	 * Receive Checksum Offload for TCP and UDP
	 *
	 * Checksum offloading is also enabled if multiple receive
	 * queues are to be supported, since we need it to figure out
	 * which RX ring a packet should be dispatched to.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
		/*
		 * NOTE:
		 * PCSD must be enabled to enable multiple
		 * receive queues.
		 */
		rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD;
	} else {
		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
		    E1000_RXCSUM_PCSD);
	}
	E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
2347 if (IGB_ENABLE_HWRSS(sc)) {
2348 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
2349 uint32_t reta_shift;
		 * When we reach here, RSS has already been disabled
		 * in igb_stop(), so we can safely configure the RSS
		 * key and redirect table.
2362 toeplitz_get_key(key, sizeof(key));
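		/*
		 * Program the Toeplitz key into the RSSRK registers,
		 * IGB_RSSRK_SIZE bytes per register.
		 */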
2363 for (i = 0; i < IGB_NRSSRK; ++i) {
2366 rssrk = IGB_RSSRK_VAL(key, i);
2367 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
2369 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
		 * Configure the RSS redirect table in the following fashion:
2374 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
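		 *
		 * e.g. with two RX rings in use, the table holds the
		 * pattern 0,1,0,1,...; four byte-wide entries are packed
		 * into each 32-bit RETA register, each queue index
		 * shifted left by reta_shift to land where the hardware
		 * reads it.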
2376 reta_shift = IGB_RETA_SHIFT;
2377 if (hw->mac.type == e1000_82575)
2378 reta_shift = IGB_RETA_SHIFT_82575;
2381 for (j = 0; j < IGB_NRETA; ++j) {
2384 for (i = 0; i < IGB_RETA_SIZE; ++i) {
2387 q = (r % sc->rx_ring_inuse) << reta_shift;
2388 reta |= q << (8 * i);
2391 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
2392 E1000_WRITE_REG(hw, E1000_RETA(j), reta);
2396 * Enable multiple receive queues.
2397 * Enable IPv4 RSS standard hash functions.
2398 * Disable RSS interrupt on 82575
2400 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
2401 E1000_MRQC_ENABLE_RSS_4Q |
2402 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2403 E1000_MRQC_RSS_FIELD_IPV4);
2406 /* Setup the Receive Control Register */
2407 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2408 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2409 E1000_RCTL_RDMTS_HALF |
2410 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2411 /* Strip CRC bytes. */
2412 rctl |= E1000_RCTL_SECRC;
2413 /* Make sure VLAN Filters are off */
2414 rctl &= ~E1000_RCTL_VFE;
2415 /* Don't store bad packets */
2416 rctl &= ~E1000_RCTL_SBP;
2418 /* Enable Receives */
2419 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2422 * Setup the HW Rx Head and Tail Descriptor Pointers
2423 * - needs to be after enable
2425 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2426 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2428 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
2429 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1);
2434 igb_rx_refresh(struct igb_rx_ring *rxr, int i)
	if (--i < 0)
		i = rxr->num_rx_desc - 1;
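	/*
	 * Back RDT off by one slot; the hardware never uses the
	 * descriptor RDT points at, which keeps the head from
	 * running into the tail on a full ring.
	 */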
2438 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i);
2442 igb_rxeof(struct igb_rx_ring *rxr, int count)
2444 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
2445 union e1000_adv_rx_desc *cur;
2449 i = rxr->next_to_check;
2450 cur = &rxr->rx_base[i];
2451 staterr = le32toh(cur->wb.upper.status_error);
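	/*
	 * DD (descriptor done) is set by the hardware once it has
	 * written a received packet back to the descriptor.
	 */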
2453 if ((staterr & E1000_RXD_STAT_DD) == 0)
2456 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
2457 struct pktinfo *pi = NULL, pi0;
2458 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2459 struct mbuf *m = NULL;
2462 eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE;
2467 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 &&
2469 struct mbuf *mp = rxbuf->m_head;
2470 uint32_t hash, hashtype;
2474 len = le16toh(cur->wb.upper.length);
2475 if (rxr->sc->hw.mac.type == e1000_i350 &&
2476 (staterr & E1000_RXDEXT_STATERR_LB))
2477 vlan = be16toh(cur->wb.upper.vlan);
2479 vlan = le16toh(cur->wb.upper.vlan);
2481 hash = le32toh(cur->wb.lower.hi_dword.rss);
2482 hashtype = le32toh(cur->wb.lower.lo_dword.data) &
2483 E1000_RXDADV_RSSTYPE_MASK;
2485 IGB_RSS_DPRINTF(rxr->sc, 10,
2486 "ring%d, hash 0x%08x, hashtype %u\n",
2487 rxr->me, hash, hashtype);
2489 bus_dmamap_sync(rxr->rx_tag, rxbuf->map,
2490 BUS_DMASYNC_POSTREAD);
2492 if (igb_newbuf(rxr, i, FALSE) != 0) {
2498 if (rxr->fmp == NULL) {
2499 mp->m_pkthdr.len = len;
2503 rxr->lmp->m_next = mp;
2504 rxr->lmp = rxr->lmp->m_next;
2505 rxr->fmp->m_pkthdr.len += len;
2513 m->m_pkthdr.rcvif = ifp;
2516 if (ifp->if_capenable & IFCAP_RXCSUM)
2517 igb_rxcsum(staterr, m);
2519 if (staterr & E1000_RXD_STAT_VP) {
2520 m->m_pkthdr.ether_vlantag = vlan;
2521 m->m_flags |= M_VLANTAG;
2524 if (ifp->if_capenable & IFCAP_RSS) {
2525 pi = igb_rssinfo(m, &pi0,
2526 hash, hashtype, staterr);
2528 #ifdef IGB_RSS_DEBUG
2535 igb_setup_rxdesc(cur, rxbuf);
2537 rxr->discard = TRUE;
2539 rxr->discard = FALSE;
2540 if (rxr->fmp != NULL) {
2549 ether_input_pkt(ifp, m, pi);
2551 /* Advance our pointers to the next descriptor. */
		if (++i == rxr->num_rx_desc)
			i = 0;
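		/*
		 * Batch RDT updates: push replenished descriptors to
		 * the hardware only every rx_wreg slots to reduce
		 * register writes.
		 */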
2555 if (ncoll >= rxr->rx_wreg) {
2556 igb_rx_refresh(rxr, i);
2560 cur = &rxr->rx_base[i];
2561 staterr = le32toh(cur->wb.upper.status_error);
2563 rxr->next_to_check = i;
2566 igb_rx_refresh(rxr, i);
2571 igb_set_vlan(struct igb_softc *sc)
2573 struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
2580 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE);
2584 reg = E1000_READ_REG(hw, E1000_CTRL);
2585 reg |= E1000_CTRL_VME;
2586 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2589 /* Enable the Filter Table */
2590 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
2591 reg = E1000_READ_REG(hw, E1000_RCTL);
2592 reg &= ~E1000_RCTL_CFIEN;
2593 reg |= E1000_RCTL_VFE;
2594 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2598 /* Update the frame size */
2599 E1000_WRITE_REG(&sc->hw, E1000_RLPML,
2600 sc->max_frame_size + VLAN_TAG_SIZE);
	/* Don't bother with the table if no VLANs are configured */
	if ((sc->num_vlans == 0) ||
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
		return;
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	for (int i = 0; i < IGB_VFTA_SIZE; i++)
		if (sc->shadow_vfta[i] != 0) {
			if (sc->vf_ifp)
				e1000_vfta_set_vf(hw,
				    sc->shadow_vfta[i], TRUE);
			else
				E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
				    i, sc->shadow_vfta[i]);
		}
2624 igb_enable_intr(struct igb_softc *sc)
2626 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
2627 lwkt_serialize_handler_enable(&sc->main_serialize);
2631 for (i = 0; i < sc->msix_cnt; ++i) {
2632 lwkt_serialize_handler_enable(
2633 sc->msix_data[i].msix_serialize);
2637 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2638 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
2639 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask);
2641 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2642 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask);
2643 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
2644 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
2646 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
2648 E1000_WRITE_FLUSH(&sc->hw);
2652 igb_disable_intr(struct igb_softc *sc)
2654 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2655 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff);
2656 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2658 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
2659 E1000_WRITE_FLUSH(&sc->hw);
2661 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
2662 lwkt_serialize_handler_disable(&sc->main_serialize);
2666 for (i = 0; i < sc->msix_cnt; ++i) {
2667 lwkt_serialize_handler_disable(
2668 sc->msix_data[i].msix_serialize);
 * Bit of a misnomer: what this really means is
 * to enable OS management of the system, i.e.
 * to disable special hardware management features.
2679 igb_get_mgmt(struct igb_softc *sc)
2681 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2682 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
2683 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2685 /* disable hardware interception of ARP */
2686 manc &= ~E1000_MANC_ARP_EN;
2688 /* enable receiving management packets to the host */
2689 manc |= E1000_MANC_EN_MNG2HOST;
2690 manc2h |= 1 << 5; /* Mng Port 623 */
2691 manc2h |= 1 << 6; /* Mng Port 664 */
2692 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
2693 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2698 * Give control back to hardware management controller
2702 igb_rel_mgmt(struct igb_softc *sc)
2704 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2705 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2707 /* Re-enable hardware interception of ARP */
2708 manc |= E1000_MANC_ARP_EN;
2709 manc &= ~E1000_MANC_EN_MNG2HOST;
2711 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2716 * Sets CTRL_EXT:DRV_LOAD bit.
2718 * For ASF and Pass Through versions of f/w this means that
2719 * the driver is loaded.
2722 igb_get_hw_control(struct igb_softc *sc)
2729 /* Let firmware know the driver has taken over */
2730 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2731 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2732 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2736 * Resets CTRL_EXT:DRV_LOAD bit.
2738 * For ASF and Pass Through versions of f/w this means that the
2739 * driver is no longer loaded.
2742 igb_rel_hw_control(struct igb_softc *sc)
	/* Let firmware take over control of h/w */
2750 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2751 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2752 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2756 igb_is_valid_ether_addr(const uint8_t *addr)
2758 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
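	/*
	 * Reject group (multicast/broadcast) addresses, i.e. those
	 * with the I/G bit set in the first octet, as well as the
	 * all-zero address.
	 */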
2760 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
 * Enable PCI Wake-on-LAN capability
2769 igb_enable_wol(device_t dev)
2771 uint16_t cap, status;
	/* First find the capabilities pointer */
2775 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
2777 /* Read the PM Capabilities */
2778 id = pci_read_config(dev, cap, 1);
2779 if (id != PCIY_PMG) /* Something wrong */
2783 * OK, we have the power capabilities,
2784 * so now get the status register
2786 cap += PCIR_POWER_STATUS;
2787 status = pci_read_config(dev, cap, 2);
2788 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2789 pci_write_config(dev, cap, status, 2);
2793 igb_update_stats_counters(struct igb_softc *sc)
2795 struct e1000_hw *hw = &sc->hw;
2796 struct e1000_hw_stats *stats;
2797 struct ifnet *ifp = &sc->arpcom.ac_if;
	 * The virtual function adapter has only a
	 * small, controlled set of stats; do only
	 * those and return.
2805 igb_update_vf_stats_counters(sc);
2810 if (sc->hw.phy.media_type == e1000_media_type_copper ||
2811 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
2814 stats->sec += E1000_READ_REG(hw, E1000_SEC);
2817 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
2818 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
2819 stats->scc += E1000_READ_REG(hw, E1000_SCC);
2820 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
2822 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
2823 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
2824 stats->colc += E1000_READ_REG(hw, E1000_COLC);
2825 stats->dc += E1000_READ_REG(hw, E1000_DC);
2826 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
2827 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
2828 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
2831 * For watchdog management we need to know if we have been
2832 * paused during the last interval, so capture that here.
2834 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
2835 stats->xoffrxc += sc->pause_frames;
2836 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
2837 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
2838 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
2839 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
2840 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
2841 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
2842 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
2843 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
2844 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
2845 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
2846 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
2847 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
2849 /* For the 64-bit byte counters the low dword must be read first. */
2850 /* Both registers clear on the read of the high dword */
2852 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
2853 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
2854 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
2855 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
2857 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
2858 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
2859 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
2860 stats->roc += E1000_READ_REG(hw, E1000_ROC);
2861 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
	stats->tor += E1000_READ_REG(hw, E1000_TORL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
	stats->tot += E1000_READ_REG(hw, E1000_TOTL) +
	    ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
2866 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
2867 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
2868 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
2869 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
2870 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
2871 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
2872 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
2873 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
2874 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
2875 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
2877 /* Interrupt Counts */
2879 stats->iac += E1000_READ_REG(hw, E1000_IAC);
2880 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
2881 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
2882 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
2883 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
2884 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
2885 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
2886 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
2887 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
2889 /* Host to Card Statistics */
2891 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
2892 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
2893 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
2894 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
2895 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
2896 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
2897 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
2898 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
2899 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32));
2900 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
2901 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
2902 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
2903 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
2904 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
2906 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
2907 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
2908 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
2909 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
2910 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
2911 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
2913 ifp->if_collisions = stats->colc;
2916 ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
2917 stats->ruc + stats->roc + stats->mpc + stats->cexterr;
2920 ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events;
2922 /* Driver specific counters */
2923 sc->device_control = E1000_READ_REG(hw, E1000_CTRL);
2924 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL);
2925 sc->int_mask = E1000_READ_REG(hw, E1000_IMS);
2926 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
2927 sc->packet_buf_alloc_tx =
2928 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
2929 sc->packet_buf_alloc_rx =
2930 (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
2934 igb_vf_init_stats(struct igb_softc *sc)
2936 struct e1000_hw *hw = &sc->hw;
2937 struct e1000_vf_stats *stats;
2940 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
2941 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
2942 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
2943 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
2944 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
2948 igb_update_vf_stats_counters(struct igb_softc *sc)
2950 struct e1000_hw *hw = &sc->hw;
2951 struct e1000_vf_stats *stats;
2953 if (sc->link_speed == 0)
2957 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
2958 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
2959 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc);
2960 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc);
2961 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc);
2964 #ifdef IFPOLL_ENABLE
2967 igb_npoll_status(struct ifnet *ifp)
2969 struct igb_softc *sc = ifp->if_softc;
2972 ASSERT_SERIALIZED(&sc->main_serialize);
2974 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
2975 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
2976 sc->hw.mac.get_link_status = 1;
2977 igb_update_link_status(sc);
2982 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
2984 struct igb_tx_ring *txr = arg;
2986 ASSERT_SERIALIZED(&txr->tx_serialize);
2989 if (!ifq_is_empty(&ifp->if_snd))
2994 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
2996 struct igb_rx_ring *rxr = arg;
2998 ASSERT_SERIALIZED(&rxr->rx_serialize);
3000 igb_rxeof(rxr, cycle);
3004 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3006 struct igb_softc *sc = ifp->if_softc;
3008 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3011 struct igb_tx_ring *txr;
3014 info->ifpi_status.status_func = igb_npoll_status;
3015 info->ifpi_status.serializer = &sc->main_serialize;
3017 off = sc->tx_npoll_off;
3018 KKASSERT(off < ncpus2);
3019 txr = &sc->tx_rings[0];
3020 info->ifpi_tx[off].poll_func = igb_npoll_tx;
3021 info->ifpi_tx[off].arg = txr;
3022 info->ifpi_tx[off].serializer = &txr->tx_serialize;
3024 off = sc->rx_npoll_off;
3025 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3026 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3029 KKASSERT(idx < ncpus2);
3030 info->ifpi_rx[idx].poll_func = igb_npoll_rx;
3031 info->ifpi_rx[idx].arg = rxr;
3032 info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
3035 if (ifp->if_flags & IFF_RUNNING) {
3036 if (sc->rx_ring_inuse == sc->rx_ring_cnt)
3037 igb_disable_intr(sc);
3041 ifp->if_npoll_cpuid = sc->tx_npoll_off;
3043 if (ifp->if_flags & IFF_RUNNING) {
3044 if (sc->rx_ring_inuse == sc->rx_ring_cnt)
3045 igb_enable_intr(sc);
3049 ifp->if_npoll_cpuid = -1;
3053 #endif /* IFPOLL_ENABLE */
3058 struct igb_softc *sc = xsc;
3059 struct ifnet *ifp = &sc->arpcom.ac_if;
3062 ASSERT_SERIALIZED(&sc->main_serialize);
3064 eicr = E1000_READ_REG(&sc->hw, E1000_EICR);
3069 if (ifp->if_flags & IFF_RUNNING) {
3070 struct igb_tx_ring *txr;
3073 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3074 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3076 if (eicr & rxr->rx_intr_mask) {
3077 lwkt_serialize_enter(&rxr->rx_serialize);
3079 lwkt_serialize_exit(&rxr->rx_serialize);
3083 txr = &sc->tx_rings[0];
3084 if (eicr & txr->tx_intr_mask) {
3085 lwkt_serialize_enter(&txr->tx_serialize);
3087 if (!ifq_is_empty(&ifp->if_snd))
3089 lwkt_serialize_exit(&txr->tx_serialize);
3093 if (eicr & E1000_EICR_OTHER) {
3094 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3096 /* Link status change */
3097 if (icr & E1000_ICR_LSC) {
3098 sc->hw.mac.get_link_status = 1;
3099 igb_update_link_status(sc);
	 * Reading EICR has the side effect of clearing the interrupt
	 * mask, so all interrupts need to be enabled here.
3107 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
3111 igb_intr_shared(void *xsc)
3113 struct igb_softc *sc = xsc;
3114 struct ifnet *ifp = &sc->arpcom.ac_if;
3117 ASSERT_SERIALIZED(&sc->main_serialize);
3119 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3122 if (reg_icr == 0xffffffff)
3125 /* Definitely not our interrupt. */
3129 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
3132 if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
3137 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3138 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3140 lwkt_serialize_enter(&rxr->rx_serialize);
3142 lwkt_serialize_exit(&rxr->rx_serialize);
3146 if (reg_icr & E1000_ICR_TXDW) {
3147 struct igb_tx_ring *txr = &sc->tx_rings[0];
3149 lwkt_serialize_enter(&txr->tx_serialize);
3151 if (!ifq_is_empty(&ifp->if_snd))
3153 lwkt_serialize_exit(&txr->tx_serialize);
3157 /* Link status change */
3158 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3159 sc->hw.mac.get_link_status = 1;
3160 igb_update_link_status(sc);
3163 if (reg_icr & E1000_ICR_RXO)
3168 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp,
3169 int *segs_used, int *idx)
3171 bus_dma_segment_t segs[IGB_MAX_SCATTER];
3173 struct igb_tx_buf *tx_buf, *tx_buf_mapped;
3174 union e1000_adv_tx_desc *txd = NULL;
3175 struct mbuf *m_head = *m_headp;
3176 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
3177 int maxsegs, nsegs, i, j, error, last = 0;
3178 uint32_t hdrlen = 0;
3180 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3181 error = igb_tso_pullup(txr, m_headp);
3187 /* Set basic descriptor constants */
3188 cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
3189 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
3190 if (m_head->m_flags & M_VLANTAG)
3191 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3194 * Map the packet for DMA.
3196 tx_buf = &txr->tx_buf[txr->next_avail_desc];
3197 tx_buf_mapped = tx_buf;
3200 maxsegs = txr->tx_avail - IGB_TX_RESERVED;
3201 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n"));
3202 if (maxsegs > IGB_MAX_SCATTER)
3203 maxsegs = IGB_MAX_SCATTER;
3205 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
3206 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3208 if (error == ENOBUFS)
3209 txr->sc->mbuf_defrag_failed++;
3211 txr->sc->no_tx_dma_setup++;
3217 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);
3222 * Set up the TX context descriptor, if any hardware offloading is
3223 * needed. This includes CSUM, VLAN, and TSO. It will consume one
	 * Unlike these chips' predecessors (em/emx), the TX context
	 * descriptor will _not_ interfere with TX data fetching and
	 * pipelining.
3229 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3230 igb_tso_ctx(txr, m_head, &hdrlen);
3231 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3232 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3233 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3236 } else if (igb_txcsum_ctx(txr, m_head)) {
3237 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3238 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
3239 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
3240 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
3245 *segs_used += nsegs;
3246 txr->tx_nsegs += nsegs;
3247 if (txr->tx_nsegs >= txr->intr_nsegs) {
3249 * Report Status (RS) is turned on every intr_nsegs
3250 * descriptors (roughly).
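		 * This bounds descriptor write-back traffic: the
		 * hardware only reports completion for descriptors
		 * that have RS set.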
3253 cmd_rs = E1000_ADVTXD_DCMD_RS;
3256 /* Calculate payload length */
3257 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
3258 << E1000_ADVTXD_PAYLEN_SHIFT);
3260 /* 82575 needs the queue index added */
3261 if (txr->sc->hw.mac.type == e1000_82575)
3262 olinfo_status |= txr->me << 4;
3264 /* Set up our transmit descriptors */
3265 i = txr->next_avail_desc;
3266 for (j = 0; j < nsegs; j++) {
3268 bus_addr_t seg_addr;
3270 tx_buf = &txr->tx_buf[i];
3271 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
3272 seg_addr = segs[j].ds_addr;
3273 seg_len = segs[j].ds_len;
3275 txd->read.buffer_addr = htole64(seg_addr);
3276 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
3277 txd->read.olinfo_status = htole32(olinfo_status);
		if (++i == txr->num_tx_desc)
			i = 0;
3281 tx_buf->m_head = NULL;
3284 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
3285 txr->next_avail_desc = i;
3286 txr->tx_avail -= nsegs;
3288 tx_buf->m_head = m_head;
	tx_buf_mapped->map = tx_buf->map;
	tx_buf->map = map;
3293 * Last Descriptor of Packet needs End Of Packet (EOP)
3295 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);
	 * Defer TDT updating until enough descriptors are set up
3307 igb_start(struct ifnet *ifp)
3309 struct igb_softc *sc = ifp->if_softc;
3310 struct igb_tx_ring *txr = &sc->tx_rings[0];
3311 struct mbuf *m_head;
3312 int idx = -1, nsegs = 0;
3314 ASSERT_SERIALIZED(&txr->tx_serialize);
3316 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
3319 if (!sc->link_active) {
3320 ifq_purge(&ifp->if_snd);
3324 if (!IGB_IS_NOT_OACTIVE(txr))
3327 while (!ifq_is_empty(&ifp->if_snd)) {
3328 if (IGB_IS_OACTIVE(txr)) {
3329 ifq_set_oactive(&ifp->if_snd);
3330 /* Set watchdog on */
3335 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3339 if (igb_encap(txr, &m_head, &nsegs, &idx)) {
3344 if (nsegs >= txr->wreg_nsegs) {
3345 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
3350 /* Send a copy of the frame to the BPF listener */
3351 ETHER_BPF_MTAP(ifp, m_head);
3354 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
3358 igb_watchdog(struct ifnet *ifp)
3360 struct igb_softc *sc = ifp->if_softc;
3361 struct igb_tx_ring *txr = &sc->tx_rings[0];
3363 ASSERT_IFNET_SERIALIZED_ALL(ifp);
	 * If flow control has paused us since last checking,
	 * it invalidates the watchdog timing, so don't run it.
3369 if (sc->pause_frames) {
3370 sc->pause_frames = 0;
3375 if_printf(ifp, "Watchdog timeout -- resetting\n");
3376 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
3377 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
3378 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
3379 if_printf(ifp, "TX(%d) desc avail = %d, "
3380 "Next TX to Clean = %d\n",
3381 txr->me, txr->tx_avail, txr->next_to_clean);
3384 sc->watchdog_events++;
3387 if (!ifq_is_empty(&ifp->if_snd))
3392 igb_set_eitr(struct igb_softc *sc, int idx, int rate)
3397 if (sc->hw.mac.type == e1000_82575) {
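		/* The 82575 expresses the EITR interval in 256ns units */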
3398 eitr = 1000000000 / 256 / rate;
		 * The datasheet is wrong about the 2-bit left shift
3404 eitr = 1000000 / rate;
3405 eitr <<= IGB_EITR_INTVL_SHIFT;
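		/*
		 * e.g. an 8000 ints/s target yields an interval of
		 * 1000000 / 8000 = 125 (in 1us units), which is then
		 * shifted into EITR's interval field.
		 */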
	}
	if (eitr == 0) {
		/* Don't disable it */
		eitr = 1 << IGB_EITR_INTVL_SHIFT;
3411 } else if (eitr > IGB_EITR_INTVL_MASK) {
3412 /* Don't allow it to be too large */
3413 eitr = IGB_EITR_INTVL_MASK;
3416 if (sc->hw.mac.type == e1000_82575)
3419 eitr |= E1000_EITR_CNT_IGNR;
3420 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr);
3424 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3426 struct igb_softc *sc = (void *)arg1;
3427 struct ifnet *ifp = &sc->arpcom.ac_if;
3428 int error, intr_rate;
3430 intr_rate = sc->intr_rate;
3431 error = sysctl_handle_int(oidp, &intr_rate, 0, req);
3432 if (error || req->newptr == NULL)
3437 ifnet_serialize_all(ifp);
3439 sc->intr_rate = intr_rate;
3440 if (ifp->if_flags & IFF_RUNNING)
3441 igb_set_eitr(sc, 0, sc->intr_rate);
3444 if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate);
3446 ifnet_deserialize_all(ifp);
3452 igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS)
3454 struct igb_msix_data *msix = (void *)arg1;
3455 struct igb_softc *sc = msix->msix_sc;
3456 struct ifnet *ifp = &sc->arpcom.ac_if;
3457 int error, msix_rate;
3459 msix_rate = msix->msix_rate;
3460 error = sysctl_handle_int(oidp, &msix_rate, 0, req);
3461 if (error || req->newptr == NULL)
3466 lwkt_serialize_enter(msix->msix_serialize);
3468 msix->msix_rate = msix_rate;
3469 if (ifp->if_flags & IFF_RUNNING)
3470 igb_set_eitr(sc, msix->msix_vector, msix->msix_rate);
3473 if_printf(ifp, "%s set to %d/sec\n", msix->msix_rate_desc,
3477 lwkt_serialize_exit(msix->msix_serialize);
3483 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
3485 struct igb_softc *sc = (void *)arg1;
3486 struct ifnet *ifp = &sc->arpcom.ac_if;
3487 struct igb_tx_ring *txr = &sc->tx_rings[0];
3490 nsegs = txr->intr_nsegs;
3491 error = sysctl_handle_int(oidp, &nsegs, 0, req);
3492 if (error || req->newptr == NULL)
3497 ifnet_serialize_all(ifp);
3499 if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc ||
3500 nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) {
3504 txr->intr_nsegs = nsegs;
3507 ifnet_deserialize_all(ifp);
3512 #ifdef IFPOLL_ENABLE
3515 igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3517 struct igb_softc *sc = (void *)arg1;
3518 struct ifnet *ifp = &sc->arpcom.ac_if;
3521 off = sc->rx_npoll_off;
3522 error = sysctl_handle_int(oidp, &off, 0, req);
3523 if (error || req->newptr == NULL)
3528 ifnet_serialize_all(ifp);
3529 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) {
3533 sc->rx_npoll_off = off;
3535 ifnet_deserialize_all(ifp);
3541 igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3543 struct igb_softc *sc = (void *)arg1;
3544 struct ifnet *ifp = &sc->arpcom.ac_if;
3547 off = sc->tx_npoll_off;
3548 error = sysctl_handle_int(oidp, &off, 0, req);
3549 if (error || req->newptr == NULL)
3554 ifnet_serialize_all(ifp);
3555 if (off >= ncpus2) {
3559 sc->tx_npoll_off = off;
3561 ifnet_deserialize_all(ifp);
3566 #endif /* IFPOLL_ENABLE */
3569 igb_init_intr(struct igb_softc *sc)
3571 igb_set_intr_mask(sc);
3573 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0)
3574 igb_init_unshared_intr(sc);
3576 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
3577 igb_set_eitr(sc, 0, sc->intr_rate);
3581 for (i = 0; i < sc->msix_cnt; ++i)
3582 igb_set_eitr(sc, i, sc->msix_data[i].msix_rate);
3587 igb_init_unshared_intr(struct igb_softc *sc)
3589 struct e1000_hw *hw = &sc->hw;
3590 const struct igb_rx_ring *rxr;
3591 const struct igb_tx_ring *txr;
3592 uint32_t ivar, index;
3596 * Enable extended mode
3598 if (sc->hw.mac.type != e1000_82575) {
3602 gpie = E1000_GPIE_NSICR;
3603 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3604 gpie |= E1000_GPIE_MSIX_MODE |
3608 E1000_WRITE_REG(hw, E1000_GPIE, gpie);
3613 switch (sc->hw.mac.type) {
3615 ivar_max = IGB_MAX_IVAR_82580;
3619 ivar_max = IGB_MAX_IVAR_I350;
3623 case e1000_vfadapt_i350:
3624 ivar_max = IGB_MAX_IVAR_VF;
3628 ivar_max = IGB_MAX_IVAR_82576;
3632 panic("unknown mac type %d\n", sc->hw.mac.type);
3634 for (i = 0; i < ivar_max; ++i)
3635 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0);
3636 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
3640 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX,
3641 ("82575 w/ MSI-X"));
3642 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
3643 tmp |= E1000_CTRL_EXT_IRCA;
3644 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
3648 * Map TX/RX interrupts to EICR
3650 switch (sc->hw.mac.type) {
3654 case e1000_vfadapt_i350:
3656 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3657 rxr = &sc->rx_rings[i];
3660 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3665 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3669 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3671 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3674 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3675 txr = &sc->tx_rings[i];
3678 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3683 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3687 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3689 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3691 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3692 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8;
3693 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
3699 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3700 rxr = &sc->rx_rings[i];
3702 index = i & 0x7; /* Each IVAR has two entries */
3703 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3708 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3712 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3714 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3717 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3718 txr = &sc->tx_rings[i];
3720 index = i & 0x7; /* Each IVAR has two entries */
3721 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3726 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3730 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3732 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3734 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3735 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8;
3736 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
3742 * Enable necessary interrupt bits.
3744 * The name of the register is confusing; in addition to
3745 * configuring the first vector of MSI-X, it also configures
3746 * which bits of EICR could be set by the hardware even when
3747 * MSI or line interrupt is used; it thus controls interrupt
3748 * generation. It MUST be configured explicitly; the default
3749 * value mentioned in the datasheet is wrong: RX queue0 and
3750 * TX queue0 are NOT enabled by default.
3752 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask);
3756 panic("unknown mac type %d\n", sc->hw.mac.type);
3761 igb_setup_intr(struct igb_softc *sc)
3765 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
3766 return igb_msix_setup(sc);
3768 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE,
3769 (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_intr_shared : igb_intr,
3770 sc, &sc->intr_tag, &sc->main_serialize);
3772 device_printf(sc->dev, "Failed to register interrupt handler");
3775 sc->tx_rings[0].tx_intr_cpuid = rman_get_cpuid(sc->intr_res);
3781 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax)
3783 if (txr->sc->hw.mac.type == e1000_82575) {
3784 txr->tx_intr_bit = 0; /* unused */
3787 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
3790 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
3793 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
3796 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
3799 panic("unsupported # of TX ring, %d\n", txr->me);
3802 int intr_bit = *intr_bit0;
3804 txr->tx_intr_bit = intr_bit % intr_bitmax;
3805 txr->tx_intr_mask = 1 << txr->tx_intr_bit;
3807 *intr_bit0 = intr_bit + 1;
3812 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax)
3814 if (rxr->sc->hw.mac.type == e1000_82575) {
3815 rxr->rx_intr_bit = 0; /* unused */
3818 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
3821 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
3824 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
3827 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
3830 panic("unsupported # of RX ring, %d\n", rxr->me);
3833 int intr_bit = *intr_bit0;
3835 rxr->rx_intr_bit = intr_bit % intr_bitmax;
3836 rxr->rx_intr_mask = 1 << rxr->rx_intr_bit;
3838 *intr_bit0 = intr_bit + 1;
3843 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3845 struct igb_softc *sc = ifp->if_softc;
3847 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt,
3848 sc->tx_serialize, sc->rx_serialize, slz);
3852 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3854 struct igb_softc *sc = ifp->if_softc;
3856 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt,
3857 sc->tx_serialize, sc->rx_serialize, slz);
3861 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3863 struct igb_softc *sc = ifp->if_softc;
3865 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
3866 sc->tx_serialize, sc->rx_serialize, slz);
3872 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3873 boolean_t serialized)
3875 struct igb_softc *sc = ifp->if_softc;
3877 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
3878 sc->tx_serialize, sc->rx_serialize, slz, serialized);
3881 #endif /* INVARIANTS */
3884 igb_set_intr_mask(struct igb_softc *sc)
3888 sc->intr_mask = sc->sts_intr_mask;
3889 for (i = 0; i < sc->rx_ring_inuse; ++i)
3890 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
3891 for (i = 0; i < sc->tx_ring_cnt; ++i)
3892 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;
3894 if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n",
3900 igb_alloc_intr(struct igb_softc *sc)
3902 int i, intr_bit, intr_bitmax;
3905 igb_msix_try_alloc(sc);
3906 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
3910 * Allocate MSI/legacy interrupt resource
3912 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
3913 &sc->intr_rid, &intr_flags);
3915 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
3918 unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
3920 sc->flags |= IGB_FLAG_SHARED_INTR;
3922 device_printf(sc->dev, "IRQ shared\n");
3924 intr_flags &= ~RF_SHAREABLE;
3926 device_printf(sc->dev, "IRQ unshared\n");
3930 sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
3931 &sc->intr_rid, intr_flags);
3932 if (sc->intr_res == NULL) {
3933 device_printf(sc->dev, "Unable to allocate bus resource: "
3939 * Setup MSI/legacy interrupt mask
3941 switch (sc->hw.mac.type) {
3943 intr_bitmax = IGB_MAX_TXRXINT_82575;
3946 intr_bitmax = IGB_MAX_TXRXINT_82580;
3949 intr_bitmax = IGB_MAX_TXRXINT_I350;
3952 intr_bitmax = IGB_MAX_TXRXINT_82576;
3955 intr_bitmax = IGB_MIN_TXRXINT;
3959 for (i = 0; i < sc->tx_ring_cnt; ++i)
3960 igb_set_txintr_mask(&sc->tx_rings[i], &intr_bit, intr_bitmax);
3961 for (i = 0; i < sc->rx_ring_cnt; ++i)
3962 igb_set_rxintr_mask(&sc->rx_rings[i], &intr_bit, intr_bitmax);
3963 sc->sts_intr_bit = 0;
3964 sc->sts_intr_mask = E1000_EICR_OTHER;
3966 /* Initialize interrupt rate */
3967 sc->intr_rate = IGB_INTR_RATE;
3969 igb_set_ring_inuse(sc, FALSE);
3970 igb_set_intr_mask(sc);
3975 igb_free_intr(struct igb_softc *sc)
3977 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
3978 if (sc->intr_res != NULL) {
3979 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr_rid,
3982 if (sc->intr_type == PCI_INTR_TYPE_MSI)
3983 pci_release_msi(sc->dev);
3985 igb_msix_free(sc, TRUE);
3990 igb_teardown_intr(struct igb_softc *sc)
3992 if (sc->intr_type != PCI_INTR_TYPE_MSIX)
3993 bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_tag);
3995 igb_msix_teardown(sc, sc->msix_cnt);
3999 igb_msix_try_alloc(struct igb_softc *sc)
4001 int msix_enable, msix_cnt, msix_cnt2, alloc_cnt;
4003 struct igb_msix_data *msix;
4004 boolean_t aggregate, setup = FALSE;
4007 * Don't enable MSI-X on 82575, see:
4008 * 82575 specification update errata #25
4010 if (sc->hw.mac.type == e1000_82575)
4013 /* Don't enable MSI-X on VF */
4017 msix_enable = device_getenv_int(sc->dev, "msix.enable",
4022 msix_cnt = pci_msix_count(sc->dev);
4023 #ifdef IGB_MSIX_DEBUG
4024 msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
4026 if (msix_cnt <= 1) {
		/* A single MSI-X vector does not make sense */
	i = 0;
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	msix_cnt2 = 1 << i;	/* largest power of 2 <= msix_cnt */
4037 device_printf(sc->dev, "MSI-X count %d/%d\n",
4038 msix_cnt2, msix_cnt);
4041 KKASSERT(msix_cnt2 <= msix_cnt);
4042 if (msix_cnt == msix_cnt2) {
4043 /* We need at least one MSI-X for link status */
4045 if (msix_cnt2 <= 1) {
4046 /* One MSI-X for RX/TX does not make sense */
4047 device_printf(sc->dev, "not enough MSI-X for TX/RX, "
4048 "MSI-X count %d/%d\n", msix_cnt2, msix_cnt);
4051 KKASSERT(msix_cnt > msix_cnt2);
4054 device_printf(sc->dev, "MSI-X count fixup %d/%d\n",
4055 msix_cnt2, msix_cnt);
4059 sc->rx_ring_msix = sc->rx_ring_cnt;
4060 if (sc->rx_ring_msix > msix_cnt2)
4061 sc->rx_ring_msix = msix_cnt2;
4063 if (msix_cnt >= sc->tx_ring_cnt + sc->rx_ring_msix + 1) {
4065 * Independent TX/RX MSI-X
4069 device_printf(sc->dev, "independent TX/RX MSI-X\n");
4070 alloc_cnt = sc->tx_ring_cnt + sc->rx_ring_msix;
4073 * Aggregate TX/RX MSI-X
4077 device_printf(sc->dev, "aggregate TX/RX MSI-X\n");
4078 alloc_cnt = msix_cnt2;
		if (alloc_cnt > ncpus2)
			alloc_cnt = ncpus2;
4081 if (sc->rx_ring_msix > alloc_cnt)
4082 sc->rx_ring_msix = alloc_cnt;
4084 ++alloc_cnt; /* For link status */
4087 device_printf(sc->dev, "MSI-X alloc %d, RX ring %d\n",
4088 alloc_cnt, sc->rx_ring_msix);
4091 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR);
4092 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
4093 &sc->msix_mem_rid, RF_ACTIVE);
4094 if (sc->msix_mem_res == NULL) {
4095 device_printf(sc->dev, "Unable to map MSI-X table\n");
4099 sc->msix_cnt = alloc_cnt;
4100 sc->msix_data = kmalloc_cachealign(
4101 sizeof(struct igb_msix_data) * sc->msix_cnt,
4102 M_DEVBUF, M_WAITOK | M_ZERO);
4103 for (x = 0; x < sc->msix_cnt; ++x) {
4104 msix = &sc->msix_data[x];
4106 lwkt_serialize_init(&msix->msix_serialize0);
4108 msix->msix_rid = -1;
4109 msix->msix_vector = x;
4110 msix->msix_mask = 1 << msix->msix_vector;
4111 msix->msix_rate = IGB_INTR_RATE;
4116 int offset, offset_def;
4118 if (sc->rx_ring_msix == ncpus2) {
4121 offset_def = (sc->rx_ring_msix *
4122 device_get_unit(sc->dev)) % ncpus2;
4124 offset = device_getenv_int(sc->dev,
4125 "msix.rxoff", offset_def);
4126 if (offset >= ncpus2 ||
4127 offset % sc->rx_ring_msix != 0) {
4128 device_printf(sc->dev,
4129 "invalid msix.rxoff %d, use %d\n",
4130 offset, offset_def);
4131 offset = offset_def;
4136 for (i = 0; i < sc->rx_ring_msix; ++i) {
4137 struct igb_rx_ring *rxr = &sc->rx_rings[i];
4139 KKASSERT(x < sc->msix_cnt);
4140 msix = &sc->msix_data[x++];
4141 rxr->rx_intr_bit = msix->msix_vector;
4142 rxr->rx_intr_mask = msix->msix_mask;
4144 msix->msix_serialize = &rxr->rx_serialize;
4145 msix->msix_func = igb_msix_rx;
4146 msix->msix_arg = rxr;
4147 msix->msix_cpuid = i + offset;
4148 KKASSERT(msix->msix_cpuid < ncpus2);
4149 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
4150 "%s rx%d", device_get_nameunit(sc->dev), i);
4151 msix->msix_rate = IGB_MSIX_RX_RATE;
4152 ksnprintf(msix->msix_rate_desc,
4153 sizeof(msix->msix_rate_desc),
4154 "RX%d interrupt rate", i);
4157 offset_def = device_get_unit(sc->dev) % ncpus2;
4158 offset = device_getenv_int(sc->dev, "msix.txoff", offset_def);
4159 if (offset >= ncpus2) {
4160 device_printf(sc->dev, "invalid msix.txoff %d, "
4161 "use %d\n", offset, offset_def);
4162 offset = offset_def;
4166 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4167 struct igb_tx_ring *txr = &sc->tx_rings[i];
4169 KKASSERT(x < sc->msix_cnt);
4170 msix = &sc->msix_data[x++];
4171 txr->tx_intr_bit = msix->msix_vector;
4172 txr->tx_intr_mask = msix->msix_mask;
4174 msix->msix_serialize = &txr->tx_serialize;
4175 msix->msix_func = igb_msix_tx;
4176 msix->msix_arg = txr;
4177 msix->msix_cpuid = i + offset;
4178 txr->tx_intr_cpuid = msix->msix_cpuid;
4179 KKASSERT(msix->msix_cpuid < ncpus2);
4180 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
4181 "%s tx%d", device_get_nameunit(sc->dev), i);
4182 msix->msix_rate = IGB_MSIX_TX_RATE;
4183 ksnprintf(msix->msix_rate_desc,
4184 sizeof(msix->msix_rate_desc),
4185 "TX%d interrupt rate", i);
4196 KKASSERT(x < sc->msix_cnt);
4197 msix = &sc->msix_data[x++];
4198 sc->sts_intr_bit = msix->msix_vector;
4199 sc->sts_intr_mask = msix->msix_mask;
4201 msix->msix_serialize = &sc->main_serialize;
4202 msix->msix_func = igb_msix_status;
4203 msix->msix_arg = sc;
4204 msix->msix_cpuid = 0; /* TODO tunable */
4205 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s sts",
4206 device_get_nameunit(sc->dev));
4207 ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
4208 "status interrupt rate");
4210 KKASSERT(x == sc->msix_cnt);
4212 error = pci_setup_msix(sc->dev);
4214 device_printf(sc->dev, "Setup MSI-X failed\n");
4219 for (i = 0; i < sc->msix_cnt; ++i) {
4220 msix = &sc->msix_data[i];
4222 error = pci_alloc_msix_vector(sc->dev, msix->msix_vector,
4223 &msix->msix_rid, msix->msix_cpuid);
4225 device_printf(sc->dev,
4226 "Unable to allocate MSI-X %d on cpu%d\n",
4227 msix->msix_vector, msix->msix_cpuid);
4231 msix->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
4232 &msix->msix_rid, RF_ACTIVE);
4233 if (msix->msix_res == NULL) {
4234 device_printf(sc->dev,
4235 "Unable to allocate MSI-X %d resource\n",
4242 pci_enable_msix(sc->dev);
4243 sc->intr_type = PCI_INTR_TYPE_MSIX;
4246 igb_msix_free(sc, setup);
4250 igb_msix_free(struct igb_softc *sc, boolean_t setup)
4254 KKASSERT(sc->msix_cnt > 1);
4256 for (i = 0; i < sc->msix_cnt; ++i) {
4257 struct igb_msix_data *msix = &sc->msix_data[i];
4259 if (msix->msix_res != NULL) {
4260 bus_release_resource(sc->dev, SYS_RES_IRQ,
4261 msix->msix_rid, msix->msix_res);
4263 if (msix->msix_rid >= 0)
4264 pci_release_msix_vector(sc->dev, msix->msix_rid);
4267 pci_teardown_msix(sc->dev);
4270 kfree(sc->msix_data, M_DEVBUF);
4271 sc->msix_data = NULL;
4275 igb_msix_setup(struct igb_softc *sc)
4279 for (i = 0; i < sc->msix_cnt; ++i) {
4280 struct igb_msix_data *msix = &sc->msix_data[i];
4283 error = bus_setup_intr_descr(sc->dev, msix->msix_res,
4284 INTR_MPSAFE, msix->msix_func, msix->msix_arg,
4285 &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
4287 device_printf(sc->dev, "could not set up %s "
4288 "interrupt handler.\n", msix->msix_desc);
4289 igb_msix_teardown(sc, i);
4297 igb_msix_teardown(struct igb_softc *sc, int msix_cnt)
4301 for (i = 0; i < msix_cnt; ++i) {
4302 struct igb_msix_data *msix = &sc->msix_data[i];
4304 bus_teardown_intr(sc->dev, msix->msix_res, msix->msix_handle);
4309 igb_msix_rx(void *arg)
4311 struct igb_rx_ring *rxr = arg;
4313 ASSERT_SERIALIZED(&rxr->rx_serialize);
4316 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
4320 igb_msix_tx(void *arg)
4322 struct igb_tx_ring *txr = arg;
4323 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4325 ASSERT_SERIALIZED(&txr->tx_serialize);
4328 if (!ifq_is_empty(&ifp->if_snd))
4331 E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
4335 igb_msix_status(void *arg)
4337 struct igb_softc *sc = arg;
4340 ASSERT_SERIALIZED(&sc->main_serialize);
4342 icr = E1000_READ_REG(&sc->hw, E1000_ICR);
4343 if (icr & E1000_ICR_LSC) {
4344 sc->hw.mac.get_link_status = 1;
4345 igb_update_link_status(sc);
4348 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
4352 igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
4354 if (!IGB_ENABLE_HWRSS(sc))
4358 sc->rx_ring_inuse = sc->rx_ring_cnt;
4359 else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
4360 sc->rx_ring_inuse = IGB_MIN_RING_RSS;
4362 sc->rx_ring_inuse = sc->rx_ring_msix;
4364 if_printf(&sc->arpcom.ac_if, "RX rings %d/%d\n",
4365 sc->rx_ring_inuse, sc->rx_ring_cnt);
4370 igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
4372 int hoff, iphlen, thoff;
4376 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
4378 iphlen = m->m_pkthdr.csum_iphlen;
4379 thoff = m->m_pkthdr.csum_thlen;
4380 hoff = m->m_pkthdr.csum_lhlen;
4382 KASSERT(iphlen > 0, ("invalid ip hlen"));
4383 KASSERT(thoff > 0, ("invalid tcp hlen"));
4384 KASSERT(hoff > 0, ("invalid ether hlen"));
4386 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
4387 m = m_pullup(m, hoff + iphlen + thoff);
4394 if (txr->sc->flags & IGB_FLAG_TSO_IPLEN0) {
		ip = mtodoff(m, struct ip *, hoff);
		ip->ip_len = 0;
4405 igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen)
4407 struct e1000_adv_tx_context_desc *TXD;
4408 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
4409 int hoff, ctxd, iphlen, thoff;
4411 iphlen = m->m_pkthdr.csum_iphlen;
4412 thoff = m->m_pkthdr.csum_thlen;
4413 hoff = m->m_pkthdr.csum_lhlen;
4415 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
4417 ctxd = txr->next_avail_desc;
4418 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];
4420 if (m->m_flags & M_VLANTAG) {
4423 vlantag = htole16(m->m_pkthdr.ether_vlantag);
4424 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
4427 vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT);
4428 vlan_macip_lens |= iphlen;
4430 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4431 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
4432 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
4434 mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
4435 mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT);
4436 /* 82575 needs the queue index added */
4437 if (txr->sc->hw.mac.type == e1000_82575)
4438 mss_l4len_idx |= txr->me << 4;
4440 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
4441 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
4442 TXD->seqnum_seed = htole32(0);
4443 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
4445 /* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
4448 txr->next_avail_desc = ctxd;
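	/* Return the total header length: ether + IP + TCP */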
4451 *hlen = hoff + iphlen + thoff;