2 * Copyright (c) 2001-2011, Intel Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 #include "opt_polling.h"
35 #include <sys/param.h>
37 #include <sys/endian.h>
38 #include <sys/interrupt.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
44 #include <sys/serialize.h>
45 #include <sys/serialize2.h>
46 #include <sys/socket.h>
47 #include <sys/sockio.h>
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
52 #include <net/ethernet.h>
54 #include <net/if_arp.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/ifq_var.h>
58 #include <net/toeplitz.h>
59 #include <net/toeplitz2.h>
60 #include <net/vlan/if_vlan_var.h>
61 #include <net/vlan/if_vlan_ether.h>
62 #include <net/if_poll.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in.h>
66 #include <netinet/ip.h>
67 #include <netinet/tcp.h>
68 #include <netinet/udp.h>
70 #include <bus/pci/pcivar.h>
71 #include <bus/pci/pcireg.h>
73 #include <dev/netif/ig_hal/e1000_api.h>
74 #include <dev/netif/ig_hal/e1000_82575.h>
75 #include <dev/netif/igb/if_igb.h>
78 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
80 if (sc->rss_debug >= lvl) \
81 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
83 #else /* !IGB_RSS_DEBUG */
84 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
85 #endif /* IGB_RSS_DEBUG */
87 #define IGB_NAME "Intel(R) PRO/1000 "
88 #define IGB_DEVICE(id) \
89 { IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
90 #define IGB_DEVICE_NULL { 0, 0, NULL }
92 static struct igb_device {
97 IGB_DEVICE(82575EB_COPPER),
98 IGB_DEVICE(82575EB_FIBER_SERDES),
99 IGB_DEVICE(82575GB_QUAD_COPPER),
101 IGB_DEVICE(82576_NS),
102 IGB_DEVICE(82576_NS_SERDES),
103 IGB_DEVICE(82576_FIBER),
104 IGB_DEVICE(82576_SERDES),
105 IGB_DEVICE(82576_SERDES_QUAD),
106 IGB_DEVICE(82576_QUAD_COPPER),
107 IGB_DEVICE(82576_QUAD_COPPER_ET2),
108 IGB_DEVICE(82576_VF),
109 IGB_DEVICE(82580_COPPER),
110 IGB_DEVICE(82580_FIBER),
111 IGB_DEVICE(82580_SERDES),
112 IGB_DEVICE(82580_SGMII),
113 IGB_DEVICE(82580_COPPER_DUAL),
114 IGB_DEVICE(82580_QUAD_FIBER),
115 IGB_DEVICE(DH89XXCC_SERDES),
116 IGB_DEVICE(DH89XXCC_SGMII),
117 IGB_DEVICE(DH89XXCC_SFP),
118 IGB_DEVICE(DH89XXCC_BACKPLANE),
119 IGB_DEVICE(I350_COPPER),
120 IGB_DEVICE(I350_FIBER),
121 IGB_DEVICE(I350_SERDES),
122 IGB_DEVICE(I350_SGMII),
125 /* required last entry */
129 static int igb_probe(device_t);
130 static int igb_attach(device_t);
131 static int igb_detach(device_t);
132 static int igb_shutdown(device_t);
133 static int igb_suspend(device_t);
134 static int igb_resume(device_t);
136 static boolean_t igb_is_valid_ether_addr(const uint8_t *);
137 static void igb_setup_ifp(struct igb_softc *);
138 static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
139 static void igb_add_sysctl(struct igb_softc *);
140 static int igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
141 static int igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS);
142 static int igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
143 static void igb_set_ring_inuse(struct igb_softc *, boolean_t);
145 static void igb_vf_init_stats(struct igb_softc *);
146 static void igb_reset(struct igb_softc *);
147 static void igb_update_stats_counters(struct igb_softc *);
148 static void igb_update_vf_stats_counters(struct igb_softc *);
149 static void igb_update_link_status(struct igb_softc *);
150 static void igb_init_tx_unit(struct igb_softc *);
151 static void igb_init_rx_unit(struct igb_softc *);
153 static void igb_set_vlan(struct igb_softc *);
154 static void igb_set_multi(struct igb_softc *);
155 static void igb_set_promisc(struct igb_softc *);
156 static void igb_disable_promisc(struct igb_softc *);
158 static int igb_alloc_rings(struct igb_softc *);
159 static void igb_free_rings(struct igb_softc *);
160 static int igb_create_tx_ring(struct igb_tx_ring *);
161 static int igb_create_rx_ring(struct igb_rx_ring *);
162 static void igb_free_tx_ring(struct igb_tx_ring *);
163 static void igb_free_rx_ring(struct igb_rx_ring *);
164 static void igb_destroy_tx_ring(struct igb_tx_ring *, int);
165 static void igb_destroy_rx_ring(struct igb_rx_ring *, int);
166 static void igb_init_tx_ring(struct igb_tx_ring *);
167 static int igb_init_rx_ring(struct igb_rx_ring *);
168 static int igb_newbuf(struct igb_rx_ring *, int, boolean_t);
169 static int igb_encap(struct igb_tx_ring *, struct mbuf **);
171 static void igb_stop(struct igb_softc *);
172 static void igb_init(void *);
173 static int igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
174 static void igb_media_status(struct ifnet *, struct ifmediareq *);
175 static int igb_media_change(struct ifnet *);
176 static void igb_timer(void *);
177 static void igb_watchdog(struct ifnet *);
178 static void igb_start(struct ifnet *);
179 #ifdef DEVICE_POLLING
180 static void igb_poll(struct ifnet *, enum poll_cmd, int);
182 static void igb_serialize(struct ifnet *, enum ifnet_serialize);
183 static void igb_deserialize(struct ifnet *, enum ifnet_serialize);
184 static int igb_tryserialize(struct ifnet *, enum ifnet_serialize);
186 static void igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
190 static void igb_intr(void *);
191 static void igb_intr_shared(void *);
192 static void igb_rxeof(struct igb_rx_ring *, int);
193 static void igb_txeof(struct igb_tx_ring *);
194 static void igb_set_eitr(struct igb_softc *, int, int);
195 static void igb_enable_intr(struct igb_softc *);
196 static void igb_disable_intr(struct igb_softc *);
197 static void igb_init_unshared_intr(struct igb_softc *);
198 static void igb_init_intr(struct igb_softc *);
199 static int igb_setup_intr(struct igb_softc *);
200 static void igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
201 static void igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
202 static void igb_set_intr_mask(struct igb_softc *);
203 static int igb_alloc_intr(struct igb_softc *);
204 static void igb_free_intr(struct igb_softc *);
205 static void igb_teardown_intr(struct igb_softc *);
206 static void igb_msix_try_alloc(struct igb_softc *);
207 static void igb_msix_free(struct igb_softc *, boolean_t);
208 static int igb_msix_setup(struct igb_softc *);
209 static void igb_msix_teardown(struct igb_softc *, int);
210 static void igb_msix_rx(void *);
211 static void igb_msix_tx(void *);
212 static void igb_msix_status(void *);
214 /* Management and WOL Support */
215 static void igb_get_mgmt(struct igb_softc *);
216 static void igb_rel_mgmt(struct igb_softc *);
217 static void igb_get_hw_control(struct igb_softc *);
218 static void igb_rel_hw_control(struct igb_softc *);
219 static void igb_enable_wol(device_t);
221 static device_method_t igb_methods[] = {
222 /* Device interface */
223 DEVMETHOD(device_probe, igb_probe),
224 DEVMETHOD(device_attach, igb_attach),
225 DEVMETHOD(device_detach, igb_detach),
226 DEVMETHOD(device_shutdown, igb_shutdown),
227 DEVMETHOD(device_suspend, igb_suspend),
228 DEVMETHOD(device_resume, igb_resume),
232 static driver_t igb_driver = {
235 sizeof(struct igb_softc),
238 static devclass_t igb_devclass;
240 DECLARE_DUMMY_MODULE(if_igb);
241 MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
242 DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);
244 static int igb_rxd = IGB_DEFAULT_RXD;
245 static int igb_txd = IGB_DEFAULT_TXD;
246 static int igb_rxr = 0;
247 static int igb_msi_enable = 1;
248 static int igb_msix_enable = 1;
249 static int igb_eee_disabled = 1; /* Energy Efficient Ethernet */
250 static int igb_fc_setting = e1000_fc_full;
253 * DMA Coalescing, only for i350 - default to off,
254 * this feature is for power savings
256 static int igb_dma_coalesce = 0;
258 TUNABLE_INT("hw.igb.rxd", &igb_rxd);
259 TUNABLE_INT("hw.igb.txd", &igb_txd);
260 TUNABLE_INT("hw.igb.rxr", &igb_rxr);
261 TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
262 TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
263 TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
266 TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
267 TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
270 igb_rxcsum(uint32_t staterr, struct mbuf *mp)
272 /* Ignore Checksum bit is set */
273 if (staterr & E1000_RXD_STAT_IXSM)
276 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
278 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
280 if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
281 if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
282 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
283 CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
284 mp->m_pkthdr.csum_data = htons(0xffff);
289 static __inline struct pktinfo *
290 igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
291 uint32_t hash, uint32_t hashtype, uint32_t staterr)
294 case E1000_RXDADV_RSSTYPE_IPV4_TCP:
295 pi->pi_netisr = NETISR_IP;
297 pi->pi_l3proto = IPPROTO_TCP;
300 case E1000_RXDADV_RSSTYPE_IPV4:
301 if (staterr & E1000_RXD_STAT_IXSM)
305 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
306 E1000_RXD_STAT_TCPCS) {
307 pi->pi_netisr = NETISR_IP;
309 pi->pi_l3proto = IPPROTO_UDP;
317 m->m_flags |= M_HASH;
318 m->m_pkthdr.hash = toeplitz_hash(hash);
323 igb_probe(device_t dev)
325 const struct igb_device *d;
328 vid = pci_get_vendor(dev);
329 did = pci_get_device(dev);
331 for (d = igb_devices; d->desc != NULL; ++d) {
332 if (vid == d->vid && did == d->did) {
333 device_set_desc(dev, d->desc);
341 igb_attach(device_t dev)
343 struct igb_softc *sc = device_get_softc(dev);
344 uint16_t eeprom_data;
345 int error = 0, i, j, ring_max;
349 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
350 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
351 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
352 igb_sysctl_nvm_info, "I", "NVM Information");
354 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
355 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
356 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
357 &igb_enable_aim, 1, "Interrupt Moderation");
359 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
360 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
361 OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
362 adapter, 0, igb_set_flowcntl, "I", "Flow Control");
365 callout_init_mp(&sc->timer);
366 lwkt_serialize_init(&sc->main_serialize);
368 sc->dev = sc->osdep.dev = dev;
371 * Determine hardware and mac type
373 sc->hw.vendor_id = pci_get_vendor(dev);
374 sc->hw.device_id = pci_get_device(dev);
375 sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
376 sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
377 sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
379 if (e1000_set_mac_type(&sc->hw))
382 /* Are we a VF device? */
383 if (sc->hw.mac.type == e1000_vfadapt ||
384 sc->hw.mac.type == e1000_vfadapt_i350)
390 * Configure total supported RX/TX ring count
392 switch (sc->hw.mac.type) {
394 ring_max = IGB_MAX_RING_82575;
397 ring_max = IGB_MAX_RING_82580;
400 ring_max = IGB_MAX_RING_I350;
403 ring_max = IGB_MAX_RING_82576;
406 ring_max = IGB_MIN_RING;
409 sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr);
410 sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max);
412 sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt);
414 sc->rx_ring_inuse = sc->rx_ring_cnt;
415 sc->tx_ring_cnt = 1; /* XXX */
417 /* Enable bus mastering */
418 pci_enable_busmaster(dev);
423 sc->mem_rid = PCIR_BAR(0);
424 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
426 if (sc->mem_res == NULL) {
427 device_printf(dev, "Unable to allocate bus resource: memory\n");
431 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
432 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);
434 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;
436 /* Save PCI command register for Shared Code */
437 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
438 sc->hw.back = &sc->osdep;
440 /* Do Shared Code initialization */
441 if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
442 device_printf(dev, "Setup of Shared code failed\n");
447 e1000_get_bus_info(&sc->hw);
449 sc->hw.mac.autoneg = DO_AUTO_NEG;
450 sc->hw.phy.autoneg_wait_to_complete = FALSE;
451 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
454 if (sc->hw.phy.media_type == e1000_media_type_copper) {
455 sc->hw.phy.mdix = AUTO_ALL_MODES;
456 sc->hw.phy.disable_polarity_correction = FALSE;
457 sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
460 /* Set the frame limits assuming standard ethernet sized frames. */
461 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
463 /* Allocate RX/TX rings */
464 error = igb_alloc_rings(sc);
468 /* Allocate interrupt */
469 error = igb_alloc_intr(sc);
477 sc->serializes[i++] = &sc->main_serialize;
479 sc->tx_serialize = i;
480 for (j = 0; j < sc->tx_ring_cnt; ++j)
481 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
483 sc->rx_serialize = i;
484 for (j = 0; j < sc->rx_ring_cnt; ++j)
485 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
487 sc->serialize_cnt = i;
488 KKASSERT(sc->serialize_cnt <= IGB_NSERIALIZE);
490 /* Allocate the appropriate stats memory */
492 sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
494 igb_vf_init_stats(sc);
496 sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
500 /* Allocate multicast array memory. */
501 sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
504 /* Some adapter-specific advanced features */
505 if (sc->hw.mac.type >= e1000_i350) {
507 igb_set_sysctl_value(adapter, "dma_coalesce",
508 "configure dma coalesce",
509 &adapter->dma_coalesce, igb_dma_coalesce);
510 igb_set_sysctl_value(adapter, "eee_disabled",
511 "enable Energy Efficient Ethernet",
512 &adapter->hw.dev_spec._82575.eee_disable,
515 sc->dma_coalesce = igb_dma_coalesce;
516 sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
518 e1000_set_eee_i350(&sc->hw);
522 * Start from a known state, this is important in reading the nvm and
525 e1000_reset_hw(&sc->hw);
527 /* Make sure we have a good EEPROM before we read from it */
528 if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
530 * Some PCI-E parts fail the first check due to
531 * the link being in sleep state, call it again,
532 * if it fails a second time its a real issue.
534 if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
536 "The EEPROM Checksum Is Not Valid\n");
542 /* Copy the permanent MAC address out of the EEPROM */
543 if (e1000_read_mac_addr(&sc->hw) < 0) {
544 device_printf(dev, "EEPROM read error while reading MAC"
549 if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
550 device_printf(dev, "Invalid MAC address\n");
557 ** Configure Interrupts
559 if ((adapter->msix > 1) && (igb_enable_msix))
560 error = igb_allocate_msix(adapter);
561 else /* MSI or Legacy */
562 error = igb_allocate_legacy(adapter);
567 /* Setup OS specific network interface */
570 /* Add sysctl tree, must after igb_setup_ifp() */
573 /* Now get a good starting state */
576 /* Initialize statistics */
577 igb_update_stats_counters(sc);
579 sc->hw.mac.get_link_status = 1;
580 igb_update_link_status(sc);
582 /* Indicate SOL/IDER usage */
583 if (e1000_check_reset_block(&sc->hw)) {
585 "PHY reset is blocked due to SOL/IDER session.\n");
588 /* Determine if we have to control management hardware */
589 if (e1000_enable_mng_pass_thru(&sc->hw))
590 sc->flags |= IGB_FLAG_HAS_MGMT;
595 /* APME bit in EEPROM is mapped to WUC.APME */
596 eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
598 sc->wol = E1000_WUFC_MAG;
599 /* XXX disable WOL */
603 /* Register for VLAN events */
604 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
605 igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
606 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
607 igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
611 igb_add_hw_stats(adapter);
614 error = igb_setup_intr(sc);
616 ether_ifdetach(&sc->arpcom.ac_if);
627 igb_detach(device_t dev)
629 struct igb_softc *sc = device_get_softc(dev);
631 if (device_is_attached(dev)) {
632 struct ifnet *ifp = &sc->arpcom.ac_if;
634 ifnet_serialize_all(ifp);
638 e1000_phy_hw_reset(&sc->hw);
640 /* Give control back to firmware */
642 igb_rel_hw_control(sc);
645 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
646 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
650 igb_teardown_intr(sc);
652 ifnet_deserialize_all(ifp);
655 } else if (sc->mem_res != NULL) {
656 igb_rel_hw_control(sc);
658 bus_generic_detach(dev);
660 if (sc->sysctl_tree != NULL)
661 sysctl_ctx_free(&sc->sysctl_ctx);
665 if (sc->msix_mem_res != NULL) {
666 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
669 if (sc->mem_res != NULL) {
670 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
677 kfree(sc->mta, M_DEVBUF);
678 if (sc->stats != NULL)
679 kfree(sc->stats, M_DEVBUF);
685 igb_shutdown(device_t dev)
687 return igb_suspend(dev);
691 igb_suspend(device_t dev)
693 struct igb_softc *sc = device_get_softc(dev);
694 struct ifnet *ifp = &sc->arpcom.ac_if;
696 ifnet_serialize_all(ifp);
701 igb_rel_hw_control(sc);
704 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
705 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
709 ifnet_deserialize_all(ifp);
711 return bus_generic_suspend(dev);
715 igb_resume(device_t dev)
717 struct igb_softc *sc = device_get_softc(dev);
718 struct ifnet *ifp = &sc->arpcom.ac_if;
720 ifnet_serialize_all(ifp);
727 ifnet_deserialize_all(ifp);
729 return bus_generic_resume(dev);
733 igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
735 struct igb_softc *sc = ifp->if_softc;
736 struct ifreq *ifr = (struct ifreq *)data;
737 int max_frame_size, mask, reinit;
740 ASSERT_IFNET_SERIALIZED_ALL(ifp);
744 max_frame_size = 9234;
745 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
751 ifp->if_mtu = ifr->ifr_mtu;
752 sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
755 if (ifp->if_flags & IFF_RUNNING)
760 if (ifp->if_flags & IFF_UP) {
761 if (ifp->if_flags & IFF_RUNNING) {
762 if ((ifp->if_flags ^ sc->if_flags) &
763 (IFF_PROMISC | IFF_ALLMULTI)) {
764 igb_disable_promisc(sc);
770 } else if (ifp->if_flags & IFF_RUNNING) {
773 sc->if_flags = ifp->if_flags;
778 if (ifp->if_flags & IFF_RUNNING) {
779 igb_disable_intr(sc);
781 #ifdef DEVICE_POLLING
782 if (!(ifp->if_flags & IFF_POLLING))
790 * As the speed/duplex settings are being
791 * changed, we need toreset the PHY.
793 sc->hw.phy.reset_disable = FALSE;
795 /* Check SOL/IDER usage */
796 if (e1000_check_reset_block(&sc->hw)) {
797 if_printf(ifp, "Media change is "
798 "blocked due to SOL/IDER session.\n");
804 error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
809 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
810 if (mask & IFCAP_RXCSUM) {
811 ifp->if_capenable ^= IFCAP_RXCSUM;
814 if (mask & IFCAP_VLAN_HWTAGGING) {
815 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
818 if (mask & IFCAP_TXCSUM) {
819 ifp->if_capenable ^= IFCAP_TXCSUM;
820 if (ifp->if_capenable & IFCAP_TXCSUM)
821 ifp->if_hwassist |= IGB_CSUM_FEATURES;
823 ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
825 if (mask & IFCAP_RSS)
826 ifp->if_capenable ^= IFCAP_RSS;
827 if (reinit && (ifp->if_flags & IFF_RUNNING))
832 error = ether_ioctl(ifp, command, data);
841 struct igb_softc *sc = xsc;
842 struct ifnet *ifp = &sc->arpcom.ac_if;
846 ASSERT_IFNET_SERIALIZED_ALL(ifp);
850 /* Get the latest mac address, User can use a LAA */
851 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);
853 /* Put the address into the Receive Address Array */
854 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);
857 igb_update_link_status(sc);
859 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
861 /* Configure for OS presence */
865 #ifdef DEVICE_POLLING
866 if (ifp->if_flags & IFF_POLLING)
870 /* Configured used RX/TX rings */
871 igb_set_ring_inuse(sc, polling);
873 /* Initialize interrupt */
876 /* Prepare transmit descriptors and buffers */
877 for (i = 0; i < sc->tx_ring_cnt; ++i)
878 igb_init_tx_ring(&sc->tx_rings[i]);
879 igb_init_tx_unit(sc);
881 /* Setup Multicast table */
886 * Figure out the desired mbuf pool
887 * for doing jumbo/packetsplit
889 if (adapter->max_frame_size <= 2048)
890 adapter->rx_mbuf_sz = MCLBYTES;
891 else if (adapter->max_frame_size <= 4096)
892 adapter->rx_mbuf_sz = MJUMPAGESIZE;
894 adapter->rx_mbuf_sz = MJUM9BYTES;
897 /* Prepare receive descriptors and buffers */
898 for (i = 0; i < sc->rx_ring_inuse; ++i) {
901 error = igb_init_rx_ring(&sc->rx_rings[i]);
903 if_printf(ifp, "Could not setup receive structures\n");
908 igb_init_rx_unit(sc);
910 /* Enable VLAN support */
911 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
914 /* Don't lose promiscuous settings */
917 ifp->if_flags |= IFF_RUNNING;
918 ifp->if_flags &= ~IFF_OACTIVE;
920 callout_reset(&sc->timer, hz, igb_timer, sc);
921 e1000_clear_hw_cntrs_base_generic(&sc->hw);
924 if (adapter->msix > 1) /* Set up queue routing */
925 igb_configure_queues(adapter);
928 /* This clears any pending interrupts */
929 E1000_READ_REG(&sc->hw, E1000_ICR);
932 * Only enable interrupts if we are not polling, make sure
933 * they are off otherwise.
936 igb_disable_intr(sc);
939 E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
942 /* Set Energy Efficient Ethernet */
943 e1000_set_eee_i350(&sc->hw);
945 /* Don't reset the phy next time init gets called */
946 sc->hw.phy.reset_disable = TRUE;
950 igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
952 struct igb_softc *sc = ifp->if_softc;
953 u_char fiber_type = IFM_1000_SX;
955 ASSERT_IFNET_SERIALIZED_ALL(ifp);
957 igb_update_link_status(sc);
959 ifmr->ifm_status = IFM_AVALID;
960 ifmr->ifm_active = IFM_ETHER;
962 if (!sc->link_active)
965 ifmr->ifm_status |= IFM_ACTIVE;
967 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
968 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
969 ifmr->ifm_active |= fiber_type | IFM_FDX;
971 switch (sc->link_speed) {
973 ifmr->ifm_active |= IFM_10_T;
977 ifmr->ifm_active |= IFM_100_TX;
981 ifmr->ifm_active |= IFM_1000_T;
984 if (sc->link_duplex == FULL_DUPLEX)
985 ifmr->ifm_active |= IFM_FDX;
987 ifmr->ifm_active |= IFM_HDX;
992 igb_media_change(struct ifnet *ifp)
994 struct igb_softc *sc = ifp->if_softc;
995 struct ifmedia *ifm = &sc->media;
997 ASSERT_IFNET_SERIALIZED_ALL(ifp);
999 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1002 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1004 sc->hw.mac.autoneg = DO_AUTO_NEG;
1005 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1011 sc->hw.mac.autoneg = DO_AUTO_NEG;
1012 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1016 sc->hw.mac.autoneg = FALSE;
1017 sc->hw.phy.autoneg_advertised = 0;
1018 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1019 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1021 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1025 sc->hw.mac.autoneg = FALSE;
1026 sc->hw.phy.autoneg_advertised = 0;
1027 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1028 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1030 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1034 if_printf(ifp, "Unsupported media type\n");
1044 igb_set_promisc(struct igb_softc *sc)
1046 struct ifnet *ifp = &sc->arpcom.ac_if;
1047 struct e1000_hw *hw = &sc->hw;
1051 e1000_promisc_set_vf(hw, e1000_promisc_enabled);
1055 reg = E1000_READ_REG(hw, E1000_RCTL);
1056 if (ifp->if_flags & IFF_PROMISC) {
1057 reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1058 E1000_WRITE_REG(hw, E1000_RCTL, reg);
1059 } else if (ifp->if_flags & IFF_ALLMULTI) {
1060 reg |= E1000_RCTL_MPE;
1061 reg &= ~E1000_RCTL_UPE;
1062 E1000_WRITE_REG(hw, E1000_RCTL, reg);
1067 igb_disable_promisc(struct igb_softc *sc)
1069 struct e1000_hw *hw = &sc->hw;
1073 e1000_promisc_set_vf(hw, e1000_promisc_disabled);
1076 reg = E1000_READ_REG(hw, E1000_RCTL);
1077 reg &= ~E1000_RCTL_UPE;
1078 reg &= ~E1000_RCTL_MPE;
1079 E1000_WRITE_REG(hw, E1000_RCTL, reg);
1083 igb_set_multi(struct igb_softc *sc)
1085 struct ifnet *ifp = &sc->arpcom.ac_if;
1086 struct ifmultiaddr *ifma;
1087 uint32_t reg_rctl = 0;
1092 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
1094 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1095 if (ifma->ifma_addr->sa_family != AF_LINK)
1098 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1101 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1102 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
1106 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1107 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1108 reg_rctl |= E1000_RCTL_MPE;
1109 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1111 e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
1116 igb_timer(void *xsc)
1118 struct igb_softc *sc = xsc;
1120 lwkt_serialize_enter(&sc->main_serialize);
1122 igb_update_link_status(sc);
1123 igb_update_stats_counters(sc);
1125 callout_reset(&sc->timer, hz, igb_timer, sc);
1127 lwkt_serialize_exit(&sc->main_serialize);
1131 igb_update_link_status(struct igb_softc *sc)
1133 struct ifnet *ifp = &sc->arpcom.ac_if;
1134 struct e1000_hw *hw = &sc->hw;
1135 uint32_t link_check, thstat, ctrl;
1137 link_check = thstat = ctrl = 0;
1139 /* Get the cached link value or read for real */
1140 switch (hw->phy.media_type) {
1141 case e1000_media_type_copper:
1142 if (hw->mac.get_link_status) {
1143 /* Do the work to read phy */
1144 e1000_check_for_link(hw);
1145 link_check = !hw->mac.get_link_status;
1151 case e1000_media_type_fiber:
1152 e1000_check_for_link(hw);
1153 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
1156 case e1000_media_type_internal_serdes:
1157 e1000_check_for_link(hw);
1158 link_check = hw->mac.serdes_has_link;
1161 /* VF device is type_unknown */
1162 case e1000_media_type_unknown:
1163 e1000_check_for_link(hw);
1164 link_check = !hw->mac.get_link_status;
1170 /* Check for thermal downshift or shutdown */
1171 if (hw->mac.type == e1000_i350) {
1172 thstat = E1000_READ_REG(hw, E1000_THSTAT);
1173 ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
1176 /* Now we check if a transition has happened */
1177 if (link_check && sc->link_active == 0) {
1178 e1000_get_speed_and_duplex(hw,
1179 &sc->link_speed, &sc->link_duplex);
1181 if_printf(ifp, "Link is up %d Mbps %s\n",
1183 sc->link_duplex == FULL_DUPLEX ?
1184 "Full Duplex" : "Half Duplex");
1186 sc->link_active = 1;
1188 ifp->if_baudrate = sc->link_speed * 1000000;
1189 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
1190 (thstat & E1000_THSTAT_LINK_THROTTLE))
1191 if_printf(ifp, "Link: thermal downshift\n");
1192 /* This can sleep */
1193 ifp->if_link_state = LINK_STATE_UP;
1194 if_link_state_change(ifp);
1195 } else if (!link_check && sc->link_active == 1) {
1196 ifp->if_baudrate = sc->link_speed = 0;
1197 sc->link_duplex = 0;
1199 if_printf(ifp, "Link is Down\n");
1200 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
1201 (thstat & E1000_THSTAT_PWR_DOWN))
1202 if_printf(ifp, "Link: thermal shutdown\n");
1203 sc->link_active = 0;
1204 /* This can sleep */
1205 ifp->if_link_state = LINK_STATE_DOWN;
1206 if_link_state_change(ifp);
1211 igb_stop(struct igb_softc *sc)
1213 struct ifnet *ifp = &sc->arpcom.ac_if;
1216 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1218 igb_disable_intr(sc);
1220 callout_stop(&sc->timer);
1222 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1225 e1000_reset_hw(&sc->hw);
1226 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
1228 e1000_led_off(&sc->hw);
1229 e1000_cleanup_led(&sc->hw);
1231 for (i = 0; i < sc->tx_ring_cnt; ++i)
1232 igb_free_tx_ring(&sc->tx_rings[i]);
1233 for (i = 0; i < sc->rx_ring_cnt; ++i)
1234 igb_free_rx_ring(&sc->rx_rings[i]);
1238 igb_reset(struct igb_softc *sc)
1240 struct ifnet *ifp = &sc->arpcom.ac_if;
1241 struct e1000_hw *hw = &sc->hw;
1242 struct e1000_fc_info *fc = &hw->fc;
1246 /* Let the firmware know the OS is in control */
1247 igb_get_hw_control(sc);
1250 * Packet Buffer Allocation (PBA)
1251 * Writing PBA sets the receive portion of the buffer
1252 * the remainder is used for the transmit buffer.
1254 switch (hw->mac.type) {
1256 pba = E1000_PBA_32K;
1261 pba = E1000_READ_REG(hw, E1000_RXPBS);
1262 pba &= E1000_RXPBS_SIZE_MASK_82576;
1267 case e1000_vfadapt_i350:
1268 pba = E1000_READ_REG(hw, E1000_RXPBS);
1269 pba = e1000_rxpbs_adjust_82580(pba);
1271 /* XXX pba = E1000_PBA_35K; */
1277 /* Special needs in case of Jumbo frames */
1278 if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
1279 uint32_t tx_space, min_tx, min_rx;
1281 pba = E1000_READ_REG(hw, E1000_PBA);
1282 tx_space = pba >> 16;
1285 min_tx = (sc->max_frame_size +
1286 sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
1287 min_tx = roundup2(min_tx, 1024);
1289 min_rx = sc->max_frame_size;
1290 min_rx = roundup2(min_rx, 1024);
1292 if (tx_space < min_tx && (min_tx - tx_space) < pba) {
1293 pba = pba - (min_tx - tx_space);
1295 * if short on rx space, rx wins
1296 * and must trump tx adjustment
1301 E1000_WRITE_REG(hw, E1000_PBA, pba);
1305 * These parameters control the automatic generation (Tx) and
1306 * response (Rx) to Ethernet PAUSE frames.
1307 * - High water mark should allow for at least two frames to be
1308 * received after sending an XOFF.
1309 * - Low water mark works best when it is very near the high water mark.
1310 * This allows the receiver to restart by sending XON when it has
1313 hwm = min(((pba << 10) * 9 / 10),
1314 ((pba << 10) - 2 * sc->max_frame_size));
1316 if (hw->mac.type < e1000_82576) {
1317 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
1318 fc->low_water = fc->high_water - 8;
1320 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1321 fc->low_water = fc->high_water - 16;
1323 fc->pause_time = IGB_FC_PAUSE_TIME;
1324 fc->send_xon = TRUE;
1326 /* Issue a global reset */
1328 E1000_WRITE_REG(hw, E1000_WUC, 0);
1330 if (e1000_init_hw(hw) < 0)
1331 if_printf(ifp, "Hardware Initialization Failed\n");
1333 /* Setup DMA Coalescing */
1334 if (hw->mac.type == e1000_i350 && sc->dma_coalesce) {
1337 hwm = (pba - 4) << 10;
1338 reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
1339 & E1000_DMACR_DMACTHR_MASK;
1341 /* transition to L0x or L1 if available..*/
1342 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1344 /* timer = +-1000 usec in 32usec intervals */
1346 E1000_WRITE_REG(hw, E1000_DMACR, reg);
1348 /* No lower threshold */
1349 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
1351 /* set hwm to PBA - 2 * max frame size */
1352 E1000_WRITE_REG(hw, E1000_FCRTC, hwm);
1354 /* Set the interval before transition */
1355 reg = E1000_READ_REG(hw, E1000_DMCTLX);
1356 reg |= 0x800000FF; /* 255 usec */
1357 E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
1359 /* free space in tx packet buffer to wake from DMA coal */
1360 E1000_WRITE_REG(hw, E1000_DMCTXTH,
1361 (20480 - (2 * sc->max_frame_size)) >> 6);
1363 /* make low power state decision controlled by DMA coal */
1364 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
1365 E1000_WRITE_REG(hw, E1000_PCIEMISC,
1366 reg | E1000_PCIEMISC_LX_DECISION);
1367 if_printf(ifp, "DMA Coalescing enabled\n");
1370 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
1371 e1000_get_phy_info(hw);
1372 e1000_check_for_link(hw);
1376 igb_setup_ifp(struct igb_softc *sc)
1378 struct ifnet *ifp = &sc->arpcom.ac_if;
1380 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
1382 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1383 ifp->if_init = igb_init;
1384 ifp->if_ioctl = igb_ioctl;
1385 ifp->if_start = igb_start;
1386 ifp->if_serialize = igb_serialize;
1387 ifp->if_deserialize = igb_deserialize;
1388 ifp->if_tryserialize = igb_tryserialize;
1390 ifp->if_serialize_assert = igb_serialize_assert;
1392 #ifdef DEVICE_POLLING
1393 ifp->if_poll = igb_poll;
1395 ifp->if_watchdog = igb_watchdog;
1397 ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
1398 ifq_set_ready(&ifp->if_snd);
1400 ether_ifattach(ifp, sc->hw.mac.addr, NULL);
1402 ifp->if_capabilities =
1403 IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1404 if (IGB_ENABLE_HWRSS(sc))
1405 ifp->if_capabilities |= IFCAP_RSS;
1406 ifp->if_capenable = ifp->if_capabilities;
1407 ifp->if_hwassist = IGB_CSUM_FEATURES;
1410 * Tell the upper layer(s) we support long frames
1412 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1415 * Specify the media types supported by this adapter and register
1416 * callbacks to update media and link information
1418 ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, igb_media_status);
1419 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
1420 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
1421 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1423 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1425 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1426 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1428 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1429 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1431 if (sc->hw.phy.type != e1000_phy_ife) {
1432 ifmedia_add(&sc->media,
1433 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1434 ifmedia_add(&sc->media,
1435 IFM_ETHER | IFM_1000_T, 0, NULL);
1438 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1439 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1443 igb_add_sysctl(struct igb_softc *sc)
1448 sysctl_ctx_init(&sc->sysctl_ctx);
1449 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1450 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
1451 device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
1452 if (sc->sysctl_tree == NULL) {
1453 device_printf(sc->dev, "can't add sysctl node\n");
1457 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1458 OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
1459 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1460 OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
1461 "# of RX rings used");
1462 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1463 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
1465 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1466 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
1469 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
1470 SYSCTL_ADD_PROC(&sc->sysctl_ctx,
1471 SYSCTL_CHILDREN(sc->sysctl_tree),
1472 OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
1473 sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");
1475 for (i = 0; i < sc->msix_cnt; ++i) {
1476 struct igb_msix_data *msix = &sc->msix_data[i];
1478 ksnprintf(node, sizeof(node), "msix%d_rate", i);
1479 SYSCTL_ADD_PROC(&sc->sysctl_ctx,
1480 SYSCTL_CHILDREN(sc->sysctl_tree),
1481 OID_AUTO, node, CTLTYPE_INT | CTLFLAG_RW,
1482 msix, 0, igb_sysctl_msix_rate, "I",
1483 msix->msix_rate_desc);
1487 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1488 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
1489 sc, 0, igb_sysctl_tx_intr_nsegs, "I",
1490 "# of segments per TX interrupt");
1492 #ifdef IGB_RSS_DEBUG
1493 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
1494 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
1496 for (i = 0; i < sc->rx_ring_cnt; ++i) {
1497 ksnprintf(node, sizeof(node), "rx%d_pkt", i);
1498 SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
1499 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
1500 CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
1506 igb_alloc_rings(struct igb_softc *sc)
1511 * Create top level busdma tag
1513 error = bus_dma_tag_create(NULL, 1, 0,
1514 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1515 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
1518 device_printf(sc->dev, "could not create top level DMA tag\n");
1523 * Allocate TX descriptor rings and buffers
1525 sc->tx_rings = kmalloc(sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
1526 M_DEVBUF, M_WAITOK | M_ZERO);
1527 for (i = 0; i < sc->tx_ring_cnt; ++i) {
1528 struct igb_tx_ring *txr = &sc->tx_rings[i];
1530 /* Set up some basics */
1533 lwkt_serialize_init(&txr->tx_serialize);
1535 error = igb_create_tx_ring(txr);
1541 * Allocate RX descriptor rings and buffers
1543 sc->rx_rings = kmalloc(sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
1544 M_DEVBUF, M_WAITOK | M_ZERO);
1545 for (i = 0; i < sc->rx_ring_cnt; ++i) {
1546 struct igb_rx_ring *rxr = &sc->rx_rings[i];
1548 /* Set up some basics */
1551 lwkt_serialize_init(&rxr->rx_serialize);
1553 error = igb_create_rx_ring(rxr);
1562 igb_free_rings(struct igb_softc *sc)
1566 if (sc->tx_rings != NULL) {
1567 for (i = 0; i < sc->tx_ring_cnt; ++i) {
1568 struct igb_tx_ring *txr = &sc->tx_rings[i];
1570 igb_destroy_tx_ring(txr, txr->num_tx_desc);
1572 kfree(sc->tx_rings, M_DEVBUF);
1575 if (sc->rx_rings != NULL) {
1576 for (i = 0; i < sc->rx_ring_cnt; ++i) {
1577 struct igb_rx_ring *rxr = &sc->rx_rings[i];
1579 igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
1581 kfree(sc->rx_rings, M_DEVBUF);
1586 igb_create_tx_ring(struct igb_tx_ring *txr)
1588 int tsize, error, i;
1591 * Validate number of transmit descriptors. It must not exceed
1592 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
1594 if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 ||
1595 (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) {
1596 device_printf(txr->sc->dev,
1597 "Using %d TX descriptors instead of %d!\n",
1598 IGB_DEFAULT_TXD, igb_txd);
1599 txr->num_tx_desc = IGB_DEFAULT_TXD;
1601 txr->num_tx_desc = igb_txd;
1605 * Allocate TX descriptor ring
1607 tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
1609 txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
1610 IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
1611 &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
1612 if (txr->txdma.dma_vaddr == NULL) {
1613 device_printf(txr->sc->dev,
1614 "Unable to allocate TX Descriptor memory\n");
1617 txr->tx_base = txr->txdma.dma_vaddr;
1618 bzero(txr->tx_base, tsize);
1620 txr->tx_buf = kmalloc(sizeof(struct igb_tx_buf) * txr->num_tx_desc,
1621 M_DEVBUF, M_WAITOK | M_ZERO);
1624 * Allocate TX head write-back buffer
1626 txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
1627 __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
1628 &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
1629 if (txr->tx_hdr == NULL) {
1630 device_printf(txr->sc->dev,
1631 "Unable to allocate TX head write-back buffer\n");
1636 * Create DMA tag for TX buffers
1638 error = bus_dma_tag_create(txr->sc->parent_tag,
1639 1, 0, /* alignment, bounds */
1640 BUS_SPACE_MAXADDR, /* lowaddr */
1641 BUS_SPACE_MAXADDR, /* highaddr */
1642 NULL, NULL, /* filter, filterarg */
1643 IGB_TSO_SIZE, /* maxsize */
1644 IGB_MAX_SCATTER, /* nsegments */
1645 PAGE_SIZE, /* maxsegsize */
1646 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
1647 BUS_DMA_ONEBPAGE, /* flags */
1650 device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
1651 kfree(txr->tx_buf, M_DEVBUF);
1657 * Create DMA maps for TX buffers
1659 for (i = 0; i < txr->num_tx_desc; ++i) {
1660 struct igb_tx_buf *txbuf = &txr->tx_buf[i];
1662 error = bus_dmamap_create(txr->tx_tag,
1663 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
1665 device_printf(txr->sc->dev,
1666 "Unable to create TX DMA map\n");
1667 igb_destroy_tx_ring(txr, i);
1673 * Initialize various watermark
1675 txr->spare_desc = IGB_TX_SPARE;
1676 txr->intr_nsegs = txr->num_tx_desc / 16;
1677 txr->oact_hi_desc = txr->num_tx_desc / 2;
1678 txr->oact_lo_desc = txr->num_tx_desc / 8;
1679 if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
1680 txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
1681 if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
1682 txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;
1688 igb_free_tx_ring(struct igb_tx_ring *txr)
1692 for (i = 0; i < txr->num_tx_desc; ++i) {
1693 struct igb_tx_buf *txbuf = &txr->tx_buf[i];
1695 if (txbuf->m_head != NULL) {
1696 bus_dmamap_unload(txr->tx_tag, txbuf->map);
1697 m_freem(txbuf->m_head);
1698 txbuf->m_head = NULL;
1704 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
1708 if (txr->txdma.dma_vaddr != NULL) {
1709 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
1710 bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
1711 txr->txdma.dma_map);
1712 bus_dma_tag_destroy(txr->txdma.dma_tag);
1713 txr->txdma.dma_vaddr = NULL;
1716 if (txr->tx_hdr != NULL) {
1717 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
1718 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
1720 bus_dma_tag_destroy(txr->tx_hdr_dtag);
1724 if (txr->tx_buf == NULL)
1727 for (i = 0; i < ndesc; ++i) {
1728 struct igb_tx_buf *txbuf = &txr->tx_buf[i];
1730 KKASSERT(txbuf->m_head == NULL);
1731 bus_dmamap_destroy(txr->tx_tag, txbuf->map);
1733 bus_dma_tag_destroy(txr->tx_tag);
1735 kfree(txr->tx_buf, M_DEVBUF);
1740 igb_init_tx_ring(struct igb_tx_ring *txr)
1742 /* Clear the old descriptor contents */
1744 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);
1746 /* Clear TX head write-back buffer */
1750 txr->next_avail_desc = 0;
1751 txr->next_to_clean = 0;
1754 /* Set number of descriptors available */
1755 txr->tx_avail = txr->num_tx_desc;
1759 igb_init_tx_unit(struct igb_softc *sc)
1761 struct e1000_hw *hw = &sc->hw;
1765 /* Setup the Tx Descriptor Rings */
1766 for (i = 0; i < sc->tx_ring_cnt; ++i) {
1767 struct igb_tx_ring *txr = &sc->tx_rings[i];
1768 uint64_t bus_addr = txr->txdma.dma_paddr;
1769 uint64_t hdr_paddr = txr->tx_hdr_paddr;
1770 uint32_t txdctl = 0;
1771 uint32_t dca_txctrl;
1773 E1000_WRITE_REG(hw, E1000_TDLEN(i),
1774 txr->num_tx_desc * sizeof(struct e1000_tx_desc));
1775 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1776 (uint32_t)(bus_addr >> 32));
1777 E1000_WRITE_REG(hw, E1000_TDBAL(i),
1778 (uint32_t)bus_addr);
1780 /* Setup the HW Tx Head and Tail descriptor pointers */
1781 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1782 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1785 * WTHRESH is ignored by the hardware, since header
1786 * write back mode is used.
1788 txdctl |= IGB_TX_PTHRESH;
1789 txdctl |= IGB_TX_HTHRESH << 8;
1790 txdctl |= IGB_TX_WTHRESH << 16;
1791 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1792 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
1794 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
1795 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1796 E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);
1799 * Don't set WB_on_EITR:
1800 * - 82575 does not have it
1801 * - It almost has no effect on 82576, see:
1802 * 82576 specification update errata #26
1803 * - It causes unnecessary bus traffic
1805 E1000_WRITE_REG(hw, E1000_TDWBAH(i),
1806 (uint32_t)(hdr_paddr >> 32));
1807 E1000_WRITE_REG(hw, E1000_TDWBAL(i),
1808 ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);
1814 e1000_config_collision_dist(hw);
1816 /* Program the Transmit Control Register */
1817 tctl = E1000_READ_REG(hw, E1000_TCTL);
1818 tctl &= ~E1000_TCTL_CT;
1819 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1820 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1822 /* This write will effectively turn on the transmit unit. */
1823 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1827 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp)
1829 struct e1000_adv_tx_context_desc *TXD;
1830 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
1831 int ehdrlen, ctxd, ip_hlen = 0;
1832 uint16_t vlantag = 0;
1833 boolean_t offload = TRUE;
1835 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)
1838 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
1840 ctxd = txr->next_avail_desc;
1841 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];
1844 * In advanced descriptors the vlan tag must
1845 * be placed into the context descriptor, thus
1846 * we need to be here just for that setup.
1848 if (mp->m_flags & M_VLANTAG) {
1849 vlantag = htole16(mp->m_pkthdr.ether_vlantag);
1850 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
1851 } else if (!offload) {
1855 ehdrlen = mp->m_pkthdr.csum_lhlen;
1856 KASSERT(ehdrlen > 0, ("invalid ether hlen"));
1858 /* Set the ether header length */
1859 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
1861 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
1862 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
1863 ip_hlen = mp->m_pkthdr.csum_iphlen;
1864 KASSERT(ip_hlen > 0, ("invalid ip hlen"));
1867 vlan_macip_lens |= ip_hlen;
1868 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
1870 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
1871 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
1872 else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
1873 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
1875 /* 82575 needs the queue index added */
1876 if (txr->sc->hw.mac.type == e1000_82575)
1877 mss_l4len_idx = txr->me << 4;
1879 /* Now copy bits into descriptor */
1880 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
1881 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
1882 TXD->seqnum_seed = htole32(0);
1883 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
1885 /* We've consumed the first desc, adjust counters */
1886 if (++ctxd == txr->num_tx_desc)
1888 txr->next_avail_desc = ctxd;
1895 igb_txeof(struct igb_tx_ring *txr)
1897 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
1898 int first, hdr, avail;
1900 if (txr->tx_avail == txr->num_tx_desc)
1903 first = txr->next_to_clean;
1904 hdr = *(txr->tx_hdr);
1909 avail = txr->tx_avail;
1910 while (first != hdr) {
1911 struct igb_tx_buf *txbuf = &txr->tx_buf[first];
1914 if (txbuf->m_head) {
1915 bus_dmamap_unload(txr->tx_tag, txbuf->map);
1916 m_freem(txbuf->m_head);
1917 txbuf->m_head = NULL;
1920 if (++first == txr->num_tx_desc)
1923 txr->next_to_clean = first;
1924 txr->tx_avail = avail;
1927 * If we have a minimum free, clear IFF_OACTIVE
1928 * to tell the stack that it is OK to send packets.
1930 if (IGB_IS_NOT_OACTIVE(txr)) {
1931 ifp->if_flags &= ~IFF_OACTIVE;
1934 * We have enough TX descriptors, turn off
1935 * the watchdog. We allow small amount of
1936 * packets (roughly intr_nsegs) pending on
1937 * the transmit ring.
1944 igb_create_rx_ring(struct igb_rx_ring *rxr)
1946 int rsize, i, error;
1949 * Validate number of receive descriptors. It must not exceed
1950 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
1952 if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 ||
1953 (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) {
1954 device_printf(rxr->sc->dev,
1955 "Using %d RX descriptors instead of %d!\n",
1956 IGB_DEFAULT_RXD, igb_rxd);
1957 rxr->num_rx_desc = IGB_DEFAULT_RXD;
1959 rxr->num_rx_desc = igb_rxd;
1963 * Allocate RX descriptor ring
1965 rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
1967 rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
1968 IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
1969 &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
1970 &rxr->rxdma.dma_paddr);
1971 if (rxr->rxdma.dma_vaddr == NULL) {
1972 device_printf(rxr->sc->dev,
1973 "Unable to allocate RxDescriptor memory\n");
1976 rxr->rx_base = rxr->rxdma.dma_vaddr;
1977 bzero(rxr->rx_base, rsize);
1979 rxr->rx_buf = kmalloc(sizeof(struct igb_rx_buf) * rxr->num_rx_desc,
1980 M_DEVBUF, M_WAITOK | M_ZERO);
1983 * Create DMA tag for RX buffers
1985 error = bus_dma_tag_create(rxr->sc->parent_tag,
1986 1, 0, /* alignment, bounds */
1987 BUS_SPACE_MAXADDR, /* lowaddr */
1988 BUS_SPACE_MAXADDR, /* highaddr */
1989 NULL, NULL, /* filter, filterarg */
1990 MCLBYTES, /* maxsize */
1992 MCLBYTES, /* maxsegsize */
1993 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
1996 device_printf(rxr->sc->dev,
1997 "Unable to create RX payload DMA tag\n");
1998 kfree(rxr->rx_buf, M_DEVBUF);
2004 * Create spare DMA map for RX buffers
2006 error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
2009 device_printf(rxr->sc->dev,
2010 "Unable to create spare RX DMA maps\n");
2011 bus_dma_tag_destroy(rxr->rx_tag);
2012 kfree(rxr->rx_buf, M_DEVBUF);
2018 * Create DMA maps for RX buffers
2020 for (i = 0; i < rxr->num_rx_desc; i++) {
2021 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2023 error = bus_dmamap_create(rxr->rx_tag,
2024 BUS_DMA_WAITOK, &rxbuf->map);
2026 device_printf(rxr->sc->dev,
2027 "Unable to create RX DMA maps\n");
2028 igb_destroy_rx_ring(rxr, i);
2036 igb_free_rx_ring(struct igb_rx_ring *rxr)
2040 for (i = 0; i < rxr->num_rx_desc; ++i) {
2041 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2043 if (rxbuf->m_head != NULL) {
2044 bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
2045 m_freem(rxbuf->m_head);
2046 rxbuf->m_head = NULL;
2050 if (rxr->fmp != NULL)
2057 igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
2061 if (rxr->rxdma.dma_vaddr != NULL) {
2062 bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
2063 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
2064 rxr->rxdma.dma_map);
2065 bus_dma_tag_destroy(rxr->rxdma.dma_tag);
2066 rxr->rxdma.dma_vaddr = NULL;
2069 if (rxr->rx_buf == NULL)
2072 for (i = 0; i < ndesc; ++i) {
2073 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2075 KKASSERT(rxbuf->m_head == NULL);
2076 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
2078 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
2079 bus_dma_tag_destroy(rxr->rx_tag);
2081 kfree(rxr->rx_buf, M_DEVBUF);
2086 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
2088 rxd->read.pkt_addr = htole64(rxbuf->paddr);
2089 rxd->wb.upper.status_error = 0;
2093 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
2096 bus_dma_segment_t seg;
2098 struct igb_rx_buf *rxbuf;
2101 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2104 if_printf(&rxr->sc->arpcom.ac_if,
2105 "Unable to allocate RX mbuf\n");
2109 m->m_len = m->m_pkthdr.len = MCLBYTES;
2111 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
2112 m_adj(m, ETHER_ALIGN);
2114 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
2115 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
2119 if_printf(&rxr->sc->arpcom.ac_if,
2120 "Unable to load RX mbuf\n");
2125 rxbuf = &rxr->rx_buf[i];
2126 if (rxbuf->m_head != NULL)
2127 bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
2130 rxbuf->map = rxr->rx_sparemap;
2131 rxr->rx_sparemap = map;
2134 rxbuf->paddr = seg.ds_addr;
2136 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf);
2141 igb_init_rx_ring(struct igb_rx_ring *rxr)
2145 /* Clear the ring contents */
2147 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc));
2149 /* Now replenish the ring mbufs */
2150 for (i = 0; i < rxr->num_rx_desc; ++i) {
2153 error = igb_newbuf(rxr, i, TRUE);
2158 /* Setup our descriptor indices */
2159 rxr->next_to_check = 0;
2163 rxr->discard = FALSE;
2169 igb_init_rx_unit(struct igb_softc *sc)
2171 struct ifnet *ifp = &sc->arpcom.ac_if;
2172 struct e1000_hw *hw = &sc->hw;
2173 uint32_t rctl, rxcsum, srrctl = 0;
2177 * Make sure receives are disabled while setting
2178 * up the descriptor ring
2180 rctl = E1000_READ_REG(hw, E1000_RCTL);
2181 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2185 ** Set up for header split
2187 if (igb_header_split) {
2188 /* Use a standard mbuf for the header */
2189 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2190 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2193 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2196 ** Set up for jumbo frames
2198 if (ifp->if_mtu > ETHERMTU) {
2199 rctl |= E1000_RCTL_LPE;
2201 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
2202 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2203 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
2204 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
2205 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2206 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
2208 /* Set maximum packet len */
2209 psize = adapter->max_frame_size;
2210 /* are we on a vlan? */
2211 if (adapter->ifp->if_vlantrunk != NULL)
2212 psize += VLAN_TAG_SIZE;
2213 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
2215 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2216 rctl |= E1000_RCTL_SZ_2048;
2219 rctl &= ~E1000_RCTL_LPE;
2220 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2221 rctl |= E1000_RCTL_SZ_2048;
2224 /* Setup the Base and Length of the Rx Descriptor Rings */
2225 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2226 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2227 uint64_t bus_addr = rxr->rxdma.dma_paddr;
2230 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2231 rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
2232 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2233 (uint32_t)(bus_addr >> 32));
2234 E1000_WRITE_REG(hw, E1000_RDBAL(i),
2235 (uint32_t)bus_addr);
2236 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2237 /* Enable this Queue */
2238 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2239 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2240 rxdctl &= 0xFFF00000;
2241 rxdctl |= IGB_RX_PTHRESH;
2242 rxdctl |= IGB_RX_HTHRESH << 8;
2244 * Don't set WTHRESH to a value above 1 on 82576, see:
2245 * 82576 specification update errata #26
2247 rxdctl |= IGB_RX_WTHRESH << 16;
2248 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2251 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
2252 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);
2255 * Receive Checksum Offload for TCP and UDP
2257 * Checksum offloading is also enabled if multiple receive
2258 * queue is to be supported, since we need it to figure out
2261 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
2264 * PCSD must be enabled to enable multiple
2267 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2270 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2273 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
2275 if (IGB_ENABLE_HWRSS(sc)) {
2276 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
2277 uint32_t reta_shift;
2282 * When we reach here, RSS has already been disabled
2283 * in igb_stop(), so we could safely configure RSS key
2284 * and redirect table.
2290 toeplitz_get_key(key, sizeof(key));
2291 for (i = 0; i < IGB_NRSSRK; ++i) {
2294 rssrk = IGB_RSSRK_VAL(key, i);
2295 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
2297 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
2301 * Configure RSS redirect table in following fashion:
2302 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2304 reta_shift = IGB_RETA_SHIFT;
2305 if (hw->mac.type == e1000_82575)
2306 reta_shift = IGB_RETA_SHIFT_82575;
2309 for (j = 0; j < IGB_NRETA; ++j) {
2312 for (i = 0; i < IGB_RETA_SIZE; ++i) {
2315 q = (r % sc->rx_ring_inuse) << reta_shift;
2316 reta |= q << (8 * i);
2319 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
2320 E1000_WRITE_REG(hw, E1000_RETA(j), reta);
2324 * Enable multiple receive queues.
2325 * Enable IPv4 RSS standard hash functions.
2326 * Disable RSS interrupt on 82575
2328 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
2329 E1000_MRQC_ENABLE_RSS_4Q |
2330 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2331 E1000_MRQC_RSS_FIELD_IPV4);
2334 /* Setup the Receive Control Register */
2335 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2336 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2337 E1000_RCTL_RDMTS_HALF |
2338 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2339 /* Strip CRC bytes. */
2340 rctl |= E1000_RCTL_SECRC;
2341 /* Make sure VLAN Filters are off */
2342 rctl &= ~E1000_RCTL_VFE;
2343 /* Don't store bad packets */
2344 rctl &= ~E1000_RCTL_SBP;
2346 /* Enable Receives */
2347 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2350 * Setup the HW Rx Head and Tail Descriptor Pointers
2351 * - needs to be after enable
2353 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2354 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2356 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
2357 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1);
2362 igb_rxeof(struct igb_rx_ring *rxr, int count)
2364 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
2365 union e1000_adv_rx_desc *cur;
2369 i = rxr->next_to_check;
2370 cur = &rxr->rx_base[i];
2371 staterr = le32toh(cur->wb.upper.status_error);
2373 if ((staterr & E1000_RXD_STAT_DD) == 0)
2376 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
2377 struct pktinfo *pi = NULL, pi0;
2378 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2379 struct mbuf *m = NULL;
2382 eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE;
2386 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 &&
2388 struct mbuf *mp = rxbuf->m_head;
2389 uint32_t hash, hashtype;
2393 len = le16toh(cur->wb.upper.length);
2394 if (rxr->sc->hw.mac.type == e1000_i350 &&
2395 (staterr & E1000_RXDEXT_STATERR_LB))
2396 vlan = be16toh(cur->wb.upper.vlan);
2398 vlan = le16toh(cur->wb.upper.vlan);
2400 hash = le32toh(cur->wb.lower.hi_dword.rss);
2401 hashtype = le32toh(cur->wb.lower.lo_dword.data) &
2402 E1000_RXDADV_RSSTYPE_MASK;
2404 IGB_RSS_DPRINTF(rxr->sc, 10,
2405 "ring%d, hash 0x%08x, hashtype %u\n",
2406 rxr->me, hash, hashtype);
2408 bus_dmamap_sync(rxr->rx_tag, rxbuf->map,
2409 BUS_DMASYNC_POSTREAD);
2411 if (igb_newbuf(rxr, i, FALSE) != 0) {
2417 if (rxr->fmp == NULL) {
2418 mp->m_pkthdr.len = len;
2422 rxr->lmp->m_next = mp;
2423 rxr->lmp = rxr->lmp->m_next;
2424 rxr->fmp->m_pkthdr.len += len;
2432 m->m_pkthdr.rcvif = ifp;
2435 if (ifp->if_capenable & IFCAP_RXCSUM)
2436 igb_rxcsum(staterr, m);
2438 if (staterr & E1000_RXD_STAT_VP) {
2439 m->m_pkthdr.ether_vlantag = vlan;
2440 m->m_flags |= M_VLANTAG;
2443 if (ifp->if_capenable & IFCAP_RSS) {
2444 pi = igb_rssinfo(m, &pi0,
2445 hash, hashtype, staterr);
2447 #ifdef IGB_RSS_DEBUG
2454 igb_setup_rxdesc(cur, rxbuf);
2456 rxr->discard = TRUE;
2458 rxr->discard = FALSE;
2459 if (rxr->fmp != NULL) {
2468 ether_input_pkt(ifp, m, pi);
2470 /* Advance our pointers to the next descriptor. */
2471 if (++i == rxr->num_rx_desc)
2474 cur = &rxr->rx_base[i];
2475 staterr = le32toh(cur->wb.upper.status_error);
2477 rxr->next_to_check = i;
2480 i = rxr->num_rx_desc - 1;
2481 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i);
2486 igb_set_vlan(struct igb_softc *sc)
2488 struct e1000_hw *hw = &sc->hw;
2491 struct ifnet *ifp = sc->arpcom.ac_if;
2495 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE);
2499 reg = E1000_READ_REG(hw, E1000_CTRL);
2500 reg |= E1000_CTRL_VME;
2501 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2504 /* Enable the Filter Table */
2505 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
2506 reg = E1000_READ_REG(hw, E1000_RCTL);
2507 reg &= ~E1000_RCTL_CFIEN;
2508 reg |= E1000_RCTL_VFE;
2509 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2513 /* Update the frame size */
2514 E1000_WRITE_REG(&sc->hw, E1000_RLPML,
2515 sc->max_frame_size + VLAN_TAG_SIZE);
2518 /* Don't bother with table if no vlans */
2519 if ((adapter->num_vlans == 0) ||
2520 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
2523 ** A soft reset zero's out the VFTA, so
2524 ** we need to repopulate it now.
2526 for (int i = 0; i < IGB_VFTA_SIZE; i++)
2527 if (adapter->shadow_vfta[i] != 0) {
2528 if (adapter->vf_ifp)
2529 e1000_vfta_set_vf(hw,
2530 adapter->shadow_vfta[i], TRUE);
2532 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
2533 i, adapter->shadow_vfta[i]);
2539 igb_enable_intr(struct igb_softc *sc)
2541 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
2542 lwkt_serialize_handler_enable(&sc->main_serialize);
2546 for (i = 0; i < sc->msix_cnt; ++i) {
2547 lwkt_serialize_handler_enable(
2548 sc->msix_data[i].msix_serialize);
2552 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2553 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
2554 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask);
2556 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2557 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask);
2558 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
2559 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
2561 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
2563 E1000_WRITE_FLUSH(&sc->hw);
2567 igb_disable_intr(struct igb_softc *sc)
2569 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2570 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff);
2571 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2573 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
2574 E1000_WRITE_FLUSH(&sc->hw);
2576 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
2577 lwkt_serialize_handler_disable(&sc->main_serialize);
2581 for (i = 0; i < sc->msix_cnt; ++i) {
2582 lwkt_serialize_handler_disable(
2583 sc->msix_data[i].msix_serialize);
2589 * Bit of a misnomer, what this really means is
2590 * to enable OS management of the system... aka
2591 * to disable special hardware management features
2594 igb_get_mgmt(struct igb_softc *sc)
2596 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2597 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
2598 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2600 /* disable hardware interception of ARP */
2601 manc &= ~E1000_MANC_ARP_EN;
2603 /* enable receiving management packets to the host */
2604 manc |= E1000_MANC_EN_MNG2HOST;
2605 manc2h |= 1 << 5; /* Mng Port 623 */
2606 manc2h |= 1 << 6; /* Mng Port 664 */
2607 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
2608 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2613 * Give control back to hardware management controller
2617 igb_rel_mgmt(struct igb_softc *sc)
2619 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2620 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2622 /* Re-enable hardware interception of ARP */
2623 manc |= E1000_MANC_ARP_EN;
2624 manc &= ~E1000_MANC_EN_MNG2HOST;
2626 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2631 * Sets CTRL_EXT:DRV_LOAD bit.
2633 * For ASF and Pass Through versions of f/w this means that
2634 * the driver is loaded.
2637 igb_get_hw_control(struct igb_softc *sc)
2644 /* Let firmware know the driver has taken over */
2645 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2646 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2647 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2651 * Resets CTRL_EXT:DRV_LOAD bit.
2653 * For ASF and Pass Through versions of f/w this means that the
2654 * driver is no longer loaded.
2657 igb_rel_hw_control(struct igb_softc *sc)
2664 /* Let firmware taken over control of h/w */
2665 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2666 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2667 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2671 igb_is_valid_ether_addr(const uint8_t *addr)
2673 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2675 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
2681 * Enable PCI Wake On Lan capability
2684 igb_enable_wol(device_t dev)
2686 uint16_t cap, status;
2689 /* First find the capabilities pointer*/
2690 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
2692 /* Read the PM Capabilities */
2693 id = pci_read_config(dev, cap, 1);
2694 if (id != PCIY_PMG) /* Something wrong */
2698 * OK, we have the power capabilities,
2699 * so now get the status register
2701 cap += PCIR_POWER_STATUS;
2702 status = pci_read_config(dev, cap, 2);
2703 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2704 pci_write_config(dev, cap, status, 2);
2708 igb_update_stats_counters(struct igb_softc *sc)
2710 struct e1000_hw *hw = &sc->hw;
2711 struct e1000_hw_stats *stats;
2712 struct ifnet *ifp = &sc->arpcom.ac_if;
2715 * The virtual function adapter has only a
2716 * small controlled set of stats, do only
2720 igb_update_vf_stats_counters(sc);
2725 if (sc->hw.phy.media_type == e1000_media_type_copper ||
2726 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
2728 E1000_READ_REG(hw,E1000_SYMERRS);
2729 stats->sec += E1000_READ_REG(hw, E1000_SEC);
2732 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
2733 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
2734 stats->scc += E1000_READ_REG(hw, E1000_SCC);
2735 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
2737 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
2738 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
2739 stats->colc += E1000_READ_REG(hw, E1000_COLC);
2740 stats->dc += E1000_READ_REG(hw, E1000_DC);
2741 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
2742 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
2743 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
2746 * For watchdog management we need to know if we have been
2747 * paused during the last interval, so capture that here.
2749 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
2750 stats->xoffrxc += sc->pause_frames;
2751 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
2752 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
2753 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
2754 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
2755 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
2756 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
2757 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
2758 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
2759 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
2760 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
2761 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
2762 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
2764 /* For the 64-bit byte counters the low dword must be read first. */
2765 /* Both registers clear on the read of the high dword */
2767 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
2768 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
2769 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
2770 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
2772 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
2773 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
2774 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
2775 stats->roc += E1000_READ_REG(hw, E1000_ROC);
2776 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
2778 stats->tor += E1000_READ_REG(hw, E1000_TORH);
2779 stats->tot += E1000_READ_REG(hw, E1000_TOTH);
2781 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
2782 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
2783 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
2784 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
2785 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
2786 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
2787 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
2788 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
2789 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
2790 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
2792 /* Interrupt Counts */
2794 stats->iac += E1000_READ_REG(hw, E1000_IAC);
2795 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
2796 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
2797 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
2798 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
2799 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
2800 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
2801 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
2802 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
2804 /* Host to Card Statistics */
2806 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
2807 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
2808 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
2809 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
2810 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
2811 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
2812 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
2813 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
2814 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32));
2815 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
2816 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
2817 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
2818 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
2819 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
2821 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
2822 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
2823 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
2824 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
2825 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
2826 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
2828 ifp->if_collisions = stats->colc;
2831 ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
2832 stats->ruc + stats->roc + stats->mpc + stats->cexterr;
2835 ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events;
2837 /* Driver specific counters */
2838 sc->device_control = E1000_READ_REG(hw, E1000_CTRL);
2839 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL);
2840 sc->int_mask = E1000_READ_REG(hw, E1000_IMS);
2841 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
2842 sc->packet_buf_alloc_tx =
2843 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
2844 sc->packet_buf_alloc_rx =
2845 (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
2849 igb_vf_init_stats(struct igb_softc *sc)
2851 struct e1000_hw *hw = &sc->hw;
2852 struct e1000_vf_stats *stats;
2855 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
2856 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
2857 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
2858 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
2859 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
2863 igb_update_vf_stats_counters(struct igb_softc *sc)
2865 struct e1000_hw *hw = &sc->hw;
2866 struct e1000_vf_stats *stats;
2868 if (sc->link_speed == 0)
2872 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
2873 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
2874 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc);
2875 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc);
2876 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc);
2879 #ifdef DEVICE_POLLING
2882 igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2884 struct igb_softc *sc = ifp->if_softc;
2889 case POLL_DEREGISTER:
2890 ASSERT_IFNET_SERIALIZED_ALL(ifp);
2894 case POLL_AND_CHECK_STATUS:
2895 ASSERT_SERIALIZED(&sc->main_serialize);
2896 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
2897 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
2898 sc->hw.mac.get_link_status = 1;
2899 igb_update_link_status(sc);
2903 ASSERT_SERIALIZED(&sc->main_serialize);
2904 if (ifp->if_flags & IFF_RUNNING) {
2905 struct igb_tx_ring *txr;
2908 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2909 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2911 lwkt_serialize_enter(&rxr->rx_serialize);
2912 igb_rxeof(rxr, count);
2913 lwkt_serialize_exit(&rxr->rx_serialize);
2916 txr = &sc->tx_rings[0];
2917 lwkt_serialize_enter(&txr->tx_serialize);
2919 if (!ifq_is_empty(&ifp->if_snd))
2921 lwkt_serialize_exit(&txr->tx_serialize);
2927 #endif /* DEVICE_POLLING */
2932 struct igb_softc *sc = xsc;
2933 struct ifnet *ifp = &sc->arpcom.ac_if;
2936 ASSERT_SERIALIZED(&sc->main_serialize);
2938 eicr = E1000_READ_REG(&sc->hw, E1000_EICR);
2943 if (ifp->if_flags & IFF_RUNNING) {
2944 struct igb_tx_ring *txr;
2947 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2948 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2950 if (eicr & rxr->rx_intr_mask) {
2951 lwkt_serialize_enter(&rxr->rx_serialize);
2953 lwkt_serialize_exit(&rxr->rx_serialize);
2957 txr = &sc->tx_rings[0];
2958 if (eicr & txr->tx_intr_mask) {
2959 lwkt_serialize_enter(&txr->tx_serialize);
2961 if (!ifq_is_empty(&ifp->if_snd))
2963 lwkt_serialize_exit(&txr->tx_serialize);
2967 if (eicr & E1000_EICR_OTHER) {
2968 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);
2970 /* Link status change */
2971 if (icr & E1000_ICR_LSC) {
2972 sc->hw.mac.get_link_status = 1;
2973 igb_update_link_status(sc);
2978 * Reading EICR has the side effect to clear interrupt mask,
2979 * so all interrupts need to be enabled here.
2981 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
2985 igb_intr_shared(void *xsc)
2987 struct igb_softc *sc = xsc;
2988 struct ifnet *ifp = &sc->arpcom.ac_if;
2991 ASSERT_SERIALIZED(&sc->main_serialize);
2993 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
2996 if (reg_icr == 0xffffffff)
2999 /* Definitely not our interrupt. */
3003 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
3006 if (ifp->if_flags & IFF_RUNNING) {
3008 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
3011 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3012 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3014 lwkt_serialize_enter(&rxr->rx_serialize);
3016 lwkt_serialize_exit(&rxr->rx_serialize);
3020 if (reg_icr & E1000_ICR_TXDW) {
3021 struct igb_tx_ring *txr = &sc->tx_rings[0];
3023 lwkt_serialize_enter(&txr->tx_serialize);
3025 if (!ifq_is_empty(&ifp->if_snd))
3027 lwkt_serialize_exit(&txr->tx_serialize);
3031 /* Link status change */
3032 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3033 sc->hw.mac.get_link_status = 1;
3034 igb_update_link_status(sc);
3037 if (reg_icr & E1000_ICR_RXO)
3042 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp)
3044 bus_dma_segment_t segs[IGB_MAX_SCATTER];
3046 struct igb_tx_buf *tx_buf, *tx_buf_mapped;
3047 union e1000_adv_tx_desc *txd = NULL;
3048 struct mbuf *m_head = *m_headp;
3049 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
3050 int maxsegs, nsegs, i, j, error, last = 0;
3051 uint32_t hdrlen = 0;
3053 /* Set basic descriptor constants */
3054 cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
3055 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
3056 if (m_head->m_flags & M_VLANTAG)
3057 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3060 * Map the packet for DMA.
3062 tx_buf = &txr->tx_buf[txr->next_avail_desc];
3063 tx_buf_mapped = tx_buf;
3066 maxsegs = txr->tx_avail - IGB_TX_RESERVED;
3067 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n"));
3068 if (maxsegs > IGB_MAX_SCATTER)
3069 maxsegs = IGB_MAX_SCATTER;
3071 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
3072 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3074 if (error == ENOBUFS)
3075 txr->sc->mbuf_defrag_failed++;
3077 txr->sc->no_tx_dma_setup++;
3083 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);
3089 * Set up the context descriptor:
3090 * used when any hardware offload is done.
3091 * This includes CSUM, VLAN, and TSO. It
3092 * will use the first descriptor.
3094 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3095 if (igb_tso_setup(txr, m_head, &hdrlen)) {
3096 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3097 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3098 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3101 } else if (igb_tx_ctx_setup(txr, m_head))
3102 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3104 if (igb_txcsum_ctx(txr, m_head)) {
3105 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3106 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
3107 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
3108 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
3113 txr->tx_nsegs += nsegs;
3114 if (txr->tx_nsegs >= txr->intr_nsegs) {
3116 * Report Status (RS) is turned on every intr_nsegs
3117 * descriptors (roughly).
3120 cmd_rs = E1000_ADVTXD_DCMD_RS;
3123 /* Calculate payload length */
3124 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
3125 << E1000_ADVTXD_PAYLEN_SHIFT);
3127 /* 82575 needs the queue index added */
3128 if (txr->sc->hw.mac.type == e1000_82575)
3129 olinfo_status |= txr->me << 4;
3131 /* Set up our transmit descriptors */
3132 i = txr->next_avail_desc;
3133 for (j = 0; j < nsegs; j++) {
3135 bus_addr_t seg_addr;
3137 tx_buf = &txr->tx_buf[i];
3138 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
3139 seg_addr = segs[j].ds_addr;
3140 seg_len = segs[j].ds_len;
3142 txd->read.buffer_addr = htole64(seg_addr);
3143 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
3144 txd->read.olinfo_status = htole32(olinfo_status);
3146 if (++i == txr->num_tx_desc)
3148 tx_buf->m_head = NULL;
3151 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
3152 txr->next_avail_desc = i;
3153 txr->tx_avail -= nsegs;
3155 tx_buf->m_head = m_head;
3156 tx_buf_mapped->map = tx_buf->map;
3160 * Last Descriptor of Packet needs End Of Packet (EOP)
3162 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);
3165 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
3166 * that this frame is available to transmit.
3168 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), i);
3175 igb_start(struct ifnet *ifp)
3177 struct igb_softc *sc = ifp->if_softc;
3178 struct igb_tx_ring *txr = &sc->tx_rings[0];
3179 struct mbuf *m_head;
3181 ASSERT_SERIALIZED(&txr->tx_serialize);
3183 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3186 if (!sc->link_active) {
3187 ifq_purge(&ifp->if_snd);
3191 if (!IGB_IS_NOT_OACTIVE(txr))
3194 while (!ifq_is_empty(&ifp->if_snd)) {
3195 if (IGB_IS_OACTIVE(txr)) {
3196 ifp->if_flags |= IFF_OACTIVE;
3197 /* Set watchdog on */
3202 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3206 if (igb_encap(txr, &m_head)) {
3211 /* Send a copy of the frame to the BPF listener */
3212 ETHER_BPF_MTAP(ifp, m_head);
3217 igb_watchdog(struct ifnet *ifp)
3219 struct igb_softc *sc = ifp->if_softc;
3220 struct igb_tx_ring *txr = &sc->tx_rings[0];
3222 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3225 * If flow control has paused us since last checking
3226 * it invalidates the watchdog timing, so dont run it.
3228 if (sc->pause_frames) {
3229 sc->pause_frames = 0;
3234 if_printf(ifp, "Watchdog timeout -- resetting\n");
3235 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
3236 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
3237 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
3238 if_printf(ifp, "TX(%d) desc avail = %d, "
3239 "Next TX to Clean = %d\n",
3240 txr->me, txr->tx_avail, txr->next_to_clean);
3243 sc->watchdog_events++;
3246 if (!ifq_is_empty(&ifp->if_snd))
3251 igb_set_eitr(struct igb_softc *sc, int idx, int rate)
3256 if (sc->hw.mac.type == e1000_82575) {
3257 eitr = 1000000000 / 256 / rate;
3260 * Document is wrong on the 2 bits left shift
3263 eitr = 1000000 / rate;
3264 eitr <<= IGB_EITR_INTVL_SHIFT;
3268 /* Don't disable it */
3269 eitr = 1 << IGB_EITR_INTVL_SHIFT;
3270 } else if (eitr > IGB_EITR_INTVL_MASK) {
3271 /* Don't allow it to be too large */
3272 eitr = IGB_EITR_INTVL_MASK;
3275 if (sc->hw.mac.type == e1000_82575)
3278 eitr |= E1000_EITR_CNT_IGNR;
3279 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr);
3283 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3285 struct igb_softc *sc = (void *)arg1;
3286 struct ifnet *ifp = &sc->arpcom.ac_if;
3287 int error, intr_rate;
3289 intr_rate = sc->intr_rate;
3290 error = sysctl_handle_int(oidp, &intr_rate, 0, req);
3291 if (error || req->newptr == NULL)
3296 ifnet_serialize_all(ifp);
3298 sc->intr_rate = intr_rate;
3299 if (ifp->if_flags & IFF_RUNNING)
3300 igb_set_eitr(sc, 0, sc->intr_rate);
3303 if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate);
3305 ifnet_deserialize_all(ifp);
3311 igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS)
3313 struct igb_msix_data *msix = (void *)arg1;
3314 struct igb_softc *sc = msix->msix_sc;
3315 struct ifnet *ifp = &sc->arpcom.ac_if;
3316 int error, msix_rate;
3318 msix_rate = msix->msix_rate;
3319 error = sysctl_handle_int(oidp, &msix_rate, 0, req);
3320 if (error || req->newptr == NULL)
3325 lwkt_serialize_enter(msix->msix_serialize);
3327 msix->msix_rate = msix_rate;
3328 if (ifp->if_flags & IFF_RUNNING)
3329 igb_set_eitr(sc, msix->msix_vector, msix->msix_rate);
3332 if_printf(ifp, "%s set to %d/sec\n", msix->msix_rate_desc,
3336 lwkt_serialize_exit(msix->msix_serialize);
3342 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
3344 struct igb_softc *sc = (void *)arg1;
3345 struct ifnet *ifp = &sc->arpcom.ac_if;
3346 struct igb_tx_ring *txr = &sc->tx_rings[0];
3349 nsegs = txr->intr_nsegs;
3350 error = sysctl_handle_int(oidp, &nsegs, 0, req);
3351 if (error || req->newptr == NULL)
3356 ifnet_serialize_all(ifp);
3358 if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc ||
3359 nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) {
3363 txr->intr_nsegs = nsegs;
3366 ifnet_deserialize_all(ifp);
3372 igb_init_intr(struct igb_softc *sc)
3374 igb_set_intr_mask(sc);
3376 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0)
3377 igb_init_unshared_intr(sc);
3379 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
3380 igb_set_eitr(sc, 0, sc->intr_rate);
3384 for (i = 0; i < sc->msix_cnt; ++i)
3385 igb_set_eitr(sc, i, sc->msix_data[i].msix_rate);
3390 igb_init_unshared_intr(struct igb_softc *sc)
3392 struct e1000_hw *hw = &sc->hw;
3393 const struct igb_rx_ring *rxr;
3394 const struct igb_tx_ring *txr;
3395 uint32_t ivar, index;
3399 * Enable extended mode
3401 if (sc->hw.mac.type != e1000_82575) {
3405 gpie = E1000_GPIE_NSICR;
3406 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3407 gpie |= E1000_GPIE_MSIX_MODE |
3411 E1000_WRITE_REG(hw, E1000_GPIE, gpie);
3416 switch (sc->hw.mac.type) {
3418 ivar_max = IGB_MAX_IVAR_82580;
3422 ivar_max = IGB_MAX_IVAR_I350;
3426 case e1000_vfadapt_i350:
3427 ivar_max = IGB_MAX_IVAR_VF;
3431 ivar_max = IGB_MAX_IVAR_82576;
3435 panic("unknown mac type %d\n", sc->hw.mac.type);
3437 for (i = 0; i < ivar_max; ++i)
3438 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0);
3439 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
3443 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX,
3444 ("82575 w/ MSI-X"));
3445 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
3446 tmp |= E1000_CTRL_EXT_IRCA;
3447 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
3451 * Map TX/RX interrupts to EICR
3453 switch (sc->hw.mac.type) {
3457 case e1000_vfadapt_i350:
3459 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3460 rxr = &sc->rx_rings[i];
3463 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3468 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3472 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3474 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3477 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3478 txr = &sc->tx_rings[i];
3481 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3486 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3490 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3492 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3494 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3495 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8;
3496 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
3502 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3503 rxr = &sc->rx_rings[i];
3505 index = i & 0x7; /* Each IVAR has two entries */
3506 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3511 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3515 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3517 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3520 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3521 txr = &sc->tx_rings[i];
3523 index = i & 0x7; /* Each IVAR has two entries */
3524 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3529 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3533 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3535 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3537 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3538 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8;
3539 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
3545 * Enable necessary interrupt bits.
3547 * The name of the register is confusing; in addition to
3548 * configuring the first vector of MSI-X, it also configures
3549 * which bits of EICR could be set by the hardware even when
3550 * MSI or line interrupt is used; it thus controls interrupt
3551 * generation. It MUST be configured explicitly; the default
3552 * value mentioned in the datasheet is wrong: RX queue0 and
3553 * TX queue0 are NOT enabled by default.
3555 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask);
3559 panic("unknown mac type %d\n", sc->hw.mac.type);
3564 igb_setup_intr(struct igb_softc *sc)
3566 struct ifnet *ifp = &sc->arpcom.ac_if;
3569 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
3570 return igb_msix_setup(sc);
3572 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE,
3573 (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_intr_shared : igb_intr,
3574 sc, &sc->intr_tag, &sc->main_serialize);
3576 device_printf(sc->dev, "Failed to register interrupt handler");
3580 ifp->if_cpuid = rman_get_cpuid(sc->intr_res);
3581 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3587 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax)
3589 if (txr->sc->hw.mac.type == e1000_82575) {
3590 txr->tx_intr_bit = 0; /* unused */
3593 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
3596 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
3599 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
3602 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
3605 panic("unsupported # of TX ring, %d\n", txr->me);
3608 int intr_bit = *intr_bit0;
3610 txr->tx_intr_bit = intr_bit % intr_bitmax;
3611 txr->tx_intr_mask = 1 << txr->tx_intr_bit;
3613 *intr_bit0 = intr_bit + 1;
3618 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax)
3620 if (rxr->sc->hw.mac.type == e1000_82575) {
3621 rxr->rx_intr_bit = 0; /* unused */
3624 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
3627 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
3630 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
3633 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
3636 panic("unsupported # of RX ring, %d\n", rxr->me);
3639 int intr_bit = *intr_bit0;
3641 rxr->rx_intr_bit = intr_bit % intr_bitmax;
3642 rxr->rx_intr_mask = 1 << rxr->rx_intr_bit;
3644 *intr_bit0 = intr_bit + 1;
3649 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3651 struct igb_softc *sc = ifp->if_softc;
3653 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt,
3654 sc->tx_serialize, sc->rx_serialize, slz);
3658 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3660 struct igb_softc *sc = ifp->if_softc;
3662 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt,
3663 sc->tx_serialize, sc->rx_serialize, slz);
3667 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3669 struct igb_softc *sc = ifp->if_softc;
3671 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
3672 sc->tx_serialize, sc->rx_serialize, slz);
3678 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3679 boolean_t serialized)
3681 struct igb_softc *sc = ifp->if_softc;
3683 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
3684 sc->tx_serialize, sc->rx_serialize, slz, serialized);
3687 #endif /* INVARIANTS */
3690 igb_set_intr_mask(struct igb_softc *sc)
3694 sc->intr_mask = sc->sts_intr_mask;
3695 for (i = 0; i < sc->rx_ring_inuse; ++i)
3696 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
3697 for (i = 0; i < sc->tx_ring_cnt; ++i)
3698 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;
3700 device_printf(sc->dev, "intr mask 0x%08x\n", sc->intr_mask);
3704 igb_alloc_intr(struct igb_softc *sc)
3706 int i, intr_bit, intr_bitmax;
3709 igb_msix_try_alloc(sc);
3710 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
3714 * Allocate MSI/legacy interrupt resource
3716 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
3717 &sc->intr_rid, &intr_flags);
3719 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
3722 unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
3724 sc->flags |= IGB_FLAG_SHARED_INTR;
3726 device_printf(sc->dev, "IRQ shared\n");
3728 intr_flags &= ~RF_SHAREABLE;
3730 device_printf(sc->dev, "IRQ unshared\n");
3734 sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
3735 &sc->intr_rid, intr_flags);
3736 if (sc->intr_res == NULL) {
3737 device_printf(sc->dev, "Unable to allocate bus resource: "
3743 * Setup MSI/legacy interrupt mask
3745 switch (sc->hw.mac.type) {
3747 intr_bitmax = IGB_MAX_TXRXINT_82575;
3750 intr_bitmax = IGB_MAX_TXRXINT_82580;
3753 intr_bitmax = IGB_MAX_TXRXINT_I350;
3756 intr_bitmax = IGB_MAX_TXRXINT_82576;
3759 intr_bitmax = IGB_MIN_TXRXINT;
3763 for (i = 0; i < sc->tx_ring_cnt; ++i)
3764 igb_set_txintr_mask(&sc->tx_rings[i], &intr_bit, intr_bitmax);
3765 for (i = 0; i < sc->rx_ring_cnt; ++i)
3766 igb_set_rxintr_mask(&sc->rx_rings[i], &intr_bit, intr_bitmax);
3767 sc->sts_intr_bit = 0;
3768 sc->sts_intr_mask = E1000_EICR_OTHER;
3770 /* Initialize interrupt rate */
3771 sc->intr_rate = IGB_INTR_RATE;
3773 igb_set_ring_inuse(sc, FALSE);
3774 igb_set_intr_mask(sc);
3779 igb_free_intr(struct igb_softc *sc)
3781 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
3782 if (sc->intr_res != NULL) {
3783 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr_rid,
3786 if (sc->intr_type == PCI_INTR_TYPE_MSI)
3787 pci_release_msi(sc->dev);
3789 igb_msix_free(sc, TRUE);
3794 igb_teardown_intr(struct igb_softc *sc)
3796 if (sc->intr_type != PCI_INTR_TYPE_MSIX)
3797 bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_tag);
3799 igb_msix_teardown(sc, sc->msix_cnt);
3803 igb_msix_try_alloc(struct igb_softc *sc)
3805 int msix_enable, msix_cnt, msix_cnt2, alloc_cnt;
3807 struct igb_msix_data *msix;
3808 boolean_t aggregate, setup = FALSE;
3811 * Don't enable MSI-X on 82575, see:
3812 * 82575 specification update errata #25
3814 if (sc->hw.mac.type == e1000_82575)
3817 /* Don't enable MSI-X on VF */
3821 msix_enable = device_getenv_int(sc->dev, "msix.enable",
3826 msix_cnt = pci_msix_count(sc->dev);
3827 #ifdef IGB_MSIX_DEBUG
3828 msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
3830 if (msix_cnt <= 1) {
3831 /* One MSI-X model does not make sense */
3836 while ((1 << (i + 1)) <= msix_cnt)
3841 device_printf(sc->dev, "MSI-X count %d/%d\n",
3842 msix_cnt2, msix_cnt);
3845 KKASSERT(msix_cnt2 <= msix_cnt);
3846 if (msix_cnt == msix_cnt2) {
3847 /* We need at least one MSI-X for link status */
3849 if (msix_cnt2 <= 1) {
3850 /* One MSI-X for RX/TX does not make sense */
3851 device_printf(sc->dev, "not enough MSI-X for TX/RX, "
3852 "MSI-X count %d/%d\n", msix_cnt2, msix_cnt);
3855 KKASSERT(msix_cnt > msix_cnt2);
3858 device_printf(sc->dev, "MSI-X count fixup %d/%d\n",
3859 msix_cnt2, msix_cnt);
3863 sc->rx_ring_msix = sc->rx_ring_cnt;
3864 if (sc->rx_ring_msix > msix_cnt2)
3865 sc->rx_ring_msix = msix_cnt2;
3867 if (msix_cnt >= sc->tx_ring_cnt + sc->rx_ring_msix + 1) {
3869 * Independent TX/RX MSI-X
3873 device_printf(sc->dev, "independent TX/RX MSI-X\n");
3874 alloc_cnt = sc->tx_ring_cnt + sc->rx_ring_msix;
3877 * Aggregate TX/RX MSI-X
3881 device_printf(sc->dev, "aggregate TX/RX MSI-X\n");
3882 alloc_cnt = msix_cnt2;
3883 if (alloc_cnt > ncpus2)
3885 if (sc->rx_ring_msix > alloc_cnt)
3886 sc->rx_ring_msix = alloc_cnt;
3888 ++alloc_cnt; /* For link status */
3891 device_printf(sc->dev, "MSI-X alloc %d, RX ring %d\n",
3892 alloc_cnt, sc->rx_ring_msix);
3895 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR);
3896 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
3897 &sc->msix_mem_rid, RF_ACTIVE);
3898 if (sc->msix_mem_res == NULL) {
3899 device_printf(sc->dev, "Unable to map MSI-X table\n");
3903 sc->msix_cnt = alloc_cnt;
3904 sc->msix_data = kmalloc(sizeof(struct igb_msix_data) * sc->msix_cnt,
3905 M_DEVBUF, M_WAITOK | M_ZERO);
3906 for (x = 0; x < sc->msix_cnt; ++x) {
3907 msix = &sc->msix_data[x];
3909 lwkt_serialize_init(&msix->msix_serialize0);
3911 msix->msix_rid = -1;
3912 msix->msix_vector = x;
3913 msix->msix_mask = 1 << msix->msix_vector;
3914 msix->msix_rate = IGB_INTR_RATE;
3919 int offset, offset_def;
3921 if (sc->rx_ring_msix == ncpus2) {
3924 offset_def = (sc->rx_ring_msix *
3925 device_get_unit(sc->dev)) % ncpus2;
3927 offset = device_getenv_int(sc->dev,
3928 "msix.rxoff", offset_def);
3929 if (offset >= ncpus2 ||
3930 offset % sc->rx_ring_msix != 0) {
3931 device_printf(sc->dev,
3932 "invalid msix.rxoff %d, use %d\n",
3933 offset, offset_def);
3934 offset = offset_def;
3939 for (i = 0; i < sc->rx_ring_msix; ++i) {
3940 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3942 KKASSERT(x < sc->msix_cnt);
3943 msix = &sc->msix_data[x++];
3944 rxr->rx_intr_bit = msix->msix_vector;
3945 rxr->rx_intr_mask = msix->msix_mask;
3947 msix->msix_serialize = &rxr->rx_serialize;
3948 msix->msix_func = igb_msix_rx;
3949 msix->msix_arg = rxr;
3950 msix->msix_cpuid = i + offset;
3951 KKASSERT(msix->msix_cpuid < ncpus2);
3952 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
3953 "%s rx%d", device_get_nameunit(sc->dev), i);
3954 msix->msix_rate = IGB_MSIX_RX_RATE;
3955 ksnprintf(msix->msix_rate_desc,
3956 sizeof(msix->msix_rate_desc),
3957 "RX%d interrupt rate", i);
3960 offset_def = device_get_unit(sc->dev) % ncpus2;
3961 offset = device_getenv_int(sc->dev, "msix.txoff", offset_def);
3962 if (offset >= ncpus2) {
3963 device_printf(sc->dev, "invalid msix.txoff %d, "
3964 "use %d\n", offset, offset_def);
3965 offset = offset_def;
3969 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3970 struct igb_tx_ring *txr = &sc->tx_rings[i];
3972 KKASSERT(x < sc->msix_cnt);
3973 msix = &sc->msix_data[x++];
3974 txr->tx_intr_bit = msix->msix_vector;
3975 txr->tx_intr_mask = msix->msix_mask;
3977 msix->msix_serialize = &txr->tx_serialize;
3978 msix->msix_func = igb_msix_tx;
3979 msix->msix_arg = txr;
3980 msix->msix_cpuid = i + offset;
3981 sc->msix_tx_cpuid = msix->msix_cpuid; /* XXX */
3982 KKASSERT(msix->msix_cpuid < ncpus2);
3983 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
3984 "%s tx%d", device_get_nameunit(sc->dev), i);
3985 msix->msix_rate = IGB_MSIX_TX_RATE;
3986 ksnprintf(msix->msix_rate_desc,
3987 sizeof(msix->msix_rate_desc),
3988 "TX%d interrupt rate", i);
3999 KKASSERT(x < sc->msix_cnt);
4000 msix = &sc->msix_data[x++];
4001 sc->sts_intr_bit = msix->msix_vector;
4002 sc->sts_intr_mask = msix->msix_mask;
4004 msix->msix_serialize = &sc->main_serialize;
4005 msix->msix_func = igb_msix_status;
4006 msix->msix_arg = sc;
4007 msix->msix_cpuid = 0; /* TODO tunable */
4008 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s sts",
4009 device_get_nameunit(sc->dev));
4010 ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
4011 "status interrupt rate");
4013 KKASSERT(x == sc->msix_cnt);
4015 error = pci_setup_msix(sc->dev);
4017 device_printf(sc->dev, "Setup MSI-X failed\n");
4022 for (i = 0; i < sc->msix_cnt; ++i) {
4023 msix = &sc->msix_data[i];
4025 error = pci_alloc_msix_vector(sc->dev, msix->msix_vector,
4026 &msix->msix_rid, msix->msix_cpuid);
4028 device_printf(sc->dev,
4029 "Unable to allocate MSI-X %d on cpu%d\n",
4030 msix->msix_vector, msix->msix_cpuid);
4034 msix->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
4035 &msix->msix_rid, RF_ACTIVE);
4036 if (msix->msix_res == NULL) {
4037 device_printf(sc->dev,
4038 "Unable to allocate MSI-X %d resource\n",
4045 pci_enable_msix(sc->dev);
4046 sc->intr_type = PCI_INTR_TYPE_MSIX;
4049 igb_msix_free(sc, setup);
4053 igb_msix_free(struct igb_softc *sc, boolean_t setup)
4057 KKASSERT(sc->msix_cnt > 1);
4059 for (i = 0; i < sc->msix_cnt; ++i) {
4060 struct igb_msix_data *msix = &sc->msix_data[i];
4062 if (msix->msix_res != NULL) {
4063 bus_release_resource(sc->dev, SYS_RES_IRQ,
4064 msix->msix_rid, msix->msix_res);
4066 if (msix->msix_rid >= 0)
4067 pci_release_msix_vector(sc->dev, msix->msix_rid);
4070 pci_teardown_msix(sc->dev);
4073 kfree(sc->msix_data, M_DEVBUF);
4074 sc->msix_data = NULL;
4078 igb_msix_setup(struct igb_softc *sc)
4080 struct ifnet *ifp = &sc->arpcom.ac_if;
4083 for (i = 0; i < sc->msix_cnt; ++i) {
4084 struct igb_msix_data *msix = &sc->msix_data[i];
4087 error = bus_setup_intr_descr(sc->dev, msix->msix_res,
4088 INTR_MPSAFE, msix->msix_func, msix->msix_arg,
4089 &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
4091 device_printf(sc->dev, "could not set up %s "
4092 "interrupt handler.\n", msix->msix_desc);
4093 igb_msix_teardown(sc, i);
4097 ifp->if_cpuid = sc->msix_tx_cpuid;
4103 igb_msix_teardown(struct igb_softc *sc, int msix_cnt)
4107 for (i = 0; i < msix_cnt; ++i) {
4108 struct igb_msix_data *msix = &sc->msix_data[i];
4110 bus_teardown_intr(sc->dev, msix->msix_res, msix->msix_handle);
4115 igb_msix_rx(void *arg)
4117 struct igb_rx_ring *rxr = arg;
4119 ASSERT_SERIALIZED(&rxr->rx_serialize);
4122 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
4126 igb_msix_tx(void *arg)
4128 struct igb_tx_ring *txr = arg;
4129 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4131 ASSERT_SERIALIZED(&txr->tx_serialize);
4134 if (!ifq_is_empty(&ifp->if_snd))
4137 E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
4141 igb_msix_status(void *arg)
4143 struct igb_softc *sc = arg;
4146 ASSERT_SERIALIZED(&sc->main_serialize);
4148 icr = E1000_READ_REG(&sc->hw, E1000_ICR);
4149 if (icr & E1000_ICR_LSC) {
4150 sc->hw.mac.get_link_status = 1;
4151 igb_update_link_status(sc);
4154 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
4158 igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
4160 if (!IGB_ENABLE_HWRSS(sc))
4163 if (sc->intr_type != PCI_INTR_TYPE_MSIX || polling)
4164 sc->rx_ring_inuse = IGB_MIN_RING_RSS;
4166 sc->rx_ring_inuse = sc->rx_ring_msix;
4168 device_printf(sc->dev, "RX rings %d/%d\n",
4169 sc->rx_ring_inuse, sc->rx_ring_cnt);