/*
 * Copyright (c) 2001-2011, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/igb/if_igb.h>
#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */
#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id) \
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }
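
/*
 * Example expansion: IGB_DEVICE(82575EB_COPPER) becomes
 * { IGB_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER,
 *   "Intel(R) PRO/1000 82575EB_COPPER" },
 * pairing the PCI device id with a human readable description.
 */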
static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};
static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
static int	igb_get_rxring_inuse(const struct igb_softc *, boolean_t);
static int	igb_get_txring_inuse(const struct igb_softc *, boolean_t);
#ifdef IFPOLL_ENABLE
static int	igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);
static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);
static void	igb_setup_serializer(struct igb_softc *);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifaltq_subque *);
static void	igb_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t, boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *);
static void	igb_msix_try_alloc(struct igb_softc *);
static void	igb_msix_rx_conf(struct igb_softc *, int, int *, int);
static void	igb_msix_tx_conf(struct igb_softc *, int, int *, int);
static void	igb_msix_free(struct igb_softc *, boolean_t);
static int	igb_msix_setup(struct igb_softc *);
static void	igb_msix_teardown(struct igb_softc *, int);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);
static void	igb_msix_rxtx(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);
static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	DEVMETHOD_END
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};
static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_txr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */
static int	igb_fc_setting = e1000_fc_full;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.txr", &igb_txr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* The Ignore-Checksum bit is set; leave the checksum to software */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}
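
/*
 * Note on the flags set above: CSUM_DATA_VALID | CSUM_PSEUDO_HDR with
 * csum_data set to 0xffff tells the stack that the hardware already
 * verified the full TCP/UDP checksum, including the pseudo header, so
 * no software checksumming is performed on the packet.
 */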

static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
		} else {
			return NULL;
		}
		break;

	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);

	return pi;
}
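
/*
 * Note: igb_rssinfo() above tags the mbuf with M_HASH and the hardware
 * computed Toeplitz hash; the stack uses this hash to dispatch the
 * packet to the CPU owning the flow, consistent with the RSS redirect
 * table programmed in igb_init_rx_unit().
 */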

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, i, ring_max;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

#ifdef notyet
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
	    adapter, 0, igb_set_flowcntl, "I", "Flow Control");
#endif

	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		ring_max = IGB_MAX_RING_82575;
		break;
	case e1000_82580:
		ring_max = IGB_MAX_RING_82580;
		break;
	case e1000_i350:
		ring_max = IGB_MAX_RING_I350;
		break;
	case e1000_82576:
		ring_max = IGB_MAX_RING_82576;
		break;
	default:
		ring_max = IGB_MIN_RING;
		break;
	}

	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max);
#ifdef IGB_RSS_DEBUG
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt);
#endif
	sc->rx_ring_inuse = sc->rx_ring_cnt;

	sc->tx_ring_cnt = device_getenv_int(dev, "txr", igb_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
#ifdef IGB_TSS_DEBUG
	sc->tx_ring_cnt = device_getenv_int(dev, "txr_debug", sc->tx_ring_cnt);
#endif
	sc->tx_ring_inuse = sc->tx_ring_cnt;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
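	/* With the standard ETHERMTU this is 1500 + 14 + 4 = 1518 bytes. */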

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif
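	/*
	 * Example of the offset scheme above: with 4 TX rings, ncpus2 == 8
	 * and device unit 1, offset_def = (4 * 1) % 8 = 4, so this unit's
	 * rings are polled on cpus 4-7 while unit 0 uses cpus 0-3.
	 */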

	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/* Setup serializers */
	igb_setup_serializer(sc);

	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		e1000_set_eee_i350(&sc->hw);
	}

	/*
	 * Start from a known state, this is important in reading the nvm and
	 * mac from that.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree, must be done after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/* Setup Wake-on-Lan */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	igb_add_hw_stats(adapter);
#endif

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq =
		    ifq_get_subq(&sc->arpcom.ac_if.if_snd, i);
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
		ifsq_set_priv(ifsq, txr);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, igb_watchdog);
	}

	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		igb_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}
	bus_generic_detach(dev);

	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);
	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/*
		 * As the speed/duplex settings are being
		 * changed, we need to reset the PHY.
		 */
		sc->hw.phy.reset_disable = FALSE;

		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the number of RX/TX rings in use */
	igb_set_ring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0; /* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_res);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	e1000_set_eee_i350(&sc->hw);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;

		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	igb_init(sc);

	return 0;
}

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALLTHROUGH */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			if_printf(ifp, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex");
		}
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
		sc->tx_rings[i].tx_flags &= ~IGB_TXFLAG_ENABLED;
	}

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}

static void
igb_reset(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;
		/* XXX pba = E1000_PBA_35K; */

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
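		/*
		 * Worked example, assuming a 34KB RX packet buffer and a
		 * 1518-byte max frame: pba << 10 = 34816 bytes, so
		 * hwm = min(34816 * 9 / 10, 34816 - 2 * 1518) = 31334;
		 * masked to 16-byte granularity, high_water = 31328 and
		 * low_water = 31312.
		 */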
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	if (hw->mac.type == e1000_i350 && sc->dma_coalesce) {
		uint32_t reg;

		hwm = (pba - 4) << 10;
		reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK;

		/* transition to L0x or L1 if available.. */
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/* timer = +-1000 usec in 32usec intervals */
		reg |= (1000 >> 5);
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* No lower threshold */
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* set hwm to PBA - 2 * max frame size */
		E1000_WRITE_REG(hw, E1000_FCRTC, hwm);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x800000FF; /* 255 usec */
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * sc->max_frame_size)) >> 6);
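		/*
		 * The DMCTXTH threshold is programmed in 64-byte units
		 * (hence the shift by 6); the 20480 above presumably
		 * reflects the TX packet buffer size, keeping room for
		 * two maximum-sized frames before waking from coalescing.
		 */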

		/* make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg | E1000_PCIEMISC_LX_DECISION);
		if_printf(ifp, "DMA Coalescing enabled\n");
	}

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_mask;
	ifq_set_subq_mask(&ifp->if_snd, 0);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, igb_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	char node[32];
	int i;

	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		device_printf(sc->dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
	    "# of TX rings used");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree),
		    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");
	} else {
		for (i = 0; i < sc->msix_cnt; ++i) {
			struct igb_msix_data *msix = &sc->msix_data[i];

			ksnprintf(node, sizeof(node), "msix%d_rate", i);
			SYSCTL_ADD_PROC(&sc->sysctl_ctx,
			    SYSCTL_CHILDREN(sc->sysctl_tree),
			    OID_AUTO, node, CTLTYPE_INT | CTLFLAG_RW,
			    msix, 0, igb_sysctl_msix_rate, "I",
			    msix->msix_rate_desc);
		}
	}

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_wreg_nsegs, "I",
	    "# of segments sent before write to hardware register");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_rx_wreg_nsegs, "I",
	    "# of segments received before write to hardware register");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
	}
#endif
#ifdef IGB_TSS_DEBUG
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ksnprintf(node, sizeof(node), "tx%d_pkt", i);
		SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->tx_rings[i].tx_packets, "TXed packets");
	}
#endif
}

static int
igb_alloc_rings(struct igb_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc_cachealign(
	    sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;
		lwkt_serialize_init(&txr->tx_serialize);

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc_cachealign(
	    sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	return 0;
}

static void
igb_free_rings(struct igb_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}
}

static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	if (txr->sc->hw.mac.type == e1000_82575)
		txr->tx_flags |= IGB_TXFLAG_TSO_IPLEN0;

	/*
	 * Initialize various watermarks
	 */
	txr->spare_desc = IGB_TX_SPARE;
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->wreg_nsegs = IGB_DEF_TXWREG_NSEGS;
	txr->oact_hi_desc = txr->num_tx_desc / 2;
	txr->oact_lo_desc = txr->num_tx_desc / 8;
	if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
		txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
	if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
		txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;

	return 0;
}
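
/*
 * Example of the watermarks above with 1024 TX descriptors: a TX
 * completion interrupt is requested roughly every 64 (1024/16)
 * consumed descriptors, while oact_hi_desc/oact_lo_desc (512/128,
 * subject to the clamps) form the hysteresis used to set and clear
 * the subqueue's OACTIVE state as descriptors run out and are
 * reclaimed.
 */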

static void
igb_free_tx_ring(struct igb_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}

static void
igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
{
	int i;

	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;
	}

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		    txr->tx_hdr_dmap);
		bus_dma_tag_destroy(txr->tx_hdr_dtag);
		txr->tx_hdr = NULL;
	}

	if (txr->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	}
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);
	txr->tx_buf = NULL;
}

static void
igb_init_tx_ring(struct igb_tx_ring *txr)
{
	/* Clear the old descriptor contents */
	bzero(txr->tx_base,
	    sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);

	/* Clear TX head write-back buffer */
	*(txr->tx_hdr) = 0;

	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->num_tx_desc;

	/* Enable this TX ring */
	txr->tx_flags |= IGB_TXFLAG_ENABLED;
}

static void
igb_init_tx_unit(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t tctl;
	int i;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    txr->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		/*
		 * Don't set WB_on_EITR:
		 * - 82575 does not have it
		 * - It almost has no effect on 82576, see:
		 *   82576 specification update errata #26
		 * - It causes unnecessary bus traffic
		 */
		E1000_WRITE_REG(hw, E1000_TDWBAH(i),
		    (uint32_t)(hdr_paddr >> 32));
		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
		    ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);
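		/*
		 * With head write-back enabled the hardware DMAs the index
		 * of the last completed descriptor into tx_hdr in host
		 * memory, so igb_txeof() reads *(txr->tx_hdr) instead of
		 * performing a costly uncached read of the TDH register.
		 */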

		/*
		 * WTHRESH is ignored by the hardware, since header
		 * write back mode is used.
		 */
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}

	if (sc->vf_ifp)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}

static boolean_t
igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int ehdrlen, ctxd, ip_hlen = 0;
	boolean_t offload = TRUE;

	if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)
		offload = FALSE;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor, thus
	 * we need to be here just for that setup.
	 */
	if (mp->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(mp->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (!offload) {
		return FALSE;
	}

	ehdrlen = mp->m_pkthdr.csum_lhlen;
	KASSERT(ehdrlen > 0, ("invalid ether hlen"));

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	if (mp->m_pkthdr.csum_flags & CSUM_IP) {
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		ip_hlen = mp->m_pkthdr.csum_iphlen;
		KASSERT(ip_hlen > 0, ("invalid ip hlen"));
	}
	vlan_macip_lens |= ip_hlen;

	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	if (mp->m_pkthdr.csum_flags & CSUM_TCP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;

	/* 82575 needs the queue index added */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return TRUE;
}

static void
igb_txeof(struct igb_tx_ring *txr)
{
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
	int first, hdr, avail;

	if (txr->tx_avail == txr->num_tx_desc)
		return;

	first = txr->next_to_clean;
	hdr = *(txr->tx_hdr);
	if (first == hdr)
		return;

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		++avail;
		if (txbuf->m_head) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
			IFNET_STAT_INC(ifp, opackets, 1);
		}
		if (++first == txr->num_tx_desc)
			first = 0;
	}
	txr->next_to_clean = first;
	txr->tx_avail = avail;

	/*
	 * If we have a minimum free, clear OACTIVE
	 * to tell the stack that it is OK to send packets.
	 */
	if (IGB_IS_NOT_OACTIVE(txr)) {
		ifsq_clr_oactive(txr->ifsq);

		/*
		 * We have enough TX descriptors, turn off
		 * the watchdog. We allow small amount of
		 * packets (roughly intr_nsegs) pending on
		 * the transmit ring.
		 */
		txr->tx_watchdog.wd_timer = 0;
	}
}

static int
igb_create_rx_ring(struct igb_rx_ring *rxr)
{
	int rsize, i, error, nrxd;

	/*
	 * Validate number of receive descriptors. It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
	if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
	    nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, nrxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
	} else {
		rxr->num_rx_desc = nrxd;
	}

	/*
	 * Allocate RX descriptor ring
	 */
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	    IGB_DBA_ALIGN);
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RX descriptor memory\n");
		return ENOMEM;
	}
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
	rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for RX buffers
	 */
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    1,			/* nsegments */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &rxr->rx_tag);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for RX buffers
	 */
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
	    &rxr->rx_sparemap);
	if (error) {
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA maps\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);
		rxr->rx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for RX buffers
	 */
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
		if (error) {
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	rxr->wreg_nsegs = IGB_DEF_RXWREG_NSEGS;

	return 0;
}

static void
igb_free_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;
		}
	}

	if (rxr->fmp != NULL)
		m_freem(rxr->fmp);
	rxr->fmp = NULL;
	rxr->lmp = NULL;
}

static void
igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
{
	int i;

	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;
	}

	if (rxr->rx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		KKASSERT(rxbuf->m_head == NULL);
		bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
	}
	bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
	bus_dma_tag_destroy(rxr->rx_tag);

	kfree(rxr->rx_buf, M_DEVBUF);
	rxr->rx_buf = NULL;
}

static __inline void
igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
{
	rxd->read.pkt_addr = htole64(rxbuf->paddr);
	rxd->wb.upper.status_error = 0;
}

static int
igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct igb_rx_buf *rxbuf;
	int error, nseg;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);
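	/*
	 * The 2-byte ETHER_ALIGN adjustment makes the 14-byte ethernet
	 * header end on a 4-byte boundary, so the IP header that follows
	 * is naturally aligned; it is skipped when a maximum-sized frame
	 * would no longer fit in the shrunken cluster.
	 */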

	error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
	    rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&rxr->sc->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return error;
	}

	rxbuf = &rxr->rx_buf[i];
	if (rxbuf->m_head != NULL)
		bus_dmamap_unload(rxr->rx_tag, rxbuf->map);

	map = rxbuf->map;
	rxbuf->map = rxr->rx_sparemap;
	rxr->rx_sparemap = map;

	rxbuf->m_head = m;
	rxbuf->paddr = seg.ds_addr;

	igb_setup_rxdesc(&rxr->rx_base[i], rxbuf);
	return 0;
}

static int
igb_init_rx_ring(struct igb_rx_ring *rxr)
{
	int i;

	/* Clear the ring contents */
	bzero(rxr->rx_base,
	    rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc));

	/* Now replenish the ring mbufs */
	for (i = 0; i < rxr->num_rx_desc; ++i) {
		int error;

		error = igb_newbuf(rxr, i, TRUE);
		if (error)
			return error;
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;

	rxr->fmp = NULL;
	rxr->lmp = NULL;
	rxr->discard = FALSE;

	return 0;
}
2288 igb_init_rx_unit(struct igb_softc *sc)
2290 struct ifnet *ifp = &sc->arpcom.ac_if;
2291 struct e1000_hw *hw = &sc->hw;
2292 uint32_t rctl, rxcsum, srrctl = 0;
2296 * Make sure receives are disabled while setting
2297 * up the descriptor ring
2299 rctl = E1000_READ_REG(hw, E1000_RCTL);
2300 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2304 ** Set up for header split
2306 if (igb_header_split) {
2307 /* Use a standard mbuf for the header */
2308 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2309 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2312 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2315 ** Set up for jumbo frames
2317 if (ifp->if_mtu > ETHERMTU) {
2318 rctl |= E1000_RCTL_LPE;
2320 if (sc->rx_mbuf_sz == MJUMPAGESIZE) {
2321 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2322 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
2323 } else if (sc->rx_mbuf_sz > MJUMPAGESIZE) {
2324 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2325 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
2327 /* Set maximum packet len */
2328 psize = sc->max_frame_size;
2329 /* are we on a vlan? */
2330 if (ifp->if_vlantrunk != NULL)
2331 psize += VLAN_TAG_SIZE;
2332 E1000_WRITE_REG(hw, E1000_RLPML, psize);
2334 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2335 rctl |= E1000_RCTL_SZ_2048;
2338 rctl &= ~E1000_RCTL_LPE;
2339 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2340 rctl |= E1000_RCTL_SZ_2048;
2343 /* Setup the Base and Length of the Rx Descriptor Rings */
2344 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2345 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2346 uint64_t bus_addr = rxr->rxdma.dma_paddr;
2349 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2350 rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
2351 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2352 (uint32_t)(bus_addr >> 32));
2353 E1000_WRITE_REG(hw, E1000_RDBAL(i),
2354 (uint32_t)bus_addr);
2355 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2356 /* Enable this Queue */
2357 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2358 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2359 rxdctl &= 0xFFF00000;
2360 rxdctl |= IGB_RX_PTHRESH;
2361 rxdctl |= IGB_RX_HTHRESH << 8;
2363 * Don't set WTHRESH to a value above 1 on 82576, see:
2364 * 82576 specification update errata #26
2366 rxdctl |= IGB_RX_WTHRESH << 16;
2367 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2370 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
2371 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);
2374 * Receive Checksum Offload for TCP and UDP
2376 * Checksum offloading is also enabled if multiple receive
2377 * queues are to be supported, since we need it to figure out
2380 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
2383 * PCSD must be enabled to enable multiple
2386 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2389 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2392 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
2394 if (IGB_ENABLE_HWRSS(sc)) {
2395 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
2396 uint32_t reta_shift;
2401 * When we reach here, RSS has already been disabled
2402 * in igb_stop(), so we can safely configure the RSS key
2403 * and redirect table.
2409 toeplitz_get_key(key, sizeof(key));
2410 for (i = 0; i < IGB_NRSSRK; ++i) {
2413 rssrk = IGB_RSSRK_VAL(key, i);
2414 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
2416 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
2420 * Configure the RSS redirect table in the following fashion:
2421 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2423 reta_shift = IGB_RETA_SHIFT;
2424 if (hw->mac.type == e1000_82575)
2425 reta_shift = IGB_RETA_SHIFT_82575;
2428 for (j = 0; j < IGB_NRETA; ++j) {
2431 for (i = 0; i < IGB_RETA_SIZE; ++i) {
2434 q = (r % sc->rx_ring_inuse) << reta_shift;
2435 reta |= q << (8 * i);
2438 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
2439 E1000_WRITE_REG(hw, E1000_RETA(j), reta);
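/*
 * A stand-alone sketch of the redirect-table lookup the comment above
 * describes (hypothetical hash value; entry count assumed to be
 * IGB_NRETA * IGB_RETA_SIZE, i.e. 128 on these parts):
 */
#if 0
	uint32_t hash = 0xdeadbeef;		/* hypothetical Toeplitz hash */
	int entry = hash & (IGB_NRETA * IGB_RETA_SIZE - 1); /* rdr_table_mask */
	/*
	 * Each byte of RETA holds (ring % rx_ring_inuse) << reta_shift,
	 * so the hardware's ring choice is rdr_table[entry] >> reta_shift.
	 */
#endif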
2443 * Enable multiple receive queues.
2444 * Enable IPv4 RSS standard hash functions.
2445 * Disable RSS interrupt on 82575
2447 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
2448 E1000_MRQC_ENABLE_RSS_4Q |
2449 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2450 E1000_MRQC_RSS_FIELD_IPV4);
2453 /* Setup the Receive Control Register */
2454 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2455 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2456 E1000_RCTL_RDMTS_HALF |
2457 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2458 /* Strip CRC bytes. */
2459 rctl |= E1000_RCTL_SECRC;
2460 /* Make sure VLAN Filters are off */
2461 rctl &= ~E1000_RCTL_VFE;
2462 /* Don't store bad packets */
2463 rctl &= ~E1000_RCTL_SBP;
2465 /* Enable Receives */
2466 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2469 * Setup the HW Rx Head and Tail Descriptor Pointers
2470 * - needs to be after enable
2472 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2473 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2475 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
2476 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1);
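/*
 * A note on the ring-pointer convention used above (illustrative):
 * RDH is the hardware's consumer index and RDT the software producer
 * index; hardware owns descriptors in [RDH, RDT).  Writing
 * RDT = num_rx_desc - 1 therefore hands all but one slot to the
 * hardware, so head and tail never alias on a full ring.
 */
#if 0
	/* hypothetical 4-descriptor ring right after this init:       */
	/* RDH = 0, RDT = 3 -> hardware may fill slots 0..2; software  */
	/* re-arms reaped slots and advances RDT behind the hardware.  */
#endif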
2481 igb_rx_refresh(struct igb_rx_ring *rxr, int i)
2484 i = rxr->num_rx_desc - 1;
2485 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i);
2489 igb_rxeof(struct igb_rx_ring *rxr, int count)
2491 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
2492 union e1000_adv_rx_desc *cur;
2496 i = rxr->next_to_check;
2497 cur = &rxr->rx_base[i];
2498 staterr = le32toh(cur->wb.upper.status_error);
2500 if ((staterr & E1000_RXD_STAT_DD) == 0)
2503 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
2504 struct pktinfo *pi = NULL, pi0;
2505 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2506 struct mbuf *m = NULL;
2509 eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE;
2514 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 &&
2516 struct mbuf *mp = rxbuf->m_head;
2517 uint32_t hash, hashtype;
2521 len = le16toh(cur->wb.upper.length);
2522 if (rxr->sc->hw.mac.type == e1000_i350 &&
2523 (staterr & E1000_RXDEXT_STATERR_LB))
2524 vlan = be16toh(cur->wb.upper.vlan);
2526 vlan = le16toh(cur->wb.upper.vlan);
2528 hash = le32toh(cur->wb.lower.hi_dword.rss);
2529 hashtype = le32toh(cur->wb.lower.lo_dword.data) &
2530 E1000_RXDADV_RSSTYPE_MASK;
2532 IGB_RSS_DPRINTF(rxr->sc, 10,
2533 "ring%d, hash 0x%08x, hashtype %u\n",
2534 rxr->me, hash, hashtype);
2536 bus_dmamap_sync(rxr->rx_tag, rxbuf->map,
2537 BUS_DMASYNC_POSTREAD);
2539 if (igb_newbuf(rxr, i, FALSE) != 0) {
2545 if (rxr->fmp == NULL) {
2546 mp->m_pkthdr.len = len;
2550 rxr->lmp->m_next = mp;
2551 rxr->lmp = rxr->lmp->m_next;
2552 rxr->fmp->m_pkthdr.len += len;
2560 m->m_pkthdr.rcvif = ifp;
2563 if (ifp->if_capenable & IFCAP_RXCSUM)
2564 igb_rxcsum(staterr, m);
2566 if (staterr & E1000_RXD_STAT_VP) {
2567 m->m_pkthdr.ether_vlantag = vlan;
2568 m->m_flags |= M_VLANTAG;
2571 if (ifp->if_capenable & IFCAP_RSS) {
2572 pi = igb_rssinfo(m, &pi0,
2573 hash, hashtype, staterr);
2575 #ifdef IGB_RSS_DEBUG
2582 igb_setup_rxdesc(cur, rxbuf);
2584 rxr->discard = TRUE;
2586 rxr->discard = FALSE;
2587 if (rxr->fmp != NULL) {
2596 ether_input_pkt(ifp, m, pi);
2598 /* Advance our pointers to the next descriptor. */
2599 if (++i == rxr->num_rx_desc)
2602 if (ncoll >= rxr->wreg_nsegs) {
2603 igb_rx_refresh(rxr, i);
2607 cur = &rxr->rx_base[i];
2608 staterr = le32toh(cur->wb.upper.status_error);
2610 rxr->next_to_check = i;
2613 igb_rx_refresh(rxr, i);
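/*
 * The reap loop above follows the usual write-back pattern for
 * advanced RX descriptors; a condensed sketch (illustrative, error
 * handling and mbuf chaining omitted):
 */
#if 0
	while (count != 0) {
		staterr = le32toh(rxr->rx_base[i].wb.upper.status_error);
		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;	/* hardware has not written this slot back */
		/* ... consume the mbuf, re-arm the slot via igb_newbuf() ... */
		if (++i == rxr->num_rx_desc)
			i = 0;	/* wrap */
		--count;
	}
	rxr->next_to_check = i;
#endif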
2618 igb_set_vlan(struct igb_softc *sc)
2620 struct e1000_hw *hw = &sc->hw;
2623 struct ifnet *ifp = &sc->arpcom.ac_if;
2627 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE);
2631 reg = E1000_READ_REG(hw, E1000_CTRL);
2632 reg |= E1000_CTRL_VME;
2633 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2636 /* Enable the Filter Table */
2637 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
2638 reg = E1000_READ_REG(hw, E1000_RCTL);
2639 reg &= ~E1000_RCTL_CFIEN;
2640 reg |= E1000_RCTL_VFE;
2641 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2645 /* Update the frame size */
2646 E1000_WRITE_REG(&sc->hw, E1000_RLPML,
2647 sc->max_frame_size + VLAN_TAG_SIZE);
2650 /* Don't bother with table if no vlans */
2651 if ((sc->num_vlans == 0) ||
2652 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
2655 ** A soft reset zeroes out the VFTA, so
2656 ** we need to repopulate it now.
2658 for (int i = 0; i < IGB_VFTA_SIZE; i++)
2659 if (sc->shadow_vfta[i] != 0) {
2660 if (sc->vf_ifp)
2661 e1000_vfta_set_vf(hw,
2662 sc->shadow_vfta[i], TRUE);
2664 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
2665 i, sc->shadow_vfta[i]);
2671 igb_enable_intr(struct igb_softc *sc)
2673 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
2674 lwkt_serialize_handler_enable(&sc->main_serialize);
2678 for (i = 0; i < sc->msix_cnt; ++i) {
2679 lwkt_serialize_handler_enable(
2680 sc->msix_data[i].msix_serialize);
2684 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2685 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
2686 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask);
2688 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2689 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask);
2690 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
2691 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
2693 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
2695 E1000_WRITE_FLUSH(&sc->hw);
2699 igb_disable_intr(struct igb_softc *sc)
2701 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2702 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff);
2703 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2705 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
2706 E1000_WRITE_FLUSH(&sc->hw);
2708 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
2709 lwkt_serialize_handler_disable(&sc->main_serialize);
2713 for (i = 0; i < sc->msix_cnt; ++i) {
2714 lwkt_serialize_handler_disable(
2715 sc->msix_data[i].msix_serialize);
2721 * Bit of a misnomer; what this really means is
2722 * to enable OS management of the system, i.e.
2723 * to disable special hardware management features
2726 igb_get_mgmt(struct igb_softc *sc)
2728 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2729 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
2730 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2732 /* disable hardware interception of ARP */
2733 manc &= ~E1000_MANC_ARP_EN;
2735 /* enable receiving management packets to the host */
2736 manc |= E1000_MANC_EN_MNG2HOST;
2737 manc2h |= 1 << 5; /* Mng Port 623 */
2738 manc2h |= 1 << 6; /* Mng Port 664 */
2739 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
2740 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2745 * Give control back to hardware management controller
2749 igb_rel_mgmt(struct igb_softc *sc)
2751 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2752 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2754 /* Re-enable hardware interception of ARP */
2755 manc |= E1000_MANC_ARP_EN;
2756 manc &= ~E1000_MANC_EN_MNG2HOST;
2758 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2763 * Sets CTRL_EXT:DRV_LOAD bit.
2765 * For ASF and Pass Through versions of f/w this means that
2766 * the driver is loaded.
2769 igb_get_hw_control(struct igb_softc *sc)
2776 /* Let firmware know the driver has taken over */
2777 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2778 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2779 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2783 * Resets CTRL_EXT:DRV_LOAD bit.
2785 * For ASF and Pass Through versions of f/w this means that the
2786 * driver is no longer loaded.
2789 igb_rel_hw_control(struct igb_softc *sc)
2796 /* Let firmware take over control of h/w */
2797 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2798 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2799 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2803 igb_is_valid_ether_addr(const uint8_t *addr)
2805 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2807 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
2813 * Enable PCI Wake On Lan capability
2816 igb_enable_wol(device_t dev)
2818 uint16_t cap, status;
2821 /* First find the capabilities pointer */
2822 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
2824 /* Read the PM Capabilities */
2825 id = pci_read_config(dev, cap, 1);
2826 if (id != PCIY_PMG) /* Something wrong */
2830 * OK, we have the power capabilities,
2831 * so now get the status register
2833 cap += PCIR_POWER_STATUS;
2834 status = pci_read_config(dev, cap, 2);
2835 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2836 pci_write_config(dev, cap, status, 2);
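/*
 * The code above assumes the PM capability is the first entry in the
 * PCI capability list.  A more general version would walk the list
 * until PCIY_PMG is found; a sketch (illustrative only):
 */
#if 0
	uint8_t ptr = pci_read_config(dev, PCIR_CAP_PTR, 1);

	while (ptr != 0) {
		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == PCIY_PMG)
			break;		/* ptr -> PM capability */
		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
	}
#endif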
2840 igb_update_stats_counters(struct igb_softc *sc)
2842 struct e1000_hw *hw = &sc->hw;
2843 struct e1000_hw_stats *stats;
2844 struct ifnet *ifp = &sc->arpcom.ac_if;
2847 * The virtual function adapter has only a
2848 * small controlled set of stats; do only
2852 igb_update_vf_stats_counters(sc);
2857 if (sc->hw.phy.media_type == e1000_media_type_copper ||
2858 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
2860 E1000_READ_REG(hw, E1000_SYMERRS);
2861 stats->sec += E1000_READ_REG(hw, E1000_SEC);
2864 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
2865 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
2866 stats->scc += E1000_READ_REG(hw, E1000_SCC);
2867 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
2869 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
2870 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
2871 stats->colc += E1000_READ_REG(hw, E1000_COLC);
2872 stats->dc += E1000_READ_REG(hw, E1000_DC);
2873 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
2874 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
2875 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
2878 * For watchdog management we need to know if we have been
2879 * paused during the last interval, so capture that here.
2881 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
2882 stats->xoffrxc += sc->pause_frames;
2883 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
2884 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
2885 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
2886 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
2887 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
2888 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
2889 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
2890 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
2891 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
2892 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
2893 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
2894 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
2896 /* For the 64-bit byte counters the low dword must be read first. */
2897 /* Both registers clear on the read of the high dword */
2899 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
2900 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
2901 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
2902 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
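	/*
	 * The read order above matters: both halves clear when the high
	 * dword is read, so reading GORCH first would zero GORCL before
	 * it could be sampled and the low 32 bits would be lost.  The
	 * safe pattern, spelled out (illustrative):
	 */
#if 0
	uint64_t octets = E1000_READ_REG(hw, E1000_GORCL) +	/* low first */
	    ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);	/* clears both */
#endif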
2904 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
2905 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
2906 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
2907 stats->roc += E1000_READ_REG(hw, E1000_ROC);
2908 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
2910 stats->tor += E1000_READ_REG(hw, E1000_TORL) + ((uint64_t)E1000_READ_REG(hw, E1000_TORH) << 32);
2911 stats->tot += E1000_READ_REG(hw, E1000_TOTL) + ((uint64_t)E1000_READ_REG(hw, E1000_TOTH) << 32);
2913 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
2914 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
2915 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
2916 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
2917 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
2918 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
2919 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
2920 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
2921 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
2922 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
2924 /* Interrupt Counts */
2926 stats->iac += E1000_READ_REG(hw, E1000_IAC);
2927 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
2928 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
2929 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
2930 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
2931 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
2932 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
2933 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
2934 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
2936 /* Host to Card Statistics */
2938 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
2939 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
2940 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
2941 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
2942 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
2943 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
2944 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
2945 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
2946 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32));
2947 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
2948 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
2949 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
2950 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
2951 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
2953 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
2954 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
2955 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
2956 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
2957 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
2958 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
2960 ifp->if_collisions = stats->colc;
2963 ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
2964 stats->ruc + stats->roc + stats->mpc + stats->cexterr;
2967 ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events;
2969 /* Driver specific counters */
2970 sc->device_control = E1000_READ_REG(hw, E1000_CTRL);
2971 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL);
2972 sc->int_mask = E1000_READ_REG(hw, E1000_IMS);
2973 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
2974 sc->packet_buf_alloc_tx =
2975 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
2976 sc->packet_buf_alloc_rx =
2977 (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
2981 igb_vf_init_stats(struct igb_softc *sc)
2983 struct e1000_hw *hw = &sc->hw;
2984 struct e1000_vf_stats *stats;
2987 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
2988 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
2989 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
2990 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
2991 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
2995 igb_update_vf_stats_counters(struct igb_softc *sc)
2997 struct e1000_hw *hw = &sc->hw;
2998 struct e1000_vf_stats *stats;
3000 if (sc->link_speed == 0)
3004 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
3005 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
3006 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc);
3007 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc);
3008 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc);
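/*
 * UPDATE_VF_REG is defined earlier in this file (not shown here).  VF
 * counters are 32-bit and are not clear-on-read, so the macro plausibly
 * carries the running 64-bit value across 32-bit wraps, along these
 * lines (a guess at the shape, not the actual definition):
 */
#if 0
#define UPDATE_VF_REG(reg, last, cur)					\
do {									\
	uint32_t new = E1000_READ_REG(hw, reg);				\
	if (new < (last))						\
		(cur) += 0x100000000LL;		/* 32-bit wrap */	\
	(cur) = ((cur) & 0xFFFFFFFF00000000LL) | new;			\
	(last) = new;							\
} while (0)
#endif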
3011 #ifdef IFPOLL_ENABLE
3014 igb_npoll_status(struct ifnet *ifp)
3016 struct igb_softc *sc = ifp->if_softc;
3019 ASSERT_SERIALIZED(&sc->main_serialize);
3021 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3022 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3023 sc->hw.mac.get_link_status = 1;
3024 igb_update_link_status(sc);
3029 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
3031 struct igb_tx_ring *txr = arg;
3033 ASSERT_SERIALIZED(&txr->tx_serialize);
3036 if (!ifsq_is_empty(txr->ifsq))
3037 ifsq_devstart(txr->ifsq);
3041 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
3043 struct igb_rx_ring *rxr = arg;
3045 ASSERT_SERIALIZED(&rxr->rx_serialize);
3047 igb_rxeof(rxr, cycle);
3051 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3053 struct igb_softc *sc = ifp->if_softc;
3054 int i, txr_cnt, rxr_cnt;
3056 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3061 info->ifpi_status.status_func = igb_npoll_status;
3062 info->ifpi_status.serializer = &sc->main_serialize;
3064 txr_cnt = igb_get_txring_inuse(sc, TRUE);
3065 off = sc->tx_npoll_off;
3066 for (i = 0; i < txr_cnt; ++i) {
3067 struct igb_tx_ring *txr = &sc->tx_rings[i];
3070 KKASSERT(idx < ncpus2);
3071 info->ifpi_tx[idx].poll_func = igb_npoll_tx;
3072 info->ifpi_tx[idx].arg = txr;
3073 info->ifpi_tx[idx].serializer = &txr->tx_serialize;
3074 ifsq_set_cpuid(txr->ifsq, idx);
3077 rxr_cnt = igb_get_rxring_inuse(sc, TRUE);
3078 off = sc->rx_npoll_off;
3079 for (i = 0; i < rxr_cnt; ++i) {
3080 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3083 KKASSERT(idx < ncpus2);
3084 info->ifpi_rx[idx].poll_func = igb_npoll_rx;
3085 info->ifpi_rx[idx].arg = rxr;
3086 info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
3089 if (ifp->if_flags & IFF_RUNNING) {
3090 if (rxr_cnt == sc->rx_ring_inuse &&
3091 txr_cnt == sc->tx_ring_inuse)
3092 igb_disable_intr(sc);
3097 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3098 struct igb_tx_ring *txr = &sc->tx_rings[i];
3100 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid);
3103 if (ifp->if_flags & IFF_RUNNING) {
3104 txr_cnt = igb_get_txring_inuse(sc, FALSE);
3105 rxr_cnt = igb_get_rxring_inuse(sc, FALSE);
3107 if (rxr_cnt == sc->rx_ring_inuse &&
3108 txr_cnt == sc->tx_ring_inuse)
3109 igb_enable_intr(sc);
3116 #endif /* IFPOLL_ENABLE */
3121 struct igb_softc *sc = xsc;
3122 struct ifnet *ifp = &sc->arpcom.ac_if;
3125 ASSERT_SERIALIZED(&sc->main_serialize);
3127 eicr = E1000_READ_REG(&sc->hw, E1000_EICR);
3132 if (ifp->if_flags & IFF_RUNNING) {
3133 struct igb_tx_ring *txr = &sc->tx_rings[0];
3136 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3137 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3139 if (eicr & rxr->rx_intr_mask) {
3140 lwkt_serialize_enter(&rxr->rx_serialize);
3142 lwkt_serialize_exit(&rxr->rx_serialize);
3146 if (eicr & txr->tx_intr_mask) {
3147 lwkt_serialize_enter(&txr->tx_serialize);
3149 if (!ifsq_is_empty(txr->ifsq))
3150 ifsq_devstart(txr->ifsq);
3151 lwkt_serialize_exit(&txr->tx_serialize);
3155 if (eicr & E1000_EICR_OTHER) {
3156 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3158 /* Link status change */
3159 if (icr & E1000_ICR_LSC) {
3160 sc->hw.mac.get_link_status = 1;
3161 igb_update_link_status(sc);
3166 * Reading EICR has the side effect of clearing the interrupt mask,
3167 * so all interrupts need to be re-enabled here.
3169 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
3173 igb_intr_shared(void *xsc)
3175 struct igb_softc *sc = xsc;
3176 struct ifnet *ifp = &sc->arpcom.ac_if;
3179 ASSERT_SERIALIZED(&sc->main_serialize);
3181 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3184 if (reg_icr == 0xffffffff)
3187 /* Definitely not our interrupt. */
3191 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
3194 if (ifp->if_flags & IFF_RUNNING) {
3196 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
3199 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3200 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3202 lwkt_serialize_enter(&rxr->rx_serialize);
3204 lwkt_serialize_exit(&rxr->rx_serialize);
3208 if (reg_icr & E1000_ICR_TXDW) {
3209 struct igb_tx_ring *txr = &sc->tx_rings[0];
3211 lwkt_serialize_enter(&txr->tx_serialize);
3213 if (!ifsq_is_empty(txr->ifsq))
3214 ifsq_devstart(txr->ifsq);
3215 lwkt_serialize_exit(&txr->tx_serialize);
3219 /* Link status change */
3220 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3221 sc->hw.mac.get_link_status = 1;
3222 igb_update_link_status(sc);
3225 if (reg_icr & E1000_ICR_RXO)
3230 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp,
3231 int *segs_used, int *idx)
3233 bus_dma_segment_t segs[IGB_MAX_SCATTER];
3235 struct igb_tx_buf *tx_buf, *tx_buf_mapped;
3236 union e1000_adv_tx_desc *txd = NULL;
3237 struct mbuf *m_head = *m_headp;
3238 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
3239 int maxsegs, nsegs, i, j, error;
3240 uint32_t hdrlen = 0;
3242 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3243 error = igb_tso_pullup(txr, m_headp);
3249 /* Set basic descriptor constants */
3250 cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
3251 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
3252 if (m_head->m_flags & M_VLANTAG)
3253 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3256 * Map the packet for DMA.
3258 tx_buf = &txr->tx_buf[txr->next_avail_desc];
3259 tx_buf_mapped = tx_buf;
3262 maxsegs = txr->tx_avail - IGB_TX_RESERVED;
3263 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n"));
3264 if (maxsegs > IGB_MAX_SCATTER)
3265 maxsegs = IGB_MAX_SCATTER;
3267 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
3268 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3270 if (error == ENOBUFS)
3271 txr->sc->mbuf_defrag_failed++;
3273 txr->sc->no_tx_dma_setup++;
3279 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);
3284 * Set up the TX context descriptor, if any hardware offloading is
3285 * needed. This includes CSUM, VLAN, and TSO. It will consume one
3288 * Unlike these chips' predecessors (em/emx), a TX context descriptor
3289 * will _not_ interfere with TX data fetch pipelining.
3291 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3292 igb_tso_ctx(txr, m_head, &hdrlen);
3293 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3294 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3295 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3298 } else if (igb_txcsum_ctx(txr, m_head)) {
3299 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3300 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
3301 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
3302 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
3307 *segs_used += nsegs;
3308 txr->tx_nsegs += nsegs;
3309 if (txr->tx_nsegs >= txr->intr_nsegs) {
3311 * Report Status (RS) is turned on every intr_nsegs
3312 * descriptors (roughly).
3315 cmd_rs = E1000_ADVTXD_DCMD_RS;
3318 /* Calculate payload length */
3319 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
3320 << E1000_ADVTXD_PAYLEN_SHIFT);
3322 /* 82575 needs the queue index added */
3323 if (txr->sc->hw.mac.type == e1000_82575)
3324 olinfo_status |= txr->me << 4;
3326 /* Set up our transmit descriptors */
3327 i = txr->next_avail_desc;
3328 for (j = 0; j < nsegs; j++) {
3330 bus_addr_t seg_addr;
3332 tx_buf = &txr->tx_buf[i];
3333 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
3334 seg_addr = segs[j].ds_addr;
3335 seg_len = segs[j].ds_len;
3337 txd->read.buffer_addr = htole64(seg_addr);
3338 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
3339 txd->read.olinfo_status = htole32(olinfo_status);
3340 if (++i == txr->num_tx_desc)
3342 tx_buf->m_head = NULL;
3345 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
3346 txr->next_avail_desc = i;
3347 txr->tx_avail -= nsegs;
3349 tx_buf->m_head = m_head;
3350 tx_buf_mapped->map = tx_buf->map;
3354 * Last Descriptor of Packet needs End Of Packet (EOP)
3356 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);
3359 * Defer TDT updating until enough descriptors are set up
3362 #ifdef IGB_TSS_DEBUG
3370 igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
3372 struct igb_softc *sc = ifp->if_softc;
3373 struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
3374 struct mbuf *m_head;
3375 int idx = -1, nsegs = 0;
3377 KKASSERT(txr->ifsq == ifsq);
3378 ASSERT_SERIALIZED(&txr->tx_serialize);
3380 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
3383 if (!sc->link_active || (txr->tx_flags & IGB_TXFLAG_ENABLED) == 0) {
3388 if (!IGB_IS_NOT_OACTIVE(txr))
3391 while (!ifsq_is_empty(ifsq)) {
3392 if (IGB_IS_OACTIVE(txr)) {
3393 ifsq_set_oactive(ifsq);
3394 /* Set watchdog on */
3395 txr->tx_watchdog.wd_timer = 5;
3399 m_head = ifsq_dequeue(ifsq, NULL);
3403 if (igb_encap(txr, &m_head, &nsegs, &idx)) {
3408 if (nsegs >= txr->wreg_nsegs) {
3409 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
3414 /* Send a copy of the frame to the BPF listener */
3415 ETHER_BPF_MTAP(ifp, m_head);
3418 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
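/*
 * Note the two TDT writes above: the in-loop write only fires once
 * wreg_nsegs descriptors have accumulated, and the final write flushes
 * whatever remains.  Batching the doorbell trades a little latency for
 * far fewer register writes; the shape of the loop, condensed
 * (have_work()/enqueue_one() are hypothetical stand-ins):
 */
#if 0
	nsegs = 0;
	while (have_work()) {
		nsegs += enqueue_one(&idx);
		if (nsegs >= txr->wreg_nsegs) {
			E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
			nsegs = 0;
			idx = -1;
		}
	}
	if (idx >= 0)		/* flush the tail of the batch */
		E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
#endif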
3422 igb_watchdog(struct ifaltq_subque *ifsq)
3424 struct igb_tx_ring *txr = ifsq_get_priv(ifsq);
3425 struct ifnet *ifp = ifsq_get_ifp(ifsq);
3426 struct igb_softc *sc = ifp->if_softc;
3429 KKASSERT(txr->ifsq == ifsq);
3430 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3433 * If flow control has paused us since we last checked,
3434 * it invalidates the watchdog timing, so don't run it.
3436 if (sc->pause_frames) {
3437 sc->pause_frames = 0;
3438 txr->tx_watchdog.wd_timer = 5;
3442 if_printf(ifp, "Watchdog timeout -- resetting\n");
3443 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
3444 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
3445 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
3446 if_printf(ifp, "TX(%d) desc avail = %d, "
3447 "Next TX to Clean = %d\n",
3448 txr->me, txr->tx_avail, txr->next_to_clean);
3451 sc->watchdog_events++;
3454 for (i = 0; i < sc->tx_ring_inuse; ++i)
3455 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
3459 igb_set_eitr(struct igb_softc *sc, int idx, int rate)
3464 if (sc->hw.mac.type == e1000_82575) {
3465 eitr = 1000000000 / 256 / rate;
3468 * The datasheet is wrong about the 2-bit left shift
3471 eitr = 1000000 / rate;
3472 eitr <<= IGB_EITR_INTVL_SHIFT;
3476 /* Don't disable it */
3477 eitr = 1 << IGB_EITR_INTVL_SHIFT;
3478 } else if (eitr > IGB_EITR_INTVL_MASK) {
3479 /* Don't allow it to be too large */
3480 eitr = IGB_EITR_INTVL_MASK;
3483 if (sc->hw.mac.type == e1000_82575)
3486 eitr |= E1000_EITR_CNT_IGNR;
3487 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr);
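/*
 * Worked example of the interval math above (hypothetical rate): at
 * rate = 8000 interrupts/s on 82576/82580/i350, eitr = 1000000 / 8000
 * = 125, i.e. roughly a 125 us interval, which is then shifted into
 * the INTVL field.  The 82575 instead programs 1000000000 / 256 / rate
 * because its EITR counts in 256 ns units.
 */
#if 0
	int rate = 8000;			/* hypothetical */
	uint32_t eitr = 1000000 / rate;		/* ~125 us */
	eitr <<= IGB_EITR_INTVL_SHIFT;		/* position INTVL */
#endif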
3491 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3493 struct igb_softc *sc = (void *)arg1;
3494 struct ifnet *ifp = &sc->arpcom.ac_if;
3495 int error, intr_rate;
3497 intr_rate = sc->intr_rate;
3498 error = sysctl_handle_int(oidp, &intr_rate, 0, req);
3499 if (error || req->newptr == NULL)
3504 ifnet_serialize_all(ifp);
3506 sc->intr_rate = intr_rate;
3507 if (ifp->if_flags & IFF_RUNNING)
3508 igb_set_eitr(sc, 0, sc->intr_rate);
3511 if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate);
3513 ifnet_deserialize_all(ifp);
3519 igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS)
3521 struct igb_msix_data *msix = (void *)arg1;
3522 struct igb_softc *sc = msix->msix_sc;
3523 struct ifnet *ifp = &sc->arpcom.ac_if;
3524 int error, msix_rate;
3526 msix_rate = msix->msix_rate;
3527 error = sysctl_handle_int(oidp, &msix_rate, 0, req);
3528 if (error || req->newptr == NULL)
3533 lwkt_serialize_enter(msix->msix_serialize);
3535 msix->msix_rate = msix_rate;
3536 if (ifp->if_flags & IFF_RUNNING)
3537 igb_set_eitr(sc, msix->msix_vector, msix->msix_rate);
3540 if_printf(ifp, "%s set to %d/sec\n", msix->msix_rate_desc,
3544 lwkt_serialize_exit(msix->msix_serialize);
3550 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
3552 struct igb_softc *sc = (void *)arg1;
3553 struct ifnet *ifp = &sc->arpcom.ac_if;
3554 struct igb_tx_ring *txr = &sc->tx_rings[0];
3557 nsegs = txr->intr_nsegs;
3558 error = sysctl_handle_int(oidp, &nsegs, 0, req);
3559 if (error || req->newptr == NULL)
3564 ifnet_serialize_all(ifp);
3566 if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc ||
3567 nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) {
3573 for (i = 0; i < sc->tx_ring_cnt; ++i)
3574 sc->tx_rings[i].intr_nsegs = nsegs;
3577 ifnet_deserialize_all(ifp);
3583 igb_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
3585 struct igb_softc *sc = (void *)arg1;
3586 struct ifnet *ifp = &sc->arpcom.ac_if;
3587 int error, nsegs, i;
3589 nsegs = sc->rx_rings[0].wreg_nsegs;
3590 error = sysctl_handle_int(oidp, &nsegs, 0, req);
3591 if (error || req->newptr == NULL)
3594 ifnet_serialize_all(ifp);
3595 for (i = 0; i < sc->rx_ring_cnt; ++i)
3596 sc->rx_rings[i].wreg_nsegs = nsegs;
3597 ifnet_deserialize_all(ifp);
3603 igb_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
3605 struct igb_softc *sc = (void *)arg1;
3606 struct ifnet *ifp = &sc->arpcom.ac_if;
3607 int error, nsegs, i;
3609 nsegs = sc->tx_rings[0].wreg_nsegs;
3610 error = sysctl_handle_int(oidp, &nsegs, 0, req);
3611 if (error || req->newptr == NULL)
3614 ifnet_serialize_all(ifp);
3615 for (i = 0; i < sc->tx_ring_cnt; ++i)
3616 sc->tx_rings[i].wreg_nsegs = nsegs;
3617 ifnet_deserialize_all(ifp);
3622 #ifdef IFPOLL_ENABLE
3625 igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3627 struct igb_softc *sc = (void *)arg1;
3628 struct ifnet *ifp = &sc->arpcom.ac_if;
3631 off = sc->rx_npoll_off;
3632 error = sysctl_handle_int(oidp, &off, 0, req);
3633 if (error || req->newptr == NULL)
3638 ifnet_serialize_all(ifp);
3639 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) {
3643 sc->rx_npoll_off = off;
3645 ifnet_deserialize_all(ifp);
3651 igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3653 struct igb_softc *sc = (void *)arg1;
3654 struct ifnet *ifp = &sc->arpcom.ac_if;
3657 off = sc->tx_npoll_off;
3658 error = sysctl_handle_int(oidp, &off, 0, req);
3659 if (error || req->newptr == NULL)
3664 ifnet_serialize_all(ifp);
3665 if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) {
3669 sc->tx_npoll_off = off;
3671 ifnet_deserialize_all(ifp);
3676 #endif /* IFPOLL_ENABLE */
3679 igb_init_intr(struct igb_softc *sc)
3681 igb_set_intr_mask(sc);
3683 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0)
3684 igb_init_unshared_intr(sc);
3686 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
3687 igb_set_eitr(sc, 0, sc->intr_rate);
3691 for (i = 0; i < sc->msix_cnt; ++i)
3692 igb_set_eitr(sc, i, sc->msix_data[i].msix_rate);
3697 igb_init_unshared_intr(struct igb_softc *sc)
3699 struct e1000_hw *hw = &sc->hw;
3700 const struct igb_rx_ring *rxr;
3701 const struct igb_tx_ring *txr;
3702 uint32_t ivar, index;
3706 * Enable extended mode
3708 if (sc->hw.mac.type != e1000_82575) {
3712 gpie = E1000_GPIE_NSICR;
3713 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3714 gpie |= E1000_GPIE_MSIX_MODE |
3718 E1000_WRITE_REG(hw, E1000_GPIE, gpie);
3723 switch (sc->hw.mac.type) {
3725 ivar_max = IGB_MAX_IVAR_82580;
3729 ivar_max = IGB_MAX_IVAR_I350;
3733 case e1000_vfadapt_i350:
3734 ivar_max = IGB_MAX_IVAR_VF;
3738 ivar_max = IGB_MAX_IVAR_82576;
3742 panic("unknown mac type %d\n", sc->hw.mac.type);
3744 for (i = 0; i < ivar_max; ++i)
3745 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0);
3746 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
3750 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX,
3751 ("82575 w/ MSI-X"));
3752 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
3753 tmp |= E1000_CTRL_EXT_IRCA;
3754 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
3758 * Map TX/RX interrupts to EICR
3760 switch (sc->hw.mac.type) {
3764 case e1000_vfadapt_i350:
3766 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3767 rxr = &sc->rx_rings[i];
3770 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3775 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3779 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3781 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3784 for (i = 0; i < sc->tx_ring_inuse; ++i) {
3785 txr = &sc->tx_rings[i];
3788 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3793 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3797 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3799 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3801 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3802 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8;
3803 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
3809 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3810 rxr = &sc->rx_rings[i];
3812 index = i & 0x7; /* Each IVAR has two entries */
3813 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3818 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3822 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3824 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3827 for (i = 0; i < sc->tx_ring_inuse; ++i) {
3828 txr = &sc->tx_rings[i];
3830 index = i & 0x7; /* Each IVAR has two entries */
3831 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3836 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3840 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3842 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3844 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3845 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8;
3846 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
3852 * Enable necessary interrupt bits.
3854 * The name of the register is confusing; in addition to
3855 * configuring the first vector of MSI-X, it also configures
3856 * which bits of EICR could be set by the hardware even when
3857 * MSI or line interrupt is used; it thus controls interrupt
3858 * generation. It MUST be configured explicitly; the default
3859 * value mentioned in the datasheet is wrong: RX queue0 and
3860 * TX queue0 are NOT enabled by default.
3862 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask);
3866 panic("unknown mac type %d\n", sc->hw.mac.type);
3871 igb_setup_intr(struct igb_softc *sc)
3875 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
3876 return igb_msix_setup(sc);
3878 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE,
3879 (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_intr_shared : igb_intr,
3880 sc, &sc->intr_tag, &sc->main_serialize);
3882 device_printf(sc->dev, "Failed to register interrupt handler");
3886 for (i = 0; i < sc->tx_ring_cnt; ++i)
3887 sc->tx_rings[i].tx_intr_cpuid = rman_get_cpuid(sc->intr_res);
3893 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax)
3895 if (txr->sc->hw.mac.type == e1000_82575) {
3896 txr->tx_intr_bit = 0; /* unused */
3899 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
3902 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
3905 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
3908 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
3911 panic("unsupported # of TX ring, %d\n", txr->me);
3914 int intr_bit = *intr_bit0;
3916 txr->tx_intr_bit = intr_bit % intr_bitmax;
3917 txr->tx_intr_mask = 1 << txr->tx_intr_bit;
3919 *intr_bit0 = intr_bit + 1;
3924 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax)
3926 if (rxr->sc->hw.mac.type == e1000_82575) {
3927 rxr->rx_intr_bit = 0; /* unused */
3930 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
3933 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
3936 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
3939 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
3942 panic("unsupported # of RX ring, %d\n", rxr->me);
3945 int intr_bit = *intr_bit0;
3947 rxr->rx_intr_bit = intr_bit % intr_bitmax;
3948 rxr->rx_intr_mask = 1 << rxr->rx_intr_bit;
3950 *intr_bit0 = intr_bit + 1;
3955 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3957 struct igb_softc *sc = ifp->if_softc;
3959 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt,
3960 sc->tx_serialize, sc->rx_serialize, slz);
3964 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3966 struct igb_softc *sc = ifp->if_softc;
3968 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt,
3969 sc->tx_serialize, sc->rx_serialize, slz);
3973 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3975 struct igb_softc *sc = ifp->if_softc;
3977 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
3978 sc->tx_serialize, sc->rx_serialize, slz);
3984 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3985 boolean_t serialized)
3987 struct igb_softc *sc = ifp->if_softc;
3989 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
3990 sc->tx_serialize, sc->rx_serialize, slz, serialized);
3993 #endif /* INVARIANTS */
3996 igb_set_intr_mask(struct igb_softc *sc)
4000 sc->intr_mask = sc->sts_intr_mask;
4001 for (i = 0; i < sc->rx_ring_inuse; ++i)
4002 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
4003 for (i = 0; i < sc->tx_ring_inuse; ++i)
4004 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;
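/*
 * Example of the resulting mask (hypothetical bit assignments): with
 * sts_intr_mask = E1000_EICR_OTHER, two RX rings on EICR bits 0 and 1
 * and one TX ring on bit 2:
 */
#if 0
	sc->intr_mask = E1000_EICR_OTHER | 0x01 | 0x02 | 0x04;
#endif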
4006 if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n",
4012 igb_alloc_intr(struct igb_softc *sc)
4014 int i, intr_bit, intr_bitmax;
4017 igb_msix_try_alloc(sc);
4018 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
4022 * Allocate MSI/legacy interrupt resource
4024 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable,
4025 &sc->intr_rid, &intr_flags);
4027 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
4030 unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
4032 sc->flags |= IGB_FLAG_SHARED_INTR;
4034 device_printf(sc->dev, "IRQ shared\n");
4036 intr_flags &= ~RF_SHAREABLE;
4038 device_printf(sc->dev, "IRQ unshared\n");
4042 sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
4043 &sc->intr_rid, intr_flags);
4044 if (sc->intr_res == NULL) {
4045 device_printf(sc->dev, "Unable to allocate bus resource: "
4051 * Setup MSI/legacy interrupt mask
4053 switch (sc->hw.mac.type) {
4055 intr_bitmax = IGB_MAX_TXRXINT_82575;
4058 intr_bitmax = IGB_MAX_TXRXINT_82580;
4061 intr_bitmax = IGB_MAX_TXRXINT_I350;
4064 intr_bitmax = IGB_MAX_TXRXINT_82576;
4067 intr_bitmax = IGB_MIN_TXRXINT;
4071 for (i = 0; i < sc->tx_ring_cnt; ++i)
4072 igb_set_txintr_mask(&sc->tx_rings[i], &intr_bit, intr_bitmax);
4073 for (i = 0; i < sc->rx_ring_cnt; ++i)
4074 igb_set_rxintr_mask(&sc->rx_rings[i], &intr_bit, intr_bitmax);
4075 sc->sts_intr_bit = 0;
4076 sc->sts_intr_mask = E1000_EICR_OTHER;
4078 /* Initialize interrupt rate */
4079 sc->intr_rate = IGB_INTR_RATE;
4081 igb_set_ring_inuse(sc, FALSE);
4082 igb_set_intr_mask(sc);
4087 igb_free_intr(struct igb_softc *sc)
4089 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
4090 if (sc->intr_res != NULL) {
4091 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr_rid,
4094 if (sc->intr_type == PCI_INTR_TYPE_MSI)
4095 pci_release_msi(sc->dev);
4097 igb_msix_free(sc, TRUE);
4102 igb_teardown_intr(struct igb_softc *sc)
4104 if (sc->intr_type != PCI_INTR_TYPE_MSIX)
4105 bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_tag);
4107 igb_msix_teardown(sc, sc->msix_cnt);
4111 igb_msix_try_alloc(struct igb_softc *sc)
4113 int msix_enable, msix_cnt, msix_cnt2, alloc_cnt;
4115 int offset, offset_def;
4116 struct igb_msix_data *msix;
4117 boolean_t aggregate, setup = FALSE;
4120 * Don't enable MSI-X on 82575, see:
4121 * 82575 specification update errata #25
4123 if (sc->hw.mac.type == e1000_82575)
4126 /* Don't enable MSI-X on VF */
4130 msix_enable = device_getenv_int(sc->dev, "msix.enable",
4135 msix_cnt = pci_msix_count(sc->dev);
4136 #ifdef IGB_MSIX_DEBUG
4137 msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt);
4139 if (msix_cnt <= 1) {
4140 /* One MSI-X model does not make sense */
4145 while ((1 << (i + 1)) <= msix_cnt)
4150 device_printf(sc->dev, "MSI-X count %d/%d\n",
4151 msix_cnt2, msix_cnt);
4154 KKASSERT(msix_cnt2 <= msix_cnt);
4155 if (msix_cnt == msix_cnt2) {
4156 /* We need at least one MSI-X for link status */
4158 if (msix_cnt2 <= 1) {
4159 /* One MSI-X for RX/TX does not make sense */
4160 device_printf(sc->dev, "not enough MSI-X for TX/RX, "
4161 "MSI-X count %d/%d\n", msix_cnt2, msix_cnt);
4164 KKASSERT(msix_cnt > msix_cnt2);
4167 device_printf(sc->dev, "MSI-X count fixup %d/%d\n",
4168 msix_cnt2, msix_cnt);
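/*
 * The loop further up rounds msix_cnt down to a power of 2; a worked
 * example (hypothetical count): msix_cnt = 10 gives msix_cnt2 = 8
 * vectors for TX/RX, and one of the two leftover vectors carries the
 * link-status interrupt.
 */
#if 0
	int i = 0, cnt = 10;			/* hypothetical */
	while ((1 << (i + 1)) <= cnt)
		++i;
	/* now (1 << i) == 8, the largest power of 2 <= cnt */
#endif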
4172 sc->rx_ring_msix = sc->rx_ring_cnt;
4173 if (sc->rx_ring_msix > msix_cnt2)
4174 sc->rx_ring_msix = msix_cnt2;
4176 sc->tx_ring_msix = sc->tx_ring_cnt;
4177 if (sc->tx_ring_msix > msix_cnt2)
4178 sc->tx_ring_msix = msix_cnt2;
4180 if (msix_cnt >= sc->tx_ring_msix + sc->rx_ring_msix + 1) {
4182 * Independent TX/RX MSI-X
4186 device_printf(sc->dev, "independent TX/RX MSI-X\n");
4187 alloc_cnt = sc->tx_ring_msix + sc->rx_ring_msix;
4190 * Aggregate TX/RX MSI-X
4194 device_printf(sc->dev, "aggregate TX/RX MSI-X\n");
4195 alloc_cnt = msix_cnt2;
4196 if (alloc_cnt > ncpus2)
4198 if (sc->rx_ring_msix > alloc_cnt)
4199 sc->rx_ring_msix = alloc_cnt;
4200 if (sc->tx_ring_msix > alloc_cnt)
4201 sc->tx_ring_msix = alloc_cnt;
4203 ++alloc_cnt; /* For link status */
4206 device_printf(sc->dev, "MSI-X alloc %d, "
4207 "RX ring %d, TX ring %d\n", alloc_cnt,
4208 sc->rx_ring_msix, sc->tx_ring_msix);
4211 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR);
4212 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
4213 &sc->msix_mem_rid, RF_ACTIVE);
4214 if (sc->msix_mem_res == NULL) {
4215 device_printf(sc->dev, "Unable to map MSI-X table\n");
4219 sc->msix_cnt = alloc_cnt;
4220 sc->msix_data = kmalloc_cachealign(
4221 sizeof(struct igb_msix_data) * sc->msix_cnt,
4222 M_DEVBUF, M_WAITOK | M_ZERO);
4223 for (x = 0; x < sc->msix_cnt; ++x) {
4224 msix = &sc->msix_data[x];
4226 lwkt_serialize_init(&msix->msix_serialize0);
4228 msix->msix_rid = -1;
4229 msix->msix_vector = x;
4230 msix->msix_mask = 1 << msix->msix_vector;
4231 msix->msix_rate = IGB_INTR_RATE;
4239 if (sc->rx_ring_msix == ncpus2) {
4242 offset_def = (sc->rx_ring_msix *
4243 device_get_unit(sc->dev)) % ncpus2;
4245 offset = device_getenv_int(sc->dev,
4246 "msix.rxoff", offset_def);
4247 if (offset >= ncpus2 ||
4248 offset % sc->rx_ring_msix != 0) {
4249 device_printf(sc->dev,
4250 "invalid msix.rxoff %d, use %d\n",
4251 offset, offset_def);
4252 offset = offset_def;
4255 igb_msix_rx_conf(sc, 0, &x, offset);
4260 if (sc->tx_ring_msix == ncpus2) {
4263 offset_def = (sc->tx_ring_msix *
4264 device_get_unit(sc->dev)) % ncpus2;
4266 offset = device_getenv_int(sc->dev,
4267 "msix.txoff", offset_def);
4268 if (offset >= ncpus2 ||
4269 offset % sc->tx_ring_msix != 0) {
4270 device_printf(sc->dev,
4271 "invalid msix.txoff %d, use %d\n",
4272 offset, offset_def);
4273 offset = offset_def;
4276 igb_msix_tx_conf(sc, 0, &x, offset);
4278 int ring_agg, ring_max;
4280 ring_agg = sc->rx_ring_msix;
4281 if (ring_agg > sc->tx_ring_msix)
4282 ring_agg = sc->tx_ring_msix;
4284 ring_max = sc->rx_ring_msix;
4285 if (ring_max < sc->tx_ring_msix)
4286 ring_max = sc->tx_ring_msix;
4288 if (ring_max == ncpus2) {
4291 offset_def = (ring_max * device_get_unit(sc->dev)) %
4294 offset = device_getenv_int(sc->dev, "msix.off",
4296 if (offset >= ncpus2 || offset % ring_max != 0) {
4297 device_printf(sc->dev,
4298 "invalid msix.off %d, use %d\n",
4299 offset, offset_def);
4300 offset = offset_def;
4304 for (i = 0; i < ring_agg; ++i) {
4305 struct igb_tx_ring *txr = &sc->tx_rings[i];
4306 struct igb_rx_ring *rxr = &sc->rx_rings[i];
4308 KKASSERT(x < sc->msix_cnt);
4309 msix = &sc->msix_data[x++];
4311 txr->tx_intr_bit = msix->msix_vector;
4312 txr->tx_intr_mask = msix->msix_mask;
4313 rxr->rx_intr_bit = msix->msix_vector;
4314 rxr->rx_intr_mask = msix->msix_mask;
4316 msix->msix_serialize = &msix->msix_serialize0;
4317 msix->msix_func = igb_msix_rxtx;
4318 msix->msix_arg = msix;
4319 msix->msix_rx = rxr;
4320 msix->msix_tx = txr;
4322 msix->msix_cpuid = i + offset;
4323 KKASSERT(msix->msix_cpuid < ncpus2);
4324 txr->tx_intr_cpuid = msix->msix_cpuid;
4326 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
4327 "%s rxtx%d", device_get_nameunit(sc->dev), i);
4328 msix->msix_rate = IGB_MSIX_RX_RATE;
4329 ksnprintf(msix->msix_rate_desc,
4330 sizeof(msix->msix_rate_desc),
4331 "RXTX%d interrupt rate", i);
4334 if (ring_agg != ring_max) {
4335 if (ring_max == sc->tx_ring_msix)
4336 igb_msix_tx_conf(sc, i, &x, offset);
4338 igb_msix_rx_conf(sc, i, &x, offset);
4345 KKASSERT(x < sc->msix_cnt);
4346 msix = &sc->msix_data[x++];
4347 sc->sts_intr_bit = msix->msix_vector;
4348 sc->sts_intr_mask = msix->msix_mask;
4350 msix->msix_serialize = &sc->main_serialize;
4351 msix->msix_func = igb_msix_status;
4352 msix->msix_arg = sc;
4353 msix->msix_cpuid = 0;
4354 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s sts",
4355 device_get_nameunit(sc->dev));
4356 ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
4357 "status interrupt rate");
4359 KKASSERT(x == sc->msix_cnt);
4361 error = pci_setup_msix(sc->dev);
4363 device_printf(sc->dev, "Setup MSI-X failed\n");
4368 for (i = 0; i < sc->msix_cnt; ++i) {
4369 msix = &sc->msix_data[i];
4371 error = pci_alloc_msix_vector(sc->dev, msix->msix_vector,
4372 &msix->msix_rid, msix->msix_cpuid);
4374 device_printf(sc->dev,
4375 "Unable to allocate MSI-X %d on cpu%d\n",
4376 msix->msix_vector, msix->msix_cpuid);
4380 msix->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
4381 &msix->msix_rid, RF_ACTIVE);
4382 if (msix->msix_res == NULL) {
4383 device_printf(sc->dev,
4384 "Unable to allocate MSI-X %d resource\n",
4391 pci_enable_msix(sc->dev);
4392 sc->intr_type = PCI_INTR_TYPE_MSIX;
4395 igb_msix_free(sc, setup);
4399 igb_msix_free(struct igb_softc *sc, boolean_t setup)
4403 KKASSERT(sc->msix_cnt > 1);
4405 for (i = 0; i < sc->msix_cnt; ++i) {
4406 struct igb_msix_data *msix = &sc->msix_data[i];
4408 if (msix->msix_res != NULL) {
4409 bus_release_resource(sc->dev, SYS_RES_IRQ,
4410 msix->msix_rid, msix->msix_res);
4412 if (msix->msix_rid >= 0)
4413 pci_release_msix_vector(sc->dev, msix->msix_rid);
4416 pci_teardown_msix(sc->dev);
4419 kfree(sc->msix_data, M_DEVBUF);
4420 sc->msix_data = NULL;
4424 igb_msix_setup(struct igb_softc *sc)
4428 for (i = 0; i < sc->msix_cnt; ++i) {
4429 struct igb_msix_data *msix = &sc->msix_data[i];
4432 error = bus_setup_intr_descr(sc->dev, msix->msix_res,
4433 INTR_MPSAFE, msix->msix_func, msix->msix_arg,
4434 &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
4436 device_printf(sc->dev, "could not set up %s "
4437 "interrupt handler.\n", msix->msix_desc);
4438 igb_msix_teardown(sc, i);
4446 igb_msix_teardown(struct igb_softc *sc, int msix_cnt)
4450 for (i = 0; i < msix_cnt; ++i) {
4451 struct igb_msix_data *msix = &sc->msix_data[i];
4453 bus_teardown_intr(sc->dev, msix->msix_res, msix->msix_handle);
4458 igb_msix_rx(void *arg)
4460 struct igb_rx_ring *rxr = arg;
4462 ASSERT_SERIALIZED(&rxr->rx_serialize);
4465 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
4469 igb_msix_tx(void *arg)
4471 struct igb_tx_ring *txr = arg;
4473 ASSERT_SERIALIZED(&txr->tx_serialize);
4476 if (!ifsq_is_empty(txr->ifsq))
4477 ifsq_devstart(txr->ifsq);
4479 E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
4483 igb_msix_status(void *arg)
4485 struct igb_softc *sc = arg;
4488 ASSERT_SERIALIZED(&sc->main_serialize);
4490 icr = E1000_READ_REG(&sc->hw, E1000_ICR);
4491 if (icr & E1000_ICR_LSC) {
4492 sc->hw.mac.get_link_status = 1;
4493 igb_update_link_status(sc);
4496 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
4500 igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
4502 sc->rx_ring_inuse = igb_get_rxring_inuse(sc, polling);
4503 sc->tx_ring_inuse = igb_get_txring_inuse(sc, polling);
4505 if_printf(&sc->arpcom.ac_if, "RX rings %d/%d, TX rings %d/%d\n",
4506 sc->rx_ring_inuse, sc->rx_ring_cnt,
4507 sc->tx_ring_inuse, sc->tx_ring_cnt);
4512 igb_get_rxring_inuse(const struct igb_softc *sc, boolean_t polling)
4514 if (!IGB_ENABLE_HWRSS(sc))
4518 return sc->rx_ring_cnt;
4519 else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
4520 return IGB_MIN_RING_RSS;
4522 return sc->rx_ring_msix;
4526 igb_get_txring_inuse(const struct igb_softc *sc, boolean_t polling)
4528 if (!IGB_ENABLE_HWTSS(sc))
4532 return sc->tx_ring_cnt;
4533 else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
4534 return IGB_MIN_RING;
4536 return sc->tx_ring_msix;
4540 igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
4542 int hoff, iphlen, thoff;
4546 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
4548 iphlen = m->m_pkthdr.csum_iphlen;
4549 thoff = m->m_pkthdr.csum_thlen;
4550 hoff = m->m_pkthdr.csum_lhlen;
4552 KASSERT(iphlen > 0, ("invalid ip hlen"));
4553 KASSERT(thoff > 0, ("invalid tcp hlen"));
4554 KASSERT(hoff > 0, ("invalid ether hlen"));
4556 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
4557 m = m_pullup(m, hoff + iphlen + thoff);
4564 if (txr->tx_flags & IGB_TXFLAG_TSO_IPLEN0) {
4567 ip = mtodoff(m, struct ip *, hoff);
4575 igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen)
4577 struct e1000_adv_tx_context_desc *TXD;
4578 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
4579 int hoff, ctxd, iphlen, thoff;
4581 iphlen = m->m_pkthdr.csum_iphlen;
4582 thoff = m->m_pkthdr.csum_thlen;
4583 hoff = m->m_pkthdr.csum_lhlen;
4585 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
4587 ctxd = txr->next_avail_desc;
4588 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];
4590 if (m->m_flags & M_VLANTAG) {
4593 vlantag = htole16(m->m_pkthdr.ether_vlantag);
4594 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
4597 vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT);
4598 vlan_macip_lens |= iphlen;
4600 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4601 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
4602 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
4604 mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
4605 mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT);
4606 /* 82575 needs the queue index added */
4607 if (txr->sc->hw.mac.type == e1000_82575)
4608 mss_l4len_idx |= txr->me << 4;
4610 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
4611 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
4612 TXD->seqnum_seed = htole32(0);
4613 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
4615 /* We've consumed the first desc, adjust counters */
4616 if (++ctxd == txr->num_tx_desc)
4618 txr->next_avail_desc = ctxd;
4621 *hlen = hoff + iphlen + thoff;
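/*
 * Worked example of the header math above (hypothetical frame): an
 * untagged TCP/IPv4 packet with no IP or TCP options has hoff = 14,
 * iphlen = 20 and thoff = 20, so *hlen = 54; igb_encap() subtracts
 * this from m_pkthdr.len to derive the TSO payload length.
 */
#if 0
	uint32_t hlen = 14 + 20 + 20;			/* ETH + IP + TCP */
	uint32_t paylen = m->m_pkthdr.len - hlen;	/* TSO payload */
#endif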
4625 igb_setup_serializer(struct igb_softc *sc)
4627 const struct igb_msix_data *msix;
4631 * Allocate serializer array
4634 /* Main + TX + RX */
4635 sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;
4637 /* Aggregate TX/RX MSI-X */
4638 for (i = 0; i < sc->msix_cnt; ++i) {
4639 msix = &sc->msix_data[i];
4640 if (msix->msix_serialize == &msix->msix_serialize0)
4641 sc->serialize_cnt++;
4645 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
4646 M_DEVBUF, M_WAITOK | M_ZERO);
4651 * NOTE: Order is critical
4655 KKASSERT(i < sc->serialize_cnt);
4656 sc->serializes[i++] = &sc->main_serialize;
4658 for (j = 0; j < sc->msix_cnt; ++j) {
4659 msix = &sc->msix_data[j];
4660 if (msix->msix_serialize == &msix->msix_serialize0) {
4661 KKASSERT(i < sc->serialize_cnt);
4662 sc->serializes[i++] = msix->msix_serialize;
4666 sc->tx_serialize = i;
4667 for (j = 0; j < sc->tx_ring_cnt; ++j) {
4668 KKASSERT(i < sc->serialize_cnt);
4669 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
4672 sc->rx_serialize = i;
4673 for (j = 0; j < sc->rx_ring_cnt; ++j) {
4674 KKASSERT(i < sc->serialize_cnt);
4675 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
4678 KKASSERT(i == sc->serialize_cnt);
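/*
 * The order assembled above is what the igb_serialize() family relies
 * on: slot 0 is the main serializer, then any aggregate RX/TX MSI-X
 * serializers, then all TX rings (first at index tx_serialize), then
 * all RX rings (first at index rx_serialize).  E.g. with 2 TX and 2 RX
 * rings and no aggregate vectors (hypothetical layout):
 */
#if 0
	/* serializes[] = { main, tx0, tx1, rx0, rx1 }			*/
	/* tx_serialize = 1, rx_serialize = 3, serialize_cnt = 5	*/
#endif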
4682 igb_msix_rx_conf(struct igb_softc *sc, int i, int *x0, int offset)
4686 for (; i < sc->rx_ring_msix; ++i) {
4687 struct igb_rx_ring *rxr = &sc->rx_rings[i];
4688 struct igb_msix_data *msix;
4690 KKASSERT(x < sc->msix_cnt);
4691 msix = &sc->msix_data[x++];
4693 rxr->rx_intr_bit = msix->msix_vector;
4694 rxr->rx_intr_mask = msix->msix_mask;
4696 msix->msix_serialize = &rxr->rx_serialize;
4697 msix->msix_func = igb_msix_rx;
4698 msix->msix_arg = rxr;
4700 msix->msix_cpuid = i + offset;
4701 KKASSERT(msix->msix_cpuid < ncpus2);
4703 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s rx%d",
4704 device_get_nameunit(sc->dev), i);
4706 msix->msix_rate = IGB_MSIX_RX_RATE;
4707 ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
4708 "RX%d interrupt rate", i);
4714 igb_msix_tx_conf(struct igb_softc *sc, int i, int *x0, int offset)
4718 for (; i < sc->tx_ring_msix; ++i) {
4719 struct igb_tx_ring *txr = &sc->tx_rings[i];
4720 struct igb_msix_data *msix;
4722 KKASSERT(x < sc->msix_cnt);
4723 msix = &sc->msix_data[x++];
4725 txr->tx_intr_bit = msix->msix_vector;
4726 txr->tx_intr_mask = msix->msix_mask;
4728 msix->msix_serialize = &txr->tx_serialize;
4729 msix->msix_func = igb_msix_tx;
4730 msix->msix_arg = txr;
4732 msix->msix_cpuid = i + offset;
4733 KKASSERT(msix->msix_cpuid < ncpus2);
4734 txr->tx_intr_cpuid = msix->msix_cpuid;
4736 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s tx%d",
4737 device_get_nameunit(sc->dev), i);
4739 msix->msix_rate = IGB_MSIX_TX_RATE;
4740 ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
4741 "TX%d interrupt rate", i);
4747 igb_msix_rxtx(void *arg)
4749 struct igb_msix_data *msix = arg;
4750 struct igb_rx_ring *rxr = msix->msix_rx;
4751 struct igb_tx_ring *txr = msix->msix_tx;
4753 ASSERT_SERIALIZED(&msix->msix_serialize0);
4755 lwkt_serialize_enter(&rxr->rx_serialize);
4757 lwkt_serialize_exit(&rxr->rx_serialize);
4759 lwkt_serialize_enter(&txr->tx_serialize);
4761 if (!ifsq_is_empty(txr->ifsq))
4762 ifsq_devstart(txr->ifsq);
4763 lwkt_serialize_exit(&txr->tx_serialize);
4765 E1000_WRITE_REG(&msix->msix_sc->hw, E1000_EIMS, msix->msix_mask);