 * Copyright (c) 2001-2011, Intel Corporation
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/igb/if_igb.h>
#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id)	\
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }
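/*
 * For example, IGB_DEVICE(82575EB_COPPER) expands via token pasting and
 * string literal concatenation to:
 *   { IGB_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER,
 *     "Intel(R) PRO/1000 82575EB_COPPER" }
 */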
static struct igb_device {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),

	/* required last entry */
	IGB_DEVICE_NULL
};
static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static int	igb_txctx_pullup(struct igb_tx_ring *, struct mbuf **);
static boolean_t igb_txctx(struct igb_tx_ring *, struct mbuf *);
static void	igb_add_sysctl(struct igb_softc *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifnet *);
static void	igb_start(struct ifnet *);
#ifdef DEVICE_POLLING
static void	igb_poll(struct ifnet *, enum poll_cmd, int);
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
static void	igb_intr(void *);
static void	igb_shared_intr(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *);
static void	igb_set_eitr(struct igb_softc *);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_setup_tx_intr(struct igb_tx_ring *);
static void	igb_setup_rx_intr(struct igb_rx_ring *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);

static void	igb_serialize_skipmain(struct igb_softc *);
static void	igb_deserialize_skipmain(struct igb_softc *);
static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),

static driver_t igb_driver = {
	sizeof(struct igb_softc),

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);
static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */
static int	igb_fc_setting = e1000_fc_full;

 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
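/*
 * These knobs are read from the kernel environment at boot, so they can
 * be set from /boot/loader.conf; the values below are only examples:
 *   hw.igb.rxd="1024"
 *   hw.igb.msi.enable="0"
 */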
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
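			/*
			 * NOTE: csum_data of 0xffff combined with
			 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR tells the stack
			 * that the L4 checksum, pseudo header included, has
			 * already been verified, so it is not recomputed in
			 * software.
			 */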
igb_probe(device_t dev)
	const struct igb_device *d;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);

igb_attach(device_t dev)
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");

	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
	    &igb_enable_aim, 1, "Interrupt Moderation");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
	    adapter, 0, igb_set_flowcntl, "I", "Flow Control");

	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	 * Determine hardware and mac type
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");

	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	sc->intr_type = pci_alloc_1intr(dev, igb_msi_enable,
	    &sc->intr_rid, &intr_flags);

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	sc->tx_ring_cnt = 1; /* XXX */
	sc->rx_ring_cnt = 1; /* XXX */
	sc->intr_rate = IGB_INTR_RATE;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);

	lwkt_serialize_init(&sc->main_serialize);
	sc->serializes[i++] = &sc->main_serialize;

	sc->tx_serialize = i;
	for (j = 0; j < sc->tx_ring_cnt; ++j)
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;

	sc->rx_serialize = i;
	for (j = 0; j < sc->rx_ring_cnt; ++j)
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;

	sc->serialize_cnt = i;
	KKASSERT(sc->serialize_cnt <= IGB_NSERIALIZE);

	/* Allocate the appropriate stats memory */
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		igb_vf_init_stats(sc);
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
		e1000_set_eee_i350(&sc->hw);

	 * Start from a known state, this is important in reading the nvm and
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			    "The EEPROM Checksum Is Not Valid\n");
	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"

	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");

	** Configure Interrupts
	if ((adapter->msix > 1) && (igb_enable_msix))
		error = igb_allocate_msix(adapter);
	else /* MSI or Legacy */
		error = igb_allocate_legacy(adapter);

	/* Setup OS specific network interface */

	/* Add sysctl tree, must be done after igb_setup_ifp() */

	/* Now get a good starting state */

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	igb_add_hw_stats(adapter);

	error = igb_setup_intr(sc);
		ether_ifdetach(&sc->arpcom.ac_if);

igb_detach(device_t dev)
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_hw_control(sc);

		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);

	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,

	kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);

	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

igb_shutdown(device_t dev)
	return igb_suspend(dev);

igb_suspend(device_t dev)
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_rel_hw_control(sc);

	E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
	E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);

igb_resume(device_t dev)
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);

igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +

		if (ifp->if_flags & IFF_RUNNING)

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
		sc->if_flags = ifp->if_flags;

		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
#ifdef DEVICE_POLLING
			if (!(ifp->if_flags & IFF_POLLING))
		 * As the speed/duplex settings are being
		 * changed, we need to reset the PHY.
		sc->hw.phy.reset_disable = FALSE;
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");

		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (reinit && (ifp->if_flags & IFF_RUNNING))

		error = ether_ioctl(ifp, command, data);

	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IGB_CSUM_FEATURES;
		ifp->if_hwassist = 0;

	/* Configure for OS presence */

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */

	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
		adapter->rx_mbuf_sz = MJUM9BYTES;

	/* Initialize interrupt */

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		error = igb_init_rx_ring(&sc->rx_rings[i]);
			if_printf(ifp, "Could not setup receive structures\n");
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)

	/* Don't lose promiscuous settings */

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->timer, hz, igb_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	if (adapter->msix > 1) /* Set up queue routing */
		igb_configure_queues(adapter);

	/* this clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);
#ifdef DEVICE_POLLING
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	if (ifp->if_flags & IFF_POLLING)
		igb_disable_intr(sc);
#endif /* DEVICE_POLLING */

	E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);

	/* Set Energy Efficient Ethernet */
	e1000_set_eee_i350(&sc->hw);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;

igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
	struct igb_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= fiber_type | IFM_FDX;

		switch (sc->link_speed) {
			ifmr->ifm_active |= IFM_10_T;
			ifmr->ifm_active |= IFM_100_TX;
			ifmr->ifm_active |= IFM_1000_T;

		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
			ifmr->ifm_active |= IFM_HDX;

igb_media_change(struct ifnet *ifp)
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		if_printf(ifp, "Unsupported media type\n");

igb_set_promisc(struct igb_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;

		e1000_promisc_set_vf(hw, e1000_promisc_enabled);

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);

igb_disable_promisc(struct igb_softc *sc)
	struct e1000_hw *hw = &sc->hw;

		e1000_promisc_set_vf(hw, e1000_promisc_disabled);

	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);

igb_set_multi(struct igb_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;

	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);

igb_timer(void *xsc)
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset(&sc->timer, hz, igb_timer, sc);

	ifnet_deserialize_all(ifp);

igb_update_link_status(struct igb_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if_printf(ifp, "Link is up %d Mbps %s\n",
		    sc->link_duplex == FULL_DUPLEX ?
		    "Full Duplex" : "Half Duplex");
		sc->link_active = 1;
		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);

igb_stop(struct igb_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);

igb_reset(struct igb_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	switch (hw->mac.type) {
		pba = E1000_PBA_32K;

		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;

	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		/* XXX pba = E1000_PBA_35K; */

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);

		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);

		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
		E1000_WRITE_REG(hw, E1000_PBA, pba);

	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));
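	/*
	 * pba is in KB units, so pba << 10 is the buffer size in bytes.
	 * For illustration, with pba = 64 and a 1518 byte max frame:
	 * hwm = min(65536 * 9 / 10, 65536 - 2 * 1518)
	 *     = min(58982, 62500) = 58982 bytes.
	 */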
	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;

	/* Issue a global reset */
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	if (hw->mac.type == e1000_i350 && sc->dma_coalesce) {
		hwm = (pba - 4) << 10;
		reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK;

		/* transition to L0x or L1 if available..*/
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/* timer = +-1000 usec in 32usec intervals */
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* No lower threshold */
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* set hwm to PBA - 2 * max frame size */
		E1000_WRITE_REG(hw, E1000_FCRTC, hwm);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x800000FF;	/* 255 usec */
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * sc->max_frame_size)) >> 6);
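		/*
		 * DMCTXTH takes 64-byte units, hence the >> 6; 20480 bytes
		 * (20 KB) appears to be the TX packet buffer budget assumed
		 * here, so the wake threshold is that buffer less two
		 * max-sized frames.
		 */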
		/* make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg | E1000_PCIEMISC_LX_DECISION);
		if_printf(ifp, "DMA Coalescing enabled\n");

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);

igb_setup_ifp(struct igb_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
	ifp->if_serialize_assert = igb_serialize_assert;
#ifdef DEVICE_POLLING
	ifp->if_poll = igb_poll;
	ifp->if_watchdog = igb_watchdog;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES;

	 * Tell the upper layer(s) we support long frames
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, igb_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

igb_add_sysctl(struct igb_softc *sc)
	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		device_printf(sc->dev, "can't add sysctl node\n");

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0, NULL);
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0, NULL);

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# segments per TX interrupt");

igb_alloc_rings(struct igb_softc *sc)
	 * Create top level busdma tag
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
		device_printf(sc->dev, "could not create top level DMA tag\n");

	 * Allocate TX descriptor rings and buffers
	sc->tx_rings = kmalloc(sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		lwkt_serialize_init(&txr->tx_serialize);

		error = igb_create_tx_ring(txr);

	 * Allocate RX descriptor rings and buffers
	sc->rx_rings = kmalloc(sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);

igb_free_rings(struct igb_softc *sc)
	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		kfree(sc->tx_rings, M_DEVBUF);

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		kfree(sc->rx_rings, M_DEVBUF);

igb_create_tx_ring(struct igb_tx_ring *txr)
	int tsize, error, i;

	 * Validate number of transmit descriptors. It must not exceed
	 * hardware maximum, and must be a multiple of IGB_DBA_ALIGN.
	if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 ||
	    (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, igb_txd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
		txr->num_tx_desc = igb_txd;

	 * Allocate TX descriptor ring
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	txr->tx_buf = kmalloc(sizeof(struct igb_tx_buf) * txr->num_tx_desc,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	 * Allocate TX head write-back buffer
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");

	 * Create DMA tag for TX buffers
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);

	 * Create DMA maps for TX buffers
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);

	 * Initialize various watermarks
	txr->spare_desc = IGB_TX_SPARE;
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->oact_hi_desc = txr->num_tx_desc / 2;
	txr->oact_lo_desc = txr->num_tx_desc / 8;
	if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
		txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
	if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
		txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;
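	/*
	 * Roughly: spare_desc keeps room for an extra (context) descriptor
	 * per packet, intr_nsegs paces how often a TX-completion interrupt
	 * is requested, and oact_hi_desc/oact_lo_desc form the hysteresis
	 * band used when setting and clearing IFF_OACTIVE (see igb_txeof()).
	 */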
igb_free_tx_ring(struct igb_tx_ring *txr)
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;

igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		bus_dma_tag_destroy(txr->tx_hdr_dtag);

	if (txr->tx_buf == NULL)

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);

igb_init_tx_ring(struct igb_tx_ring *txr)
	/* Clear the old descriptor contents */
	    sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);

	/* Clear TX head write-back buffer */

	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->num_tx_desc;

igb_init_tx_unit(struct igb_softc *sc)
	struct e1000_hw *hw = &sc->hw;

	/* Setup the Tx Descriptor Rings */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];
		uint64_t bus_addr = txr->txdma.dma_paddr;
		uint64_t hdr_paddr = txr->tx_hdr_paddr;
		uint32_t txdctl = 0;
		uint32_t dca_txctrl;

		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    txr->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);

		dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
		dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);

		E1000_WRITE_REG(hw, E1000_TDWBAH(i),
		    (uint32_t)(hdr_paddr >> 32));
		E1000_WRITE_REG(hw, E1000_TDWBAL(i),
		    ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);
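		/*
		 * With head write-back enabled the controller DMAs the
		 * consumed head index into the tx_hdr buffer, so igb_txeof()
		 * can read *(txr->tx_hdr) from cacheable memory instead of
		 * doing an expensive register read of TDH.
		 */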
	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);

igb_txctx(struct igb_tx_ring *txr, struct mbuf *mp)
	struct e1000_adv_tx_context_desc *TXD;
	struct igb_tx_buf *txbuf;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	int ehdrlen, ctxd, ip_hlen = 0;
	uint16_t etype, vlantag = 0;
	boolean_t offload = TRUE;

	if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
	ctxd = txr->next_avail_desc;
	txbuf = &txr->tx_buf[ctxd];
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor, thus
	 * we need to be here just for that setup.
	if (mp->m_flags & M_VLANTAG) {
		vlantag = htole16(mp->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (!offload) {

	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	KASSERT(mp->m_len >= ETHER_HDR_LEN,
	    ("igb_txctx_pullup is not called (eh)?\n"));
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN,
		    ("igb_txctx_pullup is not called (evh)?\n"));
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN;
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

		KASSERT(mp->m_len >= ehdrlen + IGB_IPVHL_SIZE,
		    ("igb_txctx_pullup is not called (eh+ip_vhl)?\n"));

		/* NOTE: We could only safely access ip.ip_vhl part */
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;

		if (mp->m_pkthdr.csum_flags & CSUM_IP)
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;

	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	if (mp->m_pkthdr.csum_flags & CSUM_TCP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;

	/* 82575 needs the queue index added */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	txbuf->m_head = NULL;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
	txr->next_avail_desc = ctxd;

igb_txeof(struct igb_tx_ring *txr)
	struct ifnet *ifp = &txr->sc->arpcom.ac_if;
	int first, hdr, avail;

	if (txr->tx_avail == txr->num_tx_desc)

	first = txr->next_to_clean;
	hdr = *(txr->tx_hdr);

	avail = txr->tx_avail;
	while (first != hdr) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[first];

		if (txbuf->m_head) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		if (++first == txr->num_tx_desc)
	txr->next_to_clean = first;
	txr->tx_avail = avail;

	 * If we have a minimum free, clear IFF_OACTIVE
	 * to tell the stack that it is OK to send packets.
	if (IGB_IS_NOT_OACTIVE(txr)) {
		ifp->if_flags &= ~IFF_OACTIVE;

		 * We have enough TX descriptors, turn off
		 * the watchdog. We allow a small number of
		 * packets (roughly intr_nsegs) pending on
		 * the transmit ring.

igb_create_rx_ring(struct igb_rx_ring *rxr)
	int rsize, i, error;

	 * Validate number of receive descriptors. It must not exceed
	 * hardware maximum, and must be a multiple of IGB_DBA_ALIGN.
	if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 ||
	    (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) {
		device_printf(rxr->sc->dev,
		    "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, igb_rxd);
		rxr->num_rx_desc = IGB_DEFAULT_RXD;
		rxr->num_rx_desc = igb_rxd;

	 * Allocate RX descriptor ring
	rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
	rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
	    IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
	    &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
	    &rxr->rxdma.dma_paddr);
	if (rxr->rxdma.dma_vaddr == NULL) {
		device_printf(rxr->sc->dev,
		    "Unable to allocate RX descriptor memory\n");
	rxr->rx_base = rxr->rxdma.dma_vaddr;
	bzero(rxr->rx_base, rsize);

	rxr->rx_buf = kmalloc(sizeof(struct igb_rx_buf) * rxr->num_rx_desc,
	    M_DEVBUF, M_WAITOK | M_ZERO);

	 * Create DMA tag for RX buffers
	error = bus_dma_tag_create(rxr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    MCLBYTES,		/* maxsize */
	    MCLBYTES,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,	/* flags */
		device_printf(rxr->sc->dev,
		    "Unable to create RX payload DMA tag\n");
		kfree(rxr->rx_buf, M_DEVBUF);

	 * Create spare DMA map for RX buffers
	error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
		device_printf(rxr->sc->dev,
		    "Unable to create spare RX DMA maps\n");
		bus_dma_tag_destroy(rxr->rx_tag);
		kfree(rxr->rx_buf, M_DEVBUF);

	 * Create DMA maps for RX buffers
	for (i = 0; i < rxr->num_rx_desc; i++) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		error = bus_dmamap_create(rxr->rx_tag,
		    BUS_DMA_WAITOK, &rxbuf->map);
			device_printf(rxr->sc->dev,
			    "Unable to create RX DMA maps\n");
			igb_destroy_rx_ring(rxr, i);

igb_free_rx_ring(struct igb_rx_ring *rxr)
	for (i = 0; i < rxr->num_rx_desc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		if (rxbuf->m_head != NULL) {
			bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
			m_freem(rxbuf->m_head);
			rxbuf->m_head = NULL;

	if (rxr->fmp != NULL)

igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
	if (rxr->rxdma.dma_vaddr != NULL) {
		bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
		bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
		    rxr->rxdma.dma_map);
		bus_dma_tag_destroy(rxr->rxdma.dma_tag);
		rxr->rxdma.dma_vaddr = NULL;

	if (rxr->rx_buf == NULL)

	for (i = 0; i < ndesc; ++i) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];

		KKASSERT(rxbuf->m_head == NULL);
		bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
	bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
	bus_dma_tag_destroy(rxr->rx_tag);

	kfree(rxr->rx_buf, M_DEVBUF);

igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
	rxd->read.pkt_addr = htole64(rxbuf->paddr);
	rxd->wb.upper.status_error = 0;

igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
	bus_dma_segment_t seg;
	struct igb_rx_buf *rxbuf;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
		if_printf(&rxr->sc->arpcom.ac_if,
		    "Unable to allocate RX mbuf\n");

	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);
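	/*
	 * ETHER_ALIGN (2 bytes) offsets the 14 byte Ethernet header so the
	 * IP header lands on a 4-byte boundary; this is only done when the
	 * largest possible frame still fits in the cluster afterwards.
	 */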
	error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
	    rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
		if_printf(&rxr->sc->arpcom.ac_if,
		    "Unable to load RX mbuf\n");

	rxbuf = &rxr->rx_buf[i];
	if (rxbuf->m_head != NULL)
		bus_dmamap_unload(rxr->rx_tag, rxbuf->map);

	rxbuf->map = rxr->rx_sparemap;
	rxr->rx_sparemap = map;

	rxbuf->paddr = seg.ds_addr;

	igb_setup_rxdesc(&rxr->rx_base[i], rxbuf);

igb_init_rx_ring(struct igb_rx_ring *rxr)
	/* Clear the ring contents */
	    rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc));

	/* Now replenish the ring mbufs */
	for (i = 0; i < rxr->num_rx_desc; ++i) {
		error = igb_newbuf(rxr, i, TRUE);

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->discard = FALSE;

igb_init_rx_unit(struct igb_softc *sc)
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t rctl, rxcsum, srrctl = 0;

	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	** Set up for header split
	if (igb_header_split) {
		/* Use a standard mbuf for the header */
		srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

	** Set up for jumbo frames
	if (ifp->if_mtu > ETHERMTU) {
		rctl |= E1000_RCTL_LPE;

		if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
			srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
			srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;

		/* Set maximum packet len */
		psize = adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];
		uint64_t bus_addr = rxr->rxdma.dma_paddr;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);

	 * Setup for RX MultiQueue
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	if (sc->rx_ring_cnt > 1) {
		u32 random[10], mrqc, shift = 0;

		arc4rand(&random, sizeof(random), 0);
		if (adapter->hw.mac.type == e1000_82575)
		/* Warning FM follows */
		for (int i = 0; i < 128; i++) {
			    (i % sc->rx_ring_cnt) << shift;
				    E1000_RETA(i >> 2), reta.dword);
		/* Now fill in hash table */
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
		for (int i = 0; i < 10; i++)
			E1000_WRITE_REG_ARRAY(hw,
			    E1000_RSSRK(0), i, random[i]);

		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
		    E1000_MRQC_RSS_FIELD_IPV4_TCP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
		    E1000_MRQC_RSS_FIELD_IPV6_TCP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
		    E1000_MRQC_RSS_FIELD_IPV6_UDP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
		    E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
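		/*
		 * In short: the 128-entry redirection table (RETA) spreads
		 * incoming hash values round-robin across the RX rings
		 * (i % sc->rx_ring_cnt), the RSS key registers (RSSRK) are
		 * seeded from arc4rand(), and MRQC selects which header
		 * fields feed the Toeplitz hash.
		 */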
		** NOTE: Receive Full-Packet Checksum Offload
		** is mutually exclusive with Multiqueue. However
		** this is not the same as TCP/IP checksums which
		rxcsum |= E1000_RXCSUM_PCSD;

	if (ifp->if_capenable & IFCAP_RXCSUM)
		rxcsum |= E1000_RXCSUM_IPPCSE;
		rxcsum &= ~E1000_RXCSUM_TUOFL;
	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	/* Strip CRC bytes. */
	rctl |= E1000_RCTL_SECRC;
	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	 * Setup the HW Rx Head and Tail Descriptor Pointers
	 *   - needs to be after enable
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1);
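		/*
		 * Tail is left one descriptor behind head (num_rx_desc - 1
		 * with head at 0), so the hardware never owns every slot
		 * and a full ring can be told apart from an empty one.
		 */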
igb_rxeof(struct igb_rx_ring *rxr, int count)
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	union e1000_adv_rx_desc *cur;

	i = rxr->next_to_check;
	cur = &rxr->rx_base[i];
	staterr = le32toh(cur->wb.upper.status_error);

	if ((staterr & E1000_RXD_STAT_DD) == 0)

	while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
		struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
		struct mbuf *m = NULL;

		eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE;

		if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 &&
			struct mbuf *mp = rxbuf->m_head;

			len = le16toh(cur->wb.upper.length);
			if (rxr->sc->hw.mac.type == e1000_i350 &&
			    (staterr & E1000_RXDEXT_STATERR_LB))
				vlan = be16toh(cur->wb.upper.vlan);
				vlan = le16toh(cur->wb.upper.vlan);

			bus_dmamap_sync(rxr->rx_tag, rxbuf->map,
			    BUS_DMASYNC_POSTREAD);

			if (igb_newbuf(rxr, i, FALSE) != 0) {

			if (rxr->fmp == NULL) {
				mp->m_pkthdr.len = len;
				rxr->lmp->m_next = mp;
				rxr->lmp = rxr->lmp->m_next;
				rxr->fmp->m_pkthdr.len += len;

				m->m_pkthdr.rcvif = ifp;

				if (ifp->if_capenable & IFCAP_RXCSUM)
					igb_rxcsum(staterr, m);

				if (staterr & E1000_RXD_STAT_VP) {
					m->m_pkthdr.ether_vlantag = vlan;
					m->m_flags |= M_VLANTAG;

				if (ifp->if_capenable & IFCAP_RSS) {
					pi = emx_rssinfo(m, &pi0, mrq,
			igb_setup_rxdesc(cur, rxbuf);
				rxr->discard = TRUE;
				rxr->discard = FALSE;
			if (rxr->fmp != NULL) {

			ether_input_pkt(ifp, m, NULL);

		/* Advance our pointers to the next descriptor. */
		if (++i == rxr->num_rx_desc)

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);
	rxr->next_to_check = i;

		i = rxr->num_rx_desc - 1;
	E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i);

igb_set_vlan(struct igb_softc *sc)
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE);

	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg &= ~E1000_RCTL_CFIEN;
		reg |= E1000_RCTL_VFE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);

	/* Update the frame size */
	E1000_WRITE_REG(&sc->hw, E1000_RLPML,
	    sc->max_frame_size + VLAN_TAG_SIZE);

	/* Don't bother with table if no vlans */
	if ((adapter->num_vlans == 0) ||
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
2400 for (int i = 0; i < IGB_VFTA_SIZE; i++)
2401 if (adapter->shadow_vfta[i] != 0) {
2402 if (adapter->vf_ifp)
2403 e1000_vfta_set_vf(hw,
2404 adapter->shadow_vfta[i], TRUE);
2406 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
2407 i, adapter->shadow_vfta[i]);
2413 igb_enable_intr(struct igb_softc *sc)
2415 lwkt_serialize_handler_enable(&sc->main_serialize);
2417 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2418 /* XXX MSI-X should use sc->intr_mask */
2419 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2420 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask);
2421 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
2422 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
2424 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
2426 E1000_WRITE_FLUSH(&sc->hw);
2430 igb_disable_intr(struct igb_softc *sc)
2432 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2433 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff);
2434 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2436 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
2437 E1000_WRITE_FLUSH(&sc->hw);
2439 lwkt_serialize_handler_disable(&sc->main_serialize);
2443 * Bit of a misnomer: what this really means is
2444 * to enable OS management of the system, i.e.
2445 * to disable special hardware management features.
2448 igb_get_mgmt(struct igb_softc *sc)
2450 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2451 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
2452 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2454 /* disable hardware interception of ARP */
2455 manc &= ~E1000_MANC_ARP_EN;
2457 /* enable receiving management packets to the host */
2458 manc |= E1000_MANC_EN_MNG2HOST;
2459 manc2h |= 1 << 5; /* Mng Port 623 */
2460 manc2h |= 1 << 6; /* Mng Port 664 */
2461 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
2462 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2467 * Give control back to hardware management controller
2471 igb_rel_mgmt(struct igb_softc *sc)
2473 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2474 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2476 /* Re-enable hardware interception of ARP */
2477 manc |= E1000_MANC_ARP_EN;
2478 manc &= ~E1000_MANC_EN_MNG2HOST;
2480 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2485 * Sets CTRL_EXT:DRV_LOAD bit.
2487 * For ASF and Pass Through versions of f/w this means that
2488 * the driver is loaded.
2491 igb_get_hw_control(struct igb_softc *sc)
2493 uint32_t ctrl_ext;
2495 if (sc->vf_ifp)
2496 return;
2498 /* Let firmware know the driver has taken over */
2499 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2500 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2501 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2505 * Resets CTRL_EXT:DRV_LOAD bit.
2507 * For ASF and Pass Through versions of f/w this means that the
2508 * driver is no longer loaded.
2511 igb_rel_hw_control(struct igb_softc *sc)
2513 uint32_t ctrl_ext;
2515 if (sc->vf_ifp)
2516 return;
2518 /* Let firmware take over control of h/w */
2519 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2520 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2521 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
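/*
 * A station address is rejected if its multicast bit (the LSB of
 * the first octet) is set or if it is all zeroes; everything else
 * is considered a usable unicast address.
 */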
2525 igb_is_valid_ether_addr(const uint8_t *addr)
2527 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2529 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
2530 return FALSE;
2531 return TRUE;
2535 * Enable the PCI Wake On LAN capability
2538 igb_enable_wol(device_t dev)
2540 uint16_t cap, status;
2541 uint8_t id;
2543 /* First find the capabilities pointer */
2544 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
2546 /* Read the PM Capabilities */
2547 id = pci_read_config(dev, cap, 1);
2548 if (id != PCIY_PMG) /* Something wrong */
2549 return;
2552 * OK, we have the power capabilities,
2553 * so now get the status register
2555 cap += PCIR_POWER_STATUS;
2556 status = pci_read_config(dev, cap, 2);
2557 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2558 pci_write_config(dev, cap, status, 2);
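/*
 * Fold the free-running hardware statistics registers into the
 * software counters.  Most of these registers clear on read, so a
 * simple accumulation suffices; the 64-bit octet counters are
 * assembled from their low/high dword halves.
 */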
2562 igb_update_stats_counters(struct igb_softc *sc)
2564 struct e1000_hw *hw = &sc->hw;
2565 struct e1000_hw_stats *stats;
2566 struct ifnet *ifp = &sc->arpcom.ac_if;
2569 * The virtual function adapter has only a
2570 * small controlled set of stats, so do only those and return.
2573 if (sc->vf_ifp) {
2574 igb_update_vf_stats_counters(sc);
2575 return;
2579 if (sc->hw.phy.media_type == e1000_media_type_copper ||
2580 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
2582 stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
2583 stats->sec += E1000_READ_REG(hw, E1000_SEC);
2586 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
2587 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
2588 stats->scc += E1000_READ_REG(hw, E1000_SCC);
2589 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
2591 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
2592 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
2593 stats->colc += E1000_READ_REG(hw, E1000_COLC);
2594 stats->dc += E1000_READ_REG(hw, E1000_DC);
2595 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
2596 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
2597 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
2600 * For watchdog management we need to know if we have been
2601 * paused during the last interval, so capture that here.
2603 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
2604 stats->xoffrxc += sc->pause_frames;
2605 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
2606 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
2607 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
2608 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
2609 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
2610 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
2611 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
2612 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
2613 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
2614 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
2615 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
2616 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
2618 /* For the 64-bit byte counters the low dword must be read first. */
2619 /* Both registers clear on the read of the high dword */
2621 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
2622 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
2623 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
2624 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
2626 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
2627 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
2628 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
2629 stats->roc += E1000_READ_REG(hw, E1000_ROC);
2630 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
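/*
 * XXX Only the high dwords of the total-octets counters appear to
 * be accumulated below, so the low 32 bits of TOR/TOT are lost on
 * every read.
 */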
2632 stats->tor += E1000_READ_REG(hw, E1000_TORH);
2633 stats->tot += E1000_READ_REG(hw, E1000_TOTH);
2635 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
2636 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
2637 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
2638 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
2639 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
2640 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
2641 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
2642 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
2643 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
2644 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
2646 /* Interrupt Counts */
2648 stats->iac += E1000_READ_REG(hw, E1000_IAC);
2649 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
2650 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
2651 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
2652 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
2653 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
2654 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
2655 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
2656 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
2658 /* Host to Card Statistics */
2660 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
2661 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
2662 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
2663 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
2664 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
2665 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
2666 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
2667 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
2668 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32));
2669 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
2670 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
2671 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
2672 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
2673 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
2675 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
2676 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
2677 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
2678 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
2679 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
2680 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
2682 ifp->if_collisions = stats->colc;
2685 ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
2686 stats->ruc + stats->roc + stats->mpc + stats->cexterr;
2689 ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events;
2691 /* Driver specific counters */
2692 sc->device_control = E1000_READ_REG(hw, E1000_CTRL);
2693 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL);
2694 sc->int_mask = E1000_READ_REG(hw, E1000_IMS);
2695 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
2696 sc->packet_buf_alloc_tx =
2697 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
2698 sc->packet_buf_alloc_rx =
2699 (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
2703 igb_vf_init_stats(struct igb_softc *sc)
2705 struct e1000_hw *hw = &sc->hw;
2706 struct e1000_vf_stats *stats;
2709 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
2710 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
2711 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
2712 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
2713 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
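/*
 * VF statistics registers do not clear on read, so each update is
 * computed as a delta against the last snapshot.  UPDATE_VF_REG(),
 * defined elsewhere in this driver, is assumed to behave roughly
 * like the following sketch:
 *
 *	cur = E1000_READ_REG(hw, reg);
 *	if (cur < last)			(32-bit counter wrapped)
 *		count += 0x100000000ULL;
 *	count += cur - last;
 *	last = cur;
 */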
2717 igb_update_vf_stats_counters(struct igb_softc *sc)
2719 struct e1000_hw *hw = &sc->hw;
2720 struct e1000_vf_stats *stats;
2722 if (sc->link_speed == 0)
2723 return;
2726 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
2727 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
2728 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc);
2729 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc);
2730 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc);
2733 #ifdef DEVICE_POLLING
2736 igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2738 struct igb_softc *sc = ifp->if_softc;
2739 uint32_t reg_icr;
2741 ASSERT_SERIALIZED(&sc->main_serialize);
2743 switch (cmd) {
2744 case POLL_REGISTER:
2745 igb_disable_intr(sc);
2746 break;
2748 case POLL_DEREGISTER:
2749 igb_enable_intr(sc);
2750 break;
2752 case POLL_AND_CHECK_STATUS:
2753 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
2754 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
2755 igb_serialize_skipmain(sc);
2756 sc->hw.mac.get_link_status = 1;
2757 igb_update_link_status(sc);
2758 igb_deserialize_skipmain(sc);
2760 /* FALL THROUGH */
2761 case POLL_ONLY:
2762 if (ifp->if_flags & IFF_RUNNING) {
2763 struct igb_tx_ring *txr;
2766 for (i = 0; i < sc->rx_ring_cnt; ++i) {
2767 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2769 lwkt_serialize_enter(&rxr->rx_serialize);
2770 igb_rxeof(rxr, count);
2771 lwkt_serialize_exit(&rxr->rx_serialize);
2774 txr = &sc->tx_rings[0];
2775 lwkt_serialize_enter(&txr->tx_serialize);
2776 igb_txeof(txr);
2777 if (!ifq_is_empty(&ifp->if_snd))
2778 if_devstart(ifp);
2779 lwkt_serialize_exit(&txr->tx_serialize);
2785 #endif /* DEVICE_POLLING */
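/*
 * Dedicated-vector interrupt handler.  EICR is read-to-clear and
 * reports exactly which queue (or "other") cause fired, so only
 * the rings whose bits are set get serviced.
 */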
2787 static void
2788 igb_intr(void *xsc)
2790 struct igb_softc *sc = xsc;
2791 struct ifnet *ifp = &sc->arpcom.ac_if;
2792 uint32_t eicr;
2794 ASSERT_SERIALIZED(&sc->main_serialize);
2796 eicr = E1000_READ_REG(&sc->hw, E1000_EICR);
2801 if (ifp->if_flags & IFF_RUNNING) {
2802 struct igb_tx_ring *txr;
2805 for (i = 0; i < sc->rx_ring_cnt; ++i) {
2806 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2808 if (eicr & rxr->rx_intr_mask) {
2809 lwkt_serialize_enter(&rxr->rx_serialize);
2810 igb_rxeof(rxr, -1);
2811 lwkt_serialize_exit(&rxr->rx_serialize);
2815 txr = &sc->tx_rings[0];
2816 if (eicr & txr->tx_intr_mask) {
2817 lwkt_serialize_enter(&txr->tx_serialize);
2818 igb_txeof(txr);
2819 if (!ifq_is_empty(&ifp->if_snd))
2820 if_devstart(ifp);
2821 lwkt_serialize_exit(&txr->tx_serialize);
2825 if (eicr & E1000_EICR_OTHER) {
2826 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);
2828 /* Link status change */
2829 if (icr & E1000_ICR_LSC) {
2830 igb_serialize_skipmain(sc);
2831 sc->hw.mac.get_link_status = 1;
2832 igb_update_link_status(sc);
2833 igb_deserialize_skipmain(sc);
2838 * Reading EICR has the side effect of clearing the interrupt mask,
2839 * so all interrupts need to be re-enabled here.
2841 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
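/*
 * Shared legacy-IRQ handler.  ICR is read-to-clear; a value of
 * 0xffffffff means the hardware is gone and a clear INT_ASSERTED
 * bit means the interrupt belongs to another device on the line,
 * so both cases bail out early.  With no per-queue cause available
 * every ring is serviced unconditionally.
 */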
2845 igb_shared_intr(void *xsc)
2847 struct igb_softc *sc = xsc;
2848 struct ifnet *ifp = &sc->arpcom.ac_if;
2849 uint32_t reg_icr;
2851 ASSERT_SERIALIZED(&sc->main_serialize);
2853 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
2856 if (reg_icr == 0xffffffff)
2859 /* Definitely not our interrupt. */
2863 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
2866 if (ifp->if_flags & IFF_RUNNING) {
2867 struct igb_tx_ring *txr;
2870 for (i = 0; i < sc->rx_ring_cnt; ++i) {
2871 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2873 lwkt_serialize_enter(&rxr->rx_serialize);
2874 igb_rxeof(rxr, -1);
2875 lwkt_serialize_exit(&rxr->rx_serialize);
2878 txr = &sc->tx_rings[0];
2879 lwkt_serialize_enter(&txr->tx_serialize);
2880 igb_txeof(txr);
2881 if (!ifq_is_empty(&ifp->if_snd))
2882 if_devstart(ifp);
2883 lwkt_serialize_exit(&txr->tx_serialize);
2886 /* Link status change */
2887 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
2888 igb_serialize_skipmain(sc);
2889 sc->hw.mac.get_link_status = 1;
2890 igb_update_link_status(sc);
2891 igb_deserialize_skipmain(sc);
2894 if (reg_icr & E1000_ICR_RXO)
2895 sc->rx_overruns++;
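/*
 * igb_txctx_pullup() makes sure the Ethernet header, an optional
 * 802.1Q encapsulation and the IP header-length byte all sit in the
 * first, writable mbuf, calling m_pullup() as needed so the TX
 * context setup can parse them without crossing mbuf boundaries.
 * The ctx_* counters record how often each path is taken.
 */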
2899 igb_txctx_pullup(struct igb_tx_ring *txr, struct mbuf **m0)
2901 struct mbuf *m = *m0;
2902 struct ether_header *eh;
2905 txr->ctx_try_pullup++;
2907 len = ETHER_HDR_LEN + IGB_IPVHL_SIZE;
2909 if (__predict_false(!M_WRITABLE(m))) {
2910 if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
2916 eh = mtod(m, struct ether_header *);
2918 if (eh->ether_type == htons(ETHERTYPE_VLAN))
2919 len += EVL_ENCAPLEN;
2921 if (m->m_len < len) {
2930 if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
2932 m = m_pullup(m, ETHER_HDR_LEN);
2933 if (m == NULL) {
2934 txr->ctx_pullup1_failed++;
2940 eh = mtod(m, struct ether_header *);
2942 if (eh->ether_type == htons(ETHERTYPE_VLAN))
2943 len += EVL_ENCAPLEN;
2945 if (m->m_len < len) {
2947 m = m_pullup(m, len);
2948 if (m == NULL) {
2949 txr->ctx_pullup2_failed++;
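/*
 * igb_encap() transforms one mbuf chain into advanced TX
 * descriptors: pull headers together when offloads need them,
 * DMA-map the chain (bus_dmamap_load_mbuf_defrag() defragments
 * too-scattered chains), emit an optional context descriptor, then
 * one data descriptor per DMA segment, and finally kick the tail
 * register (TDT).
 */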
2959 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp)
2961 bus_dma_segment_t segs[IGB_MAX_SCATTER];
2962 bus_dmamap_t map;
2963 struct igb_tx_buf *tx_buf, *tx_buf_mapped;
2964 union e1000_adv_tx_desc *txd = NULL;
2965 struct mbuf *m_head = *m_headp;
2966 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
2967 int maxsegs, nsegs, i, j, error, last = 0;
2968 uint32_t hdrlen = 0;
2970 if (m_head->m_len < IGB_TXCSUM_MINHL &&
2971 ((m_head->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) ||
2972 (m_head->m_flags & M_VLANTAG))) {
2974 * Make sure that the ethernet header and ip.ip_hl are in
2975 * contiguous memory, since if TXCSUM or VLANTAG is
2976 * enabled, the TX context descriptor setup below needs
2977 * to access ip.ip_hl.
2979 error = igb_txctx_pullup(txr, m_headp);
2980 if (error) {
2981 KKASSERT(*m_headp == NULL);
2987 /* Set basic descriptor constants */
2988 cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
2989 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
2990 if (m_head->m_flags & M_VLANTAG)
2991 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2994 * Map the packet for DMA.
2996 tx_buf = &txr->tx_buf[txr->next_avail_desc];
2997 tx_buf_mapped = tx_buf;
2998 map = tx_buf->map;
3000 maxsegs = txr->tx_avail - IGB_TX_RESERVED;
3001 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n"));
3002 if (maxsegs > IGB_MAX_SCATTER)
3003 maxsegs = IGB_MAX_SCATTER;
3005 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
3006 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3007 if (error) {
3008 if (error == ENOBUFS)
3009 txr->sc->mbuf_defrag_failed++;
3010 else
3011 txr->sc->no_tx_dma_setup++;
3017 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);
3019 m_head = *m_headp;
3023 * Set up the context descriptor:
3024 * used when any hardware offload is done.
3025 * This includes CSUM, VLAN, and TSO. It
3026 * will use the first descriptor.
3028 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3029 if (igb_tso_setup(txr, m_head, &hdrlen)) {
3030 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3031 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3032 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3035 } else if (igb_tx_ctx_setup(txr, m_head))
3036 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
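/*
 * NOTE: the TSO setup above and the igb_txctx() path below are
 * alternative implementations of the offload-context setup; only
 * one of them should run for a given packet.
 */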
3038 if (igb_txctx(txr, m_head)) {
3039 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
3040 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
3041 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
3046 txr->tx_nsegs += nsegs;
3047 if (txr->tx_nsegs >= txr->intr_nsegs) {
3049 * Report Status (RS) is turned on every intr_nsegs
3050 * descriptors (roughly).
3052 txr->tx_nsegs = 0;
3053 cmd_rs = E1000_ADVTXD_DCMD_RS;
3056 /* Calculate payload length */
3057 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
3058 << E1000_ADVTXD_PAYLEN_SHIFT);
3060 /* 82575 needs the queue index added */
3061 if (txr->sc->hw.mac.type == e1000_82575)
3062 olinfo_status |= txr->me << 4;
3064 /* Set up our transmit descriptors */
3065 i = txr->next_avail_desc;
3066 for (j = 0; j < nsegs; j++) {
3068 bus_addr_t seg_addr;
3070 tx_buf = &txr->tx_buf[i];
3071 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
3072 seg_addr = segs[j].ds_addr;
3073 seg_len = segs[j].ds_len;
3075 txd->read.buffer_addr = htole64(seg_addr);
3076 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
3077 txd->read.olinfo_status = htole32(olinfo_status);
3079 if (++i == txr->num_tx_desc)
3080 i = 0;
3081 tx_buf->m_head = NULL;
3084 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
3085 txr->next_avail_desc = i;
3086 txr->tx_avail -= nsegs;
3088 tx_buf->m_head = m_head;
3089 tx_buf_mapped->map = tx_buf->map;
3090 tx_buf->map = map;
3093 * Last Descriptor of Packet needs End Of Packet (EOP)
3094 * and Report Status (RS).
3095 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);
3098 * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
3099 * that this frame is available to transmit.
3101 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), i);
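/*
 * if_start handler: drain the interface send queue one frame at a
 * time until the TX ring runs low, at which point OACTIVE is set
 * and the watchdog armed so igb_txeof()/igb_watchdog() can restart
 * or reset the queue later.
 */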
3108 igb_start(struct ifnet *ifp)
3110 struct igb_softc *sc = ifp->if_softc;
3111 struct igb_tx_ring *txr = &sc->tx_rings[0];
3112 struct mbuf *m_head;
3114 ASSERT_SERIALIZED(&txr->tx_serialize);
3116 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3119 if (!sc->link_active) {
3120 ifq_purge(&ifp->if_snd);
3124 if (!IGB_IS_NOT_OACTIVE(txr))
3127 while (!ifq_is_empty(&ifp->if_snd)) {
3128 if (IGB_IS_OACTIVE(txr)) {
3129 ifp->if_flags |= IFF_OACTIVE;
3130 /* Set watchdog on */
3131 ifp->if_timer = 5;
3132 break;
3135 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3136 if (m_head == NULL)
3137 break;
3139 if (igb_encap(txr, &m_head)) {
3140 ifp->if_oerrors++;
3141 continue;
3144 /* Send a copy of the frame to the BPF listener */
3145 ETHER_BPF_MTAP(ifp, m_head);
3150 igb_watchdog(struct ifnet *ifp)
3152 struct igb_softc *sc = ifp->if_softc;
3153 struct igb_tx_ring *txr = &sc->tx_rings[0];
3155 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3158 * If flow control has paused us since last checking,
3159 * it invalidates the watchdog timing, so don't run it.
3161 if (sc->pause_frames) {
3162 sc->pause_frames = 0;
3163 ifp->if_timer = 5;
3164 return;
3167 if_printf(ifp, "Watchdog timeout -- resetting\n");
3168 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
3169 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
3170 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
3171 if_printf(ifp, "TX(%d) desc avail = %d, "
3172 "Next TX to Clean = %d\n",
3173 txr->me, txr->tx_avail, txr->next_to_clean);
3176 sc->watchdog_events++;
3177 igb_init(sc);
3179 if (!ifq_is_empty(&ifp->if_snd))
3180 if_devstart(ifp);
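/*
 * Program the interrupt throttle register (EITR).  A sketch of the
 * arithmetic, assuming sc->intr_rate is 8000 interrupts/sec: the
 * 82575 counts in 256ns units, so itr = 10^9 / 256 / 8000 ~= 488,
 * while later chips take a plain 1us interval, itr = 10^6 / 8000 =
 * 125.
 */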
3184 igb_set_eitr(struct igb_softc *sc)
3186 uint32_t itr = 0;
3188 if (sc->intr_rate > 0) {
3189 if (sc->hw.mac.type == e1000_82575) {
3190 itr = 1000000000 / 256 / sc->intr_rate;
3193 * The documentation is wrong about the 2-bit left shift.
3196 itr = 1000000 / sc->intr_rate;
3201 if (sc->hw.mac.type == e1000_82575)
3202 itr |= itr << 16;
3203 else
3204 itr |= E1000_EITR_CNT_IGNR;
3205 E1000_WRITE_REG(&sc->hw, E1000_EITR(0), itr);
3209 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3211 struct igb_softc *sc = (void *)arg1;
3212 struct ifnet *ifp = &sc->arpcom.ac_if;
3213 int error, intr_rate;
3215 intr_rate = sc->intr_rate;
3216 error = sysctl_handle_int(oidp, &intr_rate, 0, req);
3217 if (error || req->newptr == NULL)
3218 return error;
3222 ifnet_serialize_all(ifp);
3224 sc->intr_rate = intr_rate;
3225 if (ifp->if_flags & IFF_RUNNING)
3226 igb_set_eitr(sc);
3228 ifnet_deserialize_all(ifp);
3231 if_printf(ifp, "Interrupt rate set to %d/sec\n", sc->intr_rate);
3236 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
3238 struct igb_softc *sc = (void *)arg1;
3239 struct ifnet *ifp = &sc->arpcom.ac_if;
3240 struct igb_tx_ring *txr = &sc->tx_rings[0];
3243 nsegs = txr->intr_nsegs;
3244 error = sysctl_handle_int(oidp, &nsegs, 0, req);
3245 if (error || req->newptr == NULL)
3246 return error;
3250 ifnet_serialize_all(ifp);
3252 if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc ||
3253 nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) {
3254 error = EINVAL;
3255 } else {
3256 error = 0;
3257 txr->intr_nsegs = nsegs;
3260 ifnet_deserialize_all(ifp);
3266 igb_init_intr(struct igb_softc *sc)
3268 if (sc->flags & IGB_FLAG_SHARED_INTR)
3269 igb_set_eitr(sc);
3270 else
3271 igb_init_unshared_intr(sc);
3275 igb_init_unshared_intr(struct igb_softc *sc)
3277 struct e1000_hw *hw = &sc->hw;
3278 const struct igb_rx_ring *rxr;
3279 const struct igb_tx_ring *txr;
3280 uint32_t ivar, index;
3284 * Enable extended mode
3286 if (sc->hw.mac.type != e1000_82575) {
3287 E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_NSICR);
3288 } else {
3289 uint32_t tmp;
3291 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
3292 tmp |= E1000_CTRL_EXT_IRCA;
3293 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
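/*
 * Each 32-bit IVAR register holds four one-byte entries; an entry
 * is an EICR bit number with E1000_IVAR_VALID (bit 7) set, and the
 * byte position selects which queue's cause it maps.  The masks
 * below therefore clear exactly the byte being rewritten.
 */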
3297 * Map TX/RX interrupts to EICR
3299 switch (sc->hw.mac.type) {
3300 case e1000_82580:
3301 case e1000_i350:
3302 case e1000_vfadapt:
3303 case e1000_vfadapt_i350:
3305 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3306 rxr = &sc->rx_rings[i];
3308 index = i >> 1;
3309 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3311 if (i & 1) {
3312 ivar &= 0xff00ffff;
3313 ivar |=
3314 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3315 } else {
3316 ivar &= 0xffffff00;
3317 ivar |=
3318 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3319 }
3320 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3323 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3324 txr = &sc->tx_rings[i];
3326 index = i >> 1;
3327 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3329 if (i & 1) {
3330 ivar &= 0x00ffffff;
3331 ivar |=
3332 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3333 } else {
3334 ivar &= 0xffff00ff;
3335 ivar |=
3336 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3337 }
3338 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3340 /* Clear unused IVAR_MISC */
3341 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
3342 break;
3344 case e1000_82576:
3346 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3347 rxr = &sc->rx_rings[i];
3349 index = i & 0x7; /* Each IVAR has two entries */
3350 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3352 if (i < 8) { /* first 8 use IVAR low entry */
3353 ivar &= 0xffffff00;
3354 ivar |=
3355 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3356 } else {
3357 ivar &= 0xff00ffff;
3358 ivar |=
3359 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3360 }
3361 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3364 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3365 txr = &sc->tx_rings[i];
3367 index = i & 0x7; /* Each IVAR has two entries */
3368 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3370 if (i < 8) { /* first 8 use IVAR low entry */
3371 ivar &= 0xffff00ff;
3372 ivar |=
3373 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3374 } else {
3375 ivar &= 0x00ffffff;
3376 ivar |=
3377 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3378 }
3379 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3381 /* Clear unused IVAR_MISC */
3382 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
3387 * Enable necessary interrupt bits.
3389 * The name of the register is confusing; in addition to
3390 * configuring the first vector of MSI-X, it also configures
3391 * which bits of EICR could be set by the hardware even when
3392 * MSI or line interrupt is used; it thus controls interrupt
3393 * generation. It MUST be configured explicitly; the default
3394 * value mentioned in the datasheet is wrong: RX queue0 and
3395 * TX queue0 are NOT enabled by default.
3397 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask);
3405 * Configure interrupt moderation
3407 igb_set_eitr(sc);
3411 igb_setup_intr(struct igb_softc *sc)
3413 struct ifnet *ifp = &sc->arpcom.ac_if;
3417 * Setup interrupt mask
3419 for (i = 0; i < sc->tx_ring_cnt; ++i)
3420 igb_setup_tx_intr(&sc->tx_rings[i]);
3421 for (i = 0; i < sc->rx_ring_cnt; ++i)
3422 igb_setup_rx_intr(&sc->rx_rings[i]);
3424 sc->intr_mask = E1000_EICR_OTHER;
3425 for (i = 0; i < sc->rx_ring_cnt; ++i)
3426 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask;
3427 for (i = 0; i < sc->tx_ring_cnt; ++i)
3428 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask;
3430 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
3431 int unshared;
3433 unshared = device_getenv_int(sc->dev, "irq.unshared", 0);
3434 if (!unshared) {
3435 sc->flags |= IGB_FLAG_SHARED_INTR;
3436 if (bootverbose)
3437 device_printf(sc->dev, "IRQ shared\n");
3438 } else if (bootverbose) {
3439 device_printf(sc->dev, "IRQ unshared\n");
3443 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE,
3444 (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_shared_intr : igb_intr,
3445 sc, &sc->intr_tag, &sc->main_serialize);
3447 device_printf(sc->dev, "Failed to register interrupt handler");
3451 ifp->if_cpuid = rman_get_cpuid(sc->intr_res);
3452 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3458 igb_setup_tx_intr(struct igb_tx_ring *txr)
3460 if (txr->sc->hw.mac.type == e1000_82575) {
3461 txr->tx_intr_bit = 0; /* unused */
3462 switch (txr->me) {
3463 case 0:
3464 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
3465 break;
3466 case 1:
3467 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
3468 break;
3469 case 2:
3470 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
3471 break;
3472 case 3:
3473 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
3474 break;
3475 default:
3476 panic("unsupported TX ring %d\n", txr->me);
3477 }
3478 } else {
3479 txr->tx_intr_bit = 0; /* XXX */
3480 txr->tx_intr_mask = 1 << txr->tx_intr_bit;
3485 igb_setup_rx_intr(struct igb_rx_ring *rxr)
3487 if (rxr->sc->hw.mac.type == e1000_82575) {
3488 rxr->rx_intr_bit = 0; /* unused */
3489 switch (rxr->me) {
3490 case 0:
3491 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
3492 break;
3493 case 1:
3494 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
3495 break;
3496 case 2:
3497 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
3498 break;
3499 case 3:
3500 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
3501 break;
3502 default:
3503 panic("unsupported RX ring %d\n", rxr->me);
3504 }
3505 } else {
3506 rxr->rx_intr_bit = 1; /* XXX */
3507 rxr->rx_intr_mask = 1 << rxr->rx_intr_bit;
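/*
 * The if_serialize methods below operate on the driver's serializer
 * array, where slot 0 is the main serializer followed by the TX and
 * RX ring serializers; the skipmain variants take or release
 * everything except slot 0, for use where the main serializer is
 * already held (e.g. the interrupt handlers above).
 */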
3512 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
3514 struct igb_softc *sc = ifp->if_softc;
3516 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt,
3517 sc->tx_serialize, sc->rx_serialize, slz);
3521 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3523 struct igb_softc *sc = ifp->if_softc;
3525 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt,
3526 sc->tx_serialize, sc->rx_serialize, slz);
3530 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
3532 struct igb_softc *sc = ifp->if_softc;
3534 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
3535 sc->tx_serialize, sc->rx_serialize, slz);
3539 igb_serialize_skipmain(struct igb_softc *sc)
3541 lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
3545 igb_deserialize_skipmain(struct igb_softc *sc)
3547 lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
3553 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
3554 boolean_t serialized)
3556 struct igb_softc *sc = ifp->if_softc;
3558 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
3559 sc->tx_serialize, sc->rx_serialize, slz, serialized);
3562 #endif /* INVARIANTS */