/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.70 2012/07/05 20:51:44 jfv Exp $*/
#include "opt_inet.h"
#include "opt_inet6.h"

#include "ixgbe.h"

/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int             ixgbe_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char ixgbe_driver_version[] = "2.4.8";
/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by probe to select devices to load on
 *  Last field stores an index into ixgbe_strings
 *  Last entry must be all 0s
 *
 *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
 *********************************************************************/
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
	/* required last entry */
	{0, 0, 0, 0, 0}
};
/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixgbe_strings[] = {
	"Intel(R) PRO/10GbE PCI-Express Network Driver"
};
/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static void     ixgbe_start(struct ifnet *);
static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
#if 0 /* __FreeBSD_version >= 800000 */
static int      ixgbe_mq_start(struct ifnet *, struct mbuf *);
static int      ixgbe_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void     ixgbe_qflush(struct ifnet *);
#endif
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     ixgbe_init(void *);
static void     ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int      ixgbe_allocate_queues(struct adapter *);
static int      ixgbe_setup_msix(struct adapter *);
static void     ixgbe_free_pci_resources(struct adapter *);
static void     ixgbe_local_timer(void *);
static int      ixgbe_setup_interface(device_t, struct adapter *);
static void     ixgbe_config_link(struct adapter *);

static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
static int      ixgbe_setup_transmit_structures(struct adapter *);
static void     ixgbe_setup_transmit_ring(struct tx_ring *);
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_free_transmit_structures(struct adapter *);
static void     ixgbe_free_transmit_buffers(struct tx_ring *);

static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
static int      ixgbe_setup_receive_structures(struct adapter *);
static int      ixgbe_setup_receive_ring(struct rx_ring *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_free_receive_structures(struct adapter *);
static void     ixgbe_free_receive_buffers(struct rx_ring *);

static void     ixgbe_setup_hw_rsc(struct rx_ring *);

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static void     ixgbe_txeof(struct tx_ring *);
static void     ixgbe_rxeof(struct ix_queue *, int);
static void     ixgbe_rx_checksum(u32, struct mbuf *, u32);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void     ixgbe_refresh_mbufs(struct rx_ring *, int);
static int      ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS);
static int      ixgbe_dma_malloc(struct adapter *, bus_size_t,
                    struct ixgbe_dma_alloc *, int);
static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void     ixgbe_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static bool     ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
static bool     ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *);
static int      ixgbe_tso_pullup(struct tx_ring *, struct mbuf **);
static void     ixgbe_add_sysctl(struct adapter *);
static void     ixgbe_set_eitr(struct adapter *, int, int);
static int      ixgbe_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void     ixgbe_configure_ivars(struct adapter *);
static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void     ixgbe_setup_vlan_hw_support(struct adapter *);
static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void     ixgbe_add_hw_stats(struct adapter *adapter);

static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                    struct mbuf *, u32);

/* Support for pluggable optic modules */
static bool     ixgbe_sfp_probe(struct adapter *);
static void     ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void     ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void     ixgbe_msix_que(void *);
static void     ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void     ixgbe_handle_msf(void *, int);
static void     ixgbe_handle_mod(void *, int);

/* Flow Director routines */
static void     ixgbe_atr(struct tx_ring *, struct mbuf *);
static void     ixgbe_reinit_fdir(void *, int);
/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixgbe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ixgbe_probe),
	DEVMETHOD(device_attach, ixgbe_attach),
	DEVMETHOD(device_detach, ixgbe_detach),
	DEVMETHOD(device_shutdown, ixgbe_shutdown),
	{ 0, 0 }
};

static driver_t ixgbe_driver = {
	"ix", ixgbe_methods, sizeof(struct adapter),
};

devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);

MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
/*
** TUNABLE PARAMETERS:
*/

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 128;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);

/*
** Smart speed setting, default to on.
** This only works as a compile option
** right now, as it is set during attach;
** set this to 'ixgbe_smart_speed_off' to
** disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

static int ixgbe_msi_enable = 1;
TUNABLE_INT("hw.ixgbe.msi.enable", &ixgbe_msi_enable);

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts; it is off by default.
 */
static int ixgbe_header_split = FALSE;
TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);

/*
 * Number of queues; if set to 0 the driver
 * autoconfigures based on the number of CPUs,
 * with a maximum of 8. It can be overridden
 * manually here.
 */
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);

/*
** Number of TX descriptors per ring;
** set higher than RX as this seems
** the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
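
/*
 * A minimal sketch of how these knobs are typically set from
 * /boot/loader.conf (the values are illustrative only; the tunable
 * names are exactly the strings passed to TUNABLE_INT() above):
 *
 *   hw.ixgbe.rx_process_limit="256"
 *   hw.ixgbe.enable_msix="1"
 *   hw.ixgbe.num_queues="4"
 *   hw.ixgbe.txd="2048"
 *   hw.ixgbe.rxd="2048"
 */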
/* Keep a running tab on them for sanity check */
static int ixgbe_total_ports;

/*
** For Flow Director: this is the
** number of TX packets we sample
** for the filter pool; with the default
** of 20, every 20th packet will be probed.
**
** This feature can be disabled by
** setting this to 0.
*/
static int atr_sample_rate = 20;

/*
** Flow Director actually 'steals'
** part of the packet buffer as its
** filter pool; this variable controls
** how much it takes:
** 0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
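
/*
 * Worked example (illustrative only): with the default fdir_pballoc = 1
 * the filter pool takes 128K of the packet buffer, and the header memory
 * value derived from it in ixgbe_init_locked() below is
 * hdrm = 32 << 1 = 64.
 */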
#ifdef DEV_NETMAP
/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */
/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  this adapter based on the PCI vendor/device id of the adapter.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
	ixgbe_vendor_info_t *ent;

	u16	pci_vendor_id = 0;
	u16	pci_device_id = 0;
	u16	pci_subvendor_id = 0;
	u16	pci_subdevice_id = 0;
	char	adapter_name[256];

	INIT_DEBUGOUT("ixgbe_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixgbe_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			ksprintf(adapter_name, "%s, Version - %s",
			    ixgbe_strings[ent->index],
			    ixgbe_driver_version);
			device_set_desc_copy(dev, adapter_name);
			++ixgbe_total_ports;
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
	struct adapter *adapter;
	struct ixgbe_hw *hw;
	int             error = 0;
	u16             csum;
	u32             ctrl_ext;

	INIT_DEBUGOUT("ixgbe_attach: begin");

	if (resource_disabled("ixgbe", device_get_unit(dev))) {
		device_printf(dev, "Disabled by device hint\n");
		return (ENXIO);
	}

	/* Allocate, clear, and link in our adapter structure */
	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	hw = &adapter->hw;

	/* Core Lock Init */
	IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mp(&adapter->timer);

	/* Determine hardware revision */
	ixgbe_identify_hardware(adapter);

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Do base PCI setup - map BAR0 */
	if (ixgbe_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Do descriptor calc and sanity checks */
	if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
		device_printf(dev, "TXD config issue, using default!\n");
		adapter->num_tx_desc = DEFAULT_TXD;
	} else
		adapter->num_tx_desc = ixgbe_txd;

	/*
	** With many RX rings it is easy to exceed the
	** system mbuf allocation. Tuning nmbclusters
	** can alleviate this.
	*/
	if (nmbclusters > 0) {
		int s;

		s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
		if (s > nmbclusters) {
			device_printf(dev, "RX Descriptors exceed "
			    "system mbuf max, using default instead!\n");
			ixgbe_rxd = DEFAULT_RXD;
		}
	}

	if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
	    ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
		device_printf(dev, "RXD config issue, using default!\n");
		adapter->num_rx_desc = DEFAULT_RXD;
	} else
		adapter->num_rx_desc = ixgbe_rxd;

	/* Allocate our TX/RX Queues */
	if (ixgbe_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_out;
	}

	/* Allocate multicast array memory. */
	adapter->mta = kmalloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Initialize the shared code */
	error = ixgbe_init_shared_code(hw);
	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
		/*
		** No optics in this port, set up
		** so the timer routine will probe
		** for later insertion.
		*/
		adapter->sfp_probe = TRUE;
		error = 0;
	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,"Unsupported SFP+ module detected!\n");
		error = EIO;
		goto err_late;
	} else if (error) {
		device_printf(dev,"Unable to initialize the shared code\n");
		error = EIO;
		goto err_late;
	}

	/* Make sure we have a good EEPROM before we read from it */
	if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
		device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
		error = EIO;
		goto err_late;
	}

	error = ixgbe_init_hw(hw);
	switch (error) {
	case IXGBE_ERR_EEPROM_VERSION:
		device_printf(dev, "This device is a pre-production adapter/"
		    "LOM. Please be aware there may be issues associated "
		    "with your hardware.\n If you are experiencing problems "
		    "please contact your Intel or hardware representative "
		    "who provided you with this hardware.\n");
		break;
	case IXGBE_ERR_SFP_NOT_SUPPORTED:
		device_printf(dev,"Unsupported SFP+ Module\n");
		error = EIO;
		device_printf(dev,"Hardware Initialization Failure\n");
		goto err_late;
	case IXGBE_ERR_SFP_NOT_PRESENT:
		device_printf(dev,"No SFP+ Module found\n");
		break;
	default:
		break;
	}

	/* Detect and set physical type */
	ixgbe_setup_optics(adapter);

	if ((adapter->msix > 1) && (ixgbe_enable_msix)) {
		adapter->intr_type = PCI_INTR_TYPE_MSIX;
		error = ixgbe_allocate_msix(adapter);
	} else {
		error = ixgbe_allocate_legacy(adapter);
	}
	if (error)
		goto err_late;

	/* Setup OS specific network interface */
	if (ixgbe_setup_interface(dev, adapter) != 0)
		goto err_late;

	/* Add sysctl tree */
	ixgbe_add_sysctl(adapter);

	/* Initialize statistics */
	ixgbe_update_stats_counters(adapter);

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	/* Print PCIE bus type/speed/width info */
	ixgbe_get_bus_info(hw);
	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
	    ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	    ("Unknown"));

	if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
	    (hw->bus.speed == ixgbe_bus_speed_2500)) {
		device_printf(dev, "PCI-Express bandwidth available"
		    " for this card\n     is not sufficient for"
		    " optimal performance.\n");
		device_printf(dev, "For optimal performance a x8 "
		    "PCIE, or x4 PCIE 2 slot is required.\n");
	}

	/* let hardware know driver is loaded */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

	ixgbe_add_hw_stats(adapter);

#ifdef DEV_NETMAP
	ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("ixgbe_attach: end");
	return (0);

err_late:
	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
err_out:
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	ixgbe_free_pci_resources(adapter);
	kfree(adapter->mta, M_DEVBUF);
	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	u32	ctrl_ext;

	INIT_DEBUGOUT("ixgbe_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunks != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);

	/* Drain the Link queue */
	if (adapter->tq) {
		taskqueue_drain(adapter->tq, &adapter->mod_task);
		taskqueue_drain(adapter->tq, &adapter->msf_task);
		taskqueue_drain(adapter->tq, &adapter->fdir_task);
		taskqueue_free(adapter->tq);
	}

	/* let hardware know driver is unloading */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	ether_ifdetach(adapter->ifp);
	callout_stop(&adapter->timer);
#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	ixgbe_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(adapter->ifp);

	ixgbe_free_transmit_structures(adapter);
	ixgbe_free_receive_structures(adapter);
	kfree(adapter->mta, M_DEVBUF);
	sysctl_ctx_free(&adapter->sysctl_ctx);

	IXGBE_CORE_LOCK_DESTROY(adapter);
	return (0);
}
/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	IXGBE_CORE_LOCK(adapter);
	ixgbe_stop(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return (0);
}
/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

static void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	struct mbuf    *m_head;
	struct adapter *adapter = txr->adapter;

	IXGBE_TX_LOCK_ASSERT(txr);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	if (!adapter->link_active) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	while (!ifq_is_empty(&ifp->if_snd)) {
		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) {
			txr->queue_status |= IXGBE_QUEUE_DEPLETED;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (ixgbe_xmit(txr, &m_head)) {
#if 0 /* XXX: prepend to an ALTQ queue ? */
			if (m_head != NULL)
				IF_PREPEND(&ifp->if_snd, m_head);
#endif
			if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
				txr->queue_status |= IXGBE_QUEUE_DEPLETED;
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_time = ticks;
		txr->queue_status = IXGBE_QUEUE_WORKING;
	}
}
/*
 * Legacy TX start - called by the stack, this
 * always uses the first tx ring, and should
 * not be used with multiqueue tx enabled.
 */
static void
ixgbe_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct tx_ring *txr = adapter->tx_rings;

	if (ifp->if_flags & IFF_RUNNING) {
		IXGBE_TX_LOCK(txr);
		ixgbe_start_locked(txr, ifp);
		IXGBE_TX_UNLOCK(txr);
	}
	return;
}
#if 0 /* __FreeBSD_version >= 800000 */
/*
** Multiqueue Transmit driver
**
*/
static int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ix_queue	*que;
	struct tx_ring	*txr;
	int		i = 0, err = 0;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % adapter->num_queues;
	else
		i = curcpu % adapter->num_queues;

	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	if (((txr->queue_status & IXGBE_QUEUE_DEPLETED) == 0) &&
	    IXGBE_TX_TRYLOCK(txr)) {
		err = ixgbe_mq_start_locked(ifp, txr, m);
		IXGBE_TX_UNLOCK(txr);
	} else {
		err = drbr_enqueue(ifp, txr->br, m);
		taskqueue_enqueue(que->tq, &que->que_task);
	}

	return (err);
}

static int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter  *adapter = txr->adapter;
	struct mbuf     *next;
	int             enqueued, err = 0;

	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
	    (txr->queue_status == IXGBE_QUEUE_DEPLETED) ||
	    adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	enqueued = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		if ((err = ixgbe_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enqueued++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
			ixgbe_txeof(txr);
		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
			txr->queue_status |= IXGBE_QUEUE_DEPLETED;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}

	if (enqueued > 0) {
		/* Set watchdog on */
		txr->queue_status |= IXGBE_QUEUE_WORKING;
		txr->watchdog_time = ticks;
	}

	if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
		ixgbe_txeof(txr);

	return (err);
}

/*
** Flush all ring buffers
*/
static void
ixgbe_qflush(struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct tx_ring	*txr = adapter->tx_rings;
	struct mbuf	*m;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		IXGBE_TX_LOCK(txr);
		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
			m_freem(m);
		IXGBE_TX_UNLOCK(txr);
	}
	if_qflush(ifp);
}
#endif /* __FreeBSD_version >= 800000 */
/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct adapter	*adapter = ifp->if_softc;
	struct ifreq	*ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
	struct ifaddr	*ifa = (struct ifaddr *)data;
	bool		avoid_reset = FALSE;
#endif
	int             error = 0;

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/*
		** Calling init results in link renegotiation,
		** so we avoid doing it when possible.
		*/
		if (avoid_reset) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_flags & IFF_RUNNING))
				ixgbe_init(adapter);
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
			error = ether_ioctl(ifp, command, data);
		break;
#endif
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGBE_CORE_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
		IXGBE_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					ixgbe_set_promisc(adapter);
				}
			} else
				ixgbe_init_locked(adapter);
		} else
			if (ifp->if_flags & IFF_RUNNING)
				ixgbe_stop(adapter);
		adapter->if_flags = ifp->if_flags;
		IXGBE_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
		if (ifp->if_flags & IFF_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_disable_intr(adapter);
			ixgbe_set_multi(adapter);
			ixgbe_enable_intr(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (ifp->if_flags & IFF_RUNNING) {
			IXGBE_CORE_LOCK(adapter);
			ixgbe_init_locked(adapter);
			IXGBE_CORE_UNLOCK(adapter);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	}
	default:
		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  an init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16
static void
ixgbe_init_locked(struct adapter *adapter)
{
	struct ifnet   *ifp = adapter->ifp;
	device_t	dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		k, txdctl, mhadd, gpie;
	u32		rxdctl, rxctrl;

	KKASSERT(lockstatus(&adapter->core_lock, curthread) != 0);
	INIT_DEBUGOUT("ixgbe_init: begin");
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	callout_stop(&adapter->timer);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
	    IXGBE_ETH_LENGTH_OF_ADDRESS);
	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
	hw->addr_ctrl.rar_used_count = 1;

	/* Set the various hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TSO)
		ifp->if_hwassist |= CSUM_TSO;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if 0
		if (hw->mac.type != ixgbe_mac_82598EB)
			ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	/* Prepare transmit descriptors and buffers */
	if (ixgbe_setup_transmit_structures(adapter)) {
		device_printf(dev,"Could not setup transmit structures\n");
		ixgbe_stop(adapter);
		return;
	}

	ixgbe_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	ixgbe_set_multi(adapter);

	/*
	** Determine the correct mbuf pool
	** for doing jumbo/headersplit
	*/
#if 0
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else if (adapter->max_frame_size <= 9216)
		adapter->rx_mbuf_sz = MJUM9BYTES;
	else
		adapter->rx_mbuf_sz = MJUM16BYTES;
#else
	adapter->rx_mbuf_sz = MCLBYTES;
#endif

	/* Prepare receive descriptors and buffers */
	if (ixgbe_setup_receive_structures(adapter)) {
		device_printf(dev,"Could not setup receive structures\n");
		ixgbe_stop(adapter);
		return;
	}

	/* Configure RX settings */
	ixgbe_initialize_receive_units(adapter);

	gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

	/* Enable Fan Failure Interrupt */
	gpie |= IXGBE_SDP1_GPIEN;

	/* Add for Module detection */
	if (hw->mac.type == ixgbe_mac_82599EB)
		gpie |= IXGBE_SDP2_GPIEN;

	/* Thermal Failure Detection */
	if (hw->mac.type == ixgbe_mac_X540)
		gpie |= IXGBE_SDP0_GPIEN;

	if (adapter->msix > 1) {
		/* Enable Enhanced MSIX mode */
		gpie |= IXGBE_GPIE_MSIX_MODE;
		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
		    IXGBE_GPIE_OCD;
	}
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Set MTU size */
	if (ifp->if_mtu > ETHERMTU) {
		mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	/* Now enable all the queues */
	for (int i = 0; i < adapter->num_queues; i++) {
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		/* Set WTHRESH to 8, burst writeback */
		txdctl |= (8 << 16);
		/*
		 * When the internal queue falls below PTHRESH (32),
		 * start prefetching as long as there are at least
		 * HTHRESH (1) buffers ready. The values are taken
		 * from the Intel linux driver 3.8.21.
		 * Prefetching enables tx line rate even with 1 queue.
		 */
		txdctl |= (32 << 0) | (1 << 8);
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
	}
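
	/*
	 * Sketch of the TXDCTL packing used in the loop above (illustrative
	 * only, not driver code; the bit positions - PTHRESH in bits 6:0,
	 * HTHRESH in bits 14:8, WTHRESH in bits 22:16 - are assumed from
	 * the 82599 datasheet, and the IXGBE_TXDCTL_THRESH macro below is
	 * hypothetical):
	 */
#if 0
#define IXGBE_TXDCTL_THRESH(pthresh, hthresh, wthresh)	\
	(((u32)(pthresh) << 0) | ((u32)(hthresh) << 8) | ((u32)(wthresh) << 16))
	/* equivalent to the magic ORs above: */
	txdctl |= IXGBE_TXDCTL_THRESH(32, 1, 8);
#endif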
	for (int i = 0; i < adapter->num_queues; i++) {
		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		if (hw->mac.type == ixgbe_mac_82598EB) {
			/*
			** PTHRESH = 21
			** HTHRESH = 4
			** WTHRESH = 8
			*/
			rxdctl &= ~0x3FFFFF;
			rxdctl |= 0x080420;
		}
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
		for (k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			else
				msec_delay(1);
		}
		wmb();
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (ifp->if_capenable & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(adapter->ifp);
			struct netmap_kring *kring = &na->rx_rings[i];
			int t = na->num_rx_desc - 1 - kring->nr_hwavail;

			IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
		} else
#endif /* DEV_NETMAP */
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
	}

	/* Set up VLAN support and filter */
	ixgbe_setup_vlan_hw_support(adapter);

	/* Enable Receive engine */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		rxctrl |= IXGBE_RXCTRL_DMBYPS;
	rxctrl |= IXGBE_RXCTRL_RXEN;
	ixgbe_enable_rx_dma(hw, rxctrl);

	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

	/* Set up MSI/X routing */
	if (ixgbe_enable_msix) {
		ixgbe_configure_ivars(adapter);
		/* Set up auto-mask */
		if (hw->mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
		else {
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
		}
	} else {  /* Simple settings for Legacy/MSI */
		ixgbe_set_ivar(adapter, 0, 0, 0);
		ixgbe_set_ivar(adapter, 0, 0, 1);
		IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
	}

	/* Init Flow director */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		u32 hdrm = 32 << fdir_pballoc;

		hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
		ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
	}

	/*
	** Check on any SFP devices that
	** need to be kick-started
	*/
	if (hw->phy.type == ixgbe_phy_none) {
		int err = hw->phy.ops.identify(hw);
		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev,
			    "Unsupported SFP+ module type was detected.\n");
			return;
		}
	}

	/* Set moderation on the Link interrupt */
	ixgbe_set_eitr(adapter, adapter->linkvec, IXGBE_LINK_ITR);

	/* Config/Enable Link */
	ixgbe_config_link(adapter);

	/* Hardware Packet Buffer & Flow Control setup */
	{
		u32 rxpb, frame, size, tmp;

		frame = adapter->max_frame_size;

		/* Calculate High Water */
		if (hw->mac.type == ixgbe_mac_X540)
			tmp = IXGBE_DV_X540(frame, frame);
		else
			tmp = IXGBE_DV(frame, frame);
		size = IXGBE_BT2KB(tmp);
		rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
		hw->fc.high_water[0] = rxpb - size;

		/* Now calculate Low Water */
		if (hw->mac.type == ixgbe_mac_X540)
			tmp = IXGBE_LOW_DV_X540(frame);
		else
			tmp = IXGBE_LOW_DV(frame);
		hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

		adapter->fc = hw->fc.requested_mode = ixgbe_fc_full;
		hw->fc.pause_time = IXGBE_FC_PAUSE;
		hw->fc.send_xon = TRUE;
	}
	/* Initialize the FC settings */
	ixgbe_start_hw(hw);

	/* And now turn on interrupts */
	ixgbe_enable_intr(adapter);

	/* Now inform the stack we're ready */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	return;
}

static void
ixgbe_init(void *arg)
{
	struct adapter *adapter = arg;

	IXGBE_CORE_LOCK(adapter);
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;
}
/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	/* Shift in 64 bits: (1 << vector) would truncate for vector >= 32 */
	u64	queue = (u64)1 << vector;
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
	}
}

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64	queue = (u64)1 << vector;
	u32	mask;

	if (hw->mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
	} else {
		mask = (queue & 0xFFFFFFFF);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
		mask = (queue >> 32);
		if (mask)
			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
	}
}

static inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (queues & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (queues >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}
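
/*
 * Worked example of the 64-bit mask split used by the three helpers above
 * (illustrative only, not driver code): on non-82598 MACs the queue bitmap
 * is programmed as two 32-bit register halves.
 */
#if 0
	u64 queues = (u64)1 << 40;	/* only queue vector 40 set */
	u32 lo = queues & 0xFFFFFFFF;	/* 0x00000000 -> EICS_EX(0) */
	u32 hi = queues >> 32;		/* 0x00000100 -> EICS_EX(1) */
#endif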
/*********************************************************************
 *
 *  Legacy Interrupt Service routine
 *
 **********************************************************************/

static void
ixgbe_legacy_irq(void *arg)
{
	struct ix_queue *que = arg;
	struct adapter	*adapter = que->adapter;
	struct ixgbe_hw	*hw = &adapter->hw;
	struct tx_ring	*txr = adapter->tx_rings;
	u32		reg_eicr;

	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	++que->irqs;
	if (reg_eicr == 0) {
		ixgbe_enable_intr(adapter);
		return;
	}

	ixgbe_rxeof(que, adapter->rx_process_limit);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	if (!ifq_is_empty(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, adapter->ifp);
	IXGBE_TX_UNLOCK(txr);

	/* Check for fan failure */
	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
	}

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC) {
		ixgbe_check_link(&adapter->hw,
		    &adapter->link_speed, &adapter->link_up, 0);
		ixgbe_update_link_status(adapter);
	}

	ixgbe_enable_intr(adapter);
}
/*********************************************************************
 *
 *  MSIX Queue Interrupt Service routine
 *
 **********************************************************************/
static void
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter  *adapter = que->adapter;
	struct tx_ring	*txr = que->txr;

	ixgbe_disable_queue(adapter, que->msix);
	++que->irqs;

	ixgbe_rxeof(que, adapter->rx_process_limit);

	IXGBE_TX_LOCK(txr);
	ixgbe_txeof(txr);
	if (!ifq_is_empty(&adapter->ifp->if_snd))
		ixgbe_start_locked(txr, adapter->ifp);
	IXGBE_TX_UNLOCK(txr);

	/* Re-enable this interrupt */
	ixgbe_enable_queue(adapter, que->msix);
}
static void
ixgbe_msix_link(void *arg)
{
	struct adapter	*adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		reg_eicr;

	++adapter->link_irq;

	/* First get the cause */
	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);

	/* Link status change */
	if (reg_eicr & IXGBE_EICR_LSC) {
		ixgbe_check_link(&adapter->hw,
		    &adapter->link_speed, &adapter->link_up, 0);
		ixgbe_update_link_status(adapter);
	}

	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
		if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
			/* This is probably overkill :) */
			if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
				return;
			/* Disable the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
			taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
		} else
		if (reg_eicr & IXGBE_EICR_ECC) {
			device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
			    "Please Reboot!!\n");
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
		}

		if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
			/* Clear the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
			taskqueue_enqueue(adapter->tq, &adapter->msf_task);
		} else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
			/* Clear the interrupt */
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
		}
	}

	/* Check for fan failure */
	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
		device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
		    "REPLACE IMMEDIATELY!!\n");
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}

	/* Check for over temp condition */
	if ((hw->mac.type == ixgbe_mac_X540) &&
	    (reg_eicr & IXGBE_EICR_GPI_SDP0)) {
		device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
		    "PHY IS SHUT DOWN!!\n");
		device_printf(adapter->dev, "System shutdown required\n");
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
	return;
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
static void
ixgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	INIT_DEBUGOUT("ixgbe_media_status: begin");
	ixgbe_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (adapter->link_speed) {
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= adapter->optics | IFM_FDX;
		break;
	}
}
/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
static int
ixgbe_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	INIT_DEBUGOUT("ixgbe_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.phy.autoneg_advertised =
		    IXGBE_LINK_SPEED_100_FULL |
		    IXGBE_LINK_SPEED_1GB_FULL |
		    IXGBE_LINK_SPEED_10GB_FULL;
		break;
	default:
		device_printf(adapter->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
}
/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors, allowing the
 *  TX engine to transmit the packets.
 *	- return 0 on success, positive on failure
 *
 **********************************************************************/

static int
ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
{
	struct adapter  *adapter = txr->adapter;
	u32		olinfo_status = 0, cmd_type_len;
	u32		paylen = 0;
	int             i, j, error, nsegs, maxsegs;
	int		first, last = 0;
	struct mbuf	*m_head;
	bus_dma_segment_t segs[adapter->num_segs];
	bus_dmamap_t	map;
	struct ixgbe_tx_buf *txbuf;
	union ixgbe_adv_tx_desc *txd = NULL;

	m_head = *m_headp;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = ixgbe_tso_pullup(txr, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	/* Basic descriptor defines */
	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);

	if (m_head->m_flags & M_VLANTAG)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	/*
	 * Important to capture the first descriptor
	 * used because it will contain the index of
	 * the one we tell the hardware to report back
	 */
	first = txr->next_avail_desc;
	txbuf = &txr->tx_buffers[first];
	map = txbuf->map;

	/*
	 * Map the packet for DMA.
	 */
	maxsegs = txr->tx_avail - IXGBE_TX_RESERVED;
	if (maxsegs > adapter->num_segs)
		maxsegs = adapter->num_segs;

	error = bus_dmamap_load_mbuf_defrag(txr->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			adapter->mbuf_defrag_failed++;
		else
			adapter->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return (error);
	}

	/* Make certain there are enough descriptors */
	if (nsegs > txr->tx_avail - 2) {
		txr->no_desc_avail++;
		error = ENOBUFS;
		goto xmit_fail;
	}
	m_head = *m_headp;

	/*
	** Set up the appropriate offload context;
	** this becomes the first descriptor of
	** a packet.
	*/
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ixgbe_tso_setup(txr, m_head, &paylen, &olinfo_status)) {
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
			++adapter->tso_tx;
		} else
			return (ENXIO);
	} else if (ixgbe_tx_ctx_setup(txr, m_head))
		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;

#ifdef IXGBE_IEEE1588
	/* This is changing soon to an mtag detection */
	if (we detect this mbuf has a TSTAMP mtag)
		cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
#endif

	/* Do the flow director magic */
	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
		++txr->atr_count;
		if (txr->atr_count >= atr_sample_rate) {
			ixgbe_atr(txr, m_head);
			txr->atr_count = 0;
		}
	}

	/* Record payload length */
	if (paylen == 0)
		olinfo_status |= m_head->m_pkthdr.len <<
		    IXGBE_ADVTXD_PAYLEN_SHIFT;

	i = txr->next_avail_desc;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txbuf = &txr->tx_buffers[i];
		txd = &txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(txr->txd_cmd |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		last = i; /* descriptor that will get completion IRQ */

		if (++i == adapter->num_tx_desc)
			i = 0;

		txbuf->m_head = NULL;
		txbuf->eop_index = -1;
	}

	txd->read.cmd_type_len |=
	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
	txr->tx_avail -= nsegs;
	txr->next_avail_desc = i;

	txbuf->m_head = m_head;
	/* Swap the dma map between the first and last descriptor */
	txr->tx_buffers[first].map = txbuf->map;
	txbuf->map = map;
	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);

	/* Set the index of the descriptor that will be marked done */
	txbuf = &txr->tx_buffers[first];
	txbuf->eop_index = last;

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
	 * hardware that this frame is available to transmit.
	 */
	++txr->total_packets;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);

	return (0);

xmit_fail:
	bus_dmamap_unload(txr->txtag, txbuf->map);
	return (error);
}
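
/*
 * Note on the dma map swap in ixgbe_xmit() above (explanatory, not a
 * functional change): the frame is loaded into the map owned by the
 * *first* tx buffer of the frame, but the mbuf pointer and EOP index are
 * stored on the *last* buffer, so the two maps are exchanged to keep map
 * ownership aligned with where the mbuf will be unloaded and freed at
 * completion time.
 */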
static void
ixgbe_set_promisc(struct adapter *adapter)
{
	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;

	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	reg_rctl &= (~IXGBE_FCTRL_UPE);
	reg_rctl &= (~IXGBE_FCTRL_MPE);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= IXGBE_FCTRL_MPE;
		reg_rctl &= ~IXGBE_FCTRL_UPE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
	}
	return;
}
/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/
#define IXGBE_RAR_ENTRIES 16

static void
ixgbe_set_multi(struct adapter *adapter)
{
	u32	fctrl;
	u8	*mta;
	u8	*update_ptr;
	struct	ifmultiaddr *ifma;
	int	mcnt = 0;
	struct ifnet   *ifp = adapter->ifp;

	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");

	mta = adapter->mta;
	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
	    MAX_NUM_MULTICAST_ADDRESSES);

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	if (ifp->if_flags & IFF_PROMISC)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	else if (ifp->if_flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
		    IXGBE_ETH_LENGTH_OF_ADDRESS);
		mcnt++;
	}

	update_ptr = mta;
	ixgbe_update_mc_addr_list(&adapter->hw,
	    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);

	return;
}
/*
 * This is an iterator function now needed by the multicast
 * shared code. It simply feeds the shared code routine the
 * addresses in the array of ixgbe_set_multi() one by one.
 */
static u8 *
ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;
	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;
	return addr;
}
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 **********************************************************************/

static void
ixgbe_local_timer(void *arg)
{
	struct adapter	*adapter = arg;
	device_t	dev = adapter->dev;
	struct ifnet	*ifp = adapter->ifp;
	struct ix_queue *que = adapter->queues;
	struct tx_ring	*txr = adapter->tx_rings;
	int		hung, busy, paused;

	IXGBE_CORE_LOCK(adapter);
	hung = busy = paused = 0;

	/* Check for pluggable optics */
	if (adapter->sfp_probe)
		if (!ixgbe_sfp_probe(adapter))
			goto out; /* Nothing to do */

	ixgbe_update_link_status(adapter);
	ixgbe_update_stats_counters(adapter);

	/*
	 * If the interface has been paused
	 * then don't do the watchdog check
	 */
	if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
		paused = 1;

	/*
	** Check the TX queues status
	**	- central locked handling of OACTIVE
	**	- watchdog only if all queues show hung
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
		if ((txr->queue_status & IXGBE_QUEUE_HUNG) &&
		    (paused == 0))
			++hung;
		if (txr->queue_status & IXGBE_QUEUE_DEPLETED)
			++busy;
	}
	/* Only truly watchdog if all queues show hung */
	if (hung == adapter->num_queues)
		goto watchdog;
	/* Only turn off the stack flow when ALL are depleted */
	if (busy == adapter->num_queues)
		ifq_set_oactive(&ifp->if_snd);
	else if (ifq_is_oactive(&ifp->if_snd) && (busy < adapter->num_queues))
		ifq_clr_oactive(&ifp->if_snd);

out:
	ixgbe_rearm_queues(adapter, adapter->que_mask);
	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
	IXGBE_CORE_UNLOCK(adapter);
	return;

watchdog:
	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
	device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
	    IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
	device_printf(dev,"TX(%d) desc avail = %d,"
	    " Next TX to Clean = %d\n",
	    txr->me, txr->tx_avail, txr->next_to_clean);
	adapter->ifp->if_flags &= ~IFF_RUNNING;
	adapter->watchdog_events++;
	ixgbe_init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);
}
/*
** Note: this routine updates the OS on the link state;
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixgbe_update_link_status(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct tx_ring	*txr = adapter->tx_rings;
	device_t	dev = adapter->dev;

	if (adapter->link_up) {
		if (adapter->link_active == FALSE) {
			if (bootverbose)
				device_printf(dev,"Link is up %d Gbps %s \n",
				    ((adapter->link_speed == 128)? 10:1),
				    "Full Duplex");
			adapter->link_active = TRUE;
			/* Update any Flow Control changes */
			ixgbe_fc_enable(&adapter->hw);
			ifp->if_link_state = LINK_STATE_UP;
			if_link_state_change(ifp);
		}
	} else { /* Link down */
		if (adapter->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev,"Link is Down\n");
			ifp->if_link_state = LINK_STATE_DOWN;
			if_link_state_change(ifp);
			adapter->link_active = FALSE;
			for (int i = 0; i < adapter->num_queues;
			    i++, txr++)
				txr->queue_status = IXGBE_QUEUE_IDLE;
		}
	}

	return;
}
/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixgbe_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter *adapter = arg;
	struct ixgbe_hw *hw = &adapter->hw;

	ifp = adapter->ifp;

	KKASSERT(lockstatus(&adapter->core_lock, curthread) != 0);

	INIT_DEBUGOUT("ixgbe_stop: begin\n");
	ixgbe_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Let the stack know...*/
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	ixgbe_reset_hw(hw);
	hw->adapter_stopped = FALSE;
	ixgbe_stop_adapter(hw);
	/* Turn off the laser */
	if (hw->phy.multispeed_fiber)
		ixgbe_disable_tx_laser(hw);

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return;
}
/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixgbe_identify_hardware(struct adapter *adapter)
{
	device_t        dev = adapter->dev;
	struct ixgbe_hw *hw = &adapter->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* We need this here to set the num_segs below */
	ixgbe_set_mac_type(hw);

	/* Pick up the 82599 and VF settings */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		hw->phy.smart_speed = ixgbe_smart_speed;
		adapter->num_segs = IXGBE_82599_SCATTER;
	} else
		adapter->num_segs = IXGBE_82598_SCATTER;

	return;
}
/*********************************************************************
 *
 *  Determine optic type
 *
 **********************************************************************/
static void
ixgbe_setup_optics(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int		layer;

	layer = ixgbe_get_supported_physical_layer(hw);

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
		adapter->optics = IFM_10G_T;
		return;
	}

	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
		adapter->optics = IFM_1000_T;
		return;
	}

	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
		adapter->optics = IFM_10G_LR;
		return;
	}

	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
		adapter->optics = IFM_10G_SR;
		return;
	}

	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
		adapter->optics = IFM_10G_TWINAX;
		return;
	}

	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
	    IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
		adapter->optics = IFM_10G_CX4;
		return;
	}

	/* If we get here just set the default */
	adapter->optics = IFM_ETHER | IFM_AUTO;
	return;
}
2074 /*********************************************************************
2076 * Setup the Legacy or MSI Interrupt handler
2078 **********************************************************************/
2080 ixgbe_allocate_legacy(struct adapter *adapter)
2082 device_t dev = adapter->dev;
2083 struct ix_queue *que = adapter->queues;
2085 unsigned int intr_flags;
2088 if (adapter->msix == 1)
2091 /* Try allocating a MSI interrupt first */
2092 adapter->intr_type = pci_alloc_1intr(dev, ixgbe_msi_enable,
2095 /* We allocate a single interrupt resource */
2096 adapter->res = bus_alloc_resource_any(dev,
2097 SYS_RES_IRQ, &rid, intr_flags);
2098 if (adapter->res == NULL) {
2099 device_printf(dev, "Unable to allocate bus resource: "
2100 "interrupt\n");
2101 return (ENXIO);
2102 }
2104 /* Tasklets for Link, SFP and Multispeed Fiber */
2105 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2106 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2108 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2111 if ((error = bus_setup_intr(dev, adapter->res, INTR_MPSAFE,
2112 ixgbe_legacy_irq, que, &adapter->tag, &adapter->serializer)) != 0) {
2113 device_printf(dev, "Failed to register fast interrupt "
2114 "handler: %d\n", error);
2115 taskqueue_free(adapter->tq);
2116 adapter->tq = NULL;
2117 return (error);
2118 }
2119 /* For simplicity in the handlers */
2120 adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
2126 /*********************************************************************
2128 * Setup MSIX Interrupt resources and handlers
2130 **********************************************************************/
2132 ixgbe_allocate_msix(struct adapter *adapter)
2134 device_t dev = adapter->dev;
2135 struct ix_queue *que = adapter->queues;
2136 int error, rid, vector = 0;
2139 error = pci_setup_msix(dev);
2141 device_printf(dev, "MSI-X setup failed\n");
2145 for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
2149 ** Bind the msix vector, and thus the
2150 ** ring to the corresponding cpu.
2152 error = pci_alloc_msix_vector(dev, vector, &rid, i);
2154 device_printf(dev, "pci_alloc_msix_vector failed\n");
2158 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2159 RF_SHAREABLE | RF_ACTIVE);
2160 if (que->res == NULL) {
2161 device_printf(dev,"Unable to allocate"
2162 " bus resource: que interrupt [%d]\n", vector);
2163 return (ENXIO);
2164 }
2165 /* Set the handler function */
2166 ksnprintf(desc, sizeof(desc), "%s que %d",
2167 device_get_nameunit(dev), i);
2168 error = bus_setup_intr_descr(dev, que->res, INTR_MPSAFE,
2169 ixgbe_msix_que, que, &que->tag, &que->serializer, desc);
2172 device_printf(dev, "Failed to register QUE handler");
2176 adapter->que_mask |= (u64)1 << que->msix;
2179 /* and Link, bind vector to cpu #0 */
2181 error = pci_alloc_msix_vector(dev, vector, &rid, 0);
2183 device_printf(dev, "pci_alloc_msix_vector failed\n");
2186 adapter->res = bus_alloc_resource_any(dev,
2187 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2188 if (!adapter->res) {
2189 device_printf(dev,"Unable to allocate"
2190 " bus resource: Link interrupt [%d]\n", rid);
2191 return (ENXIO);
2192 }
2193 /* Set the link handler function */
2194 error = bus_setup_intr_descr(dev, adapter->res, INTR_MPSAFE,
2195 ixgbe_msix_link, adapter, &adapter->tag, &adapter->serializer,
2198 adapter->res = NULL;
2199 device_printf(dev, "Failed to register LINK handler");
2202 pci_enable_msix(dev);
2204 adapter->linkvec = vector;
2205 /* Tasklets for Link, SFP and Multispeed Fiber */
2206 TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2207 TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2209 TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
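/*
 * Illustrative note: que_mask accumulates one EIMS bit per queue vector,
 * e.g. four queues on vectors 0..3 yield que_mask == 0xf with the link
 * interrupt on vector 4; the shift above is widened to 64 bits so vector
 * numbers of 32 or more cannot overflow an int.
 */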
2216 * Setup Either MSI/X or MSI
2219 ixgbe_setup_msix(struct adapter *adapter)
2221 device_t dev = adapter->dev;
2222 int rid, want, queues, msgs;
2224 /* Override by tuneable */
2225 if (ixgbe_enable_msix == 0)
2228 /* First try MSI/X */
2229 rid = PCIR_BAR(MSIX_82598_BAR);
2230 adapter->msix_mem = bus_alloc_resource_any(dev,
2231 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2232 if (!adapter->msix_mem) {
2233 rid += 4; /* 82599 maps in higher BAR */
2234 adapter->msix_mem = bus_alloc_resource_any(dev,
2235 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2237 if (!adapter->msix_mem) {
2238 /* May not be enabled */
2239 device_printf(adapter->dev,
2240 "Unable to map MSIX table\n");
2244 msgs = pci_msix_count(dev);
2245 if (msgs == 0) { /* system has msix disabled */
2246 bus_release_resource(dev, SYS_RES_MEMORY,
2247 rid, adapter->msix_mem);
2248 adapter->msix_mem = NULL;
2252 /* Figure out a reasonable auto config value */
2253 queues = (ncpus > (msgs-1)) ? (msgs-1) : ncpus;
2255 if (ixgbe_num_queues != 0)
2256 queues = ixgbe_num_queues;
2257 /* Set max queues to 8 when autoconfiguring */
2258 else if ((ixgbe_num_queues == 0) && (queues > 8))
2259 queues = 8;
2262 ** Want one vector (RX/TX pair) per queue
2263 ** plus an additional for Link.
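/*
 * Worked example (illustrative): with 8 CPUs and 16 MSI-X messages
 * advertised, queues = 8 and we want 8 + 1 = 9 vectors; if fewer are
 * available we report the problem and fall back to the Legacy/MSI path.
 */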
2269 device_printf(adapter->dev,
2270 "MSIX Configuration Problem, "
2271 "%d vectors but %d queues wanted!\n",
2272 msgs, queues);
2273 return (0); /* Will go to Legacy setup */
2276 device_printf(adapter->dev,
2277 "Using MSIX interrupts with %d vectors\n", msgs);
2278 adapter->num_queues = queues;
2282 msgs = pci_msi_count(dev);
2288 ixgbe_allocate_pci_resources(struct adapter *adapter)
2291 device_t dev = adapter->dev;
2294 adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2297 if (!(adapter->pci_mem)) {
2298 device_printf(dev,"Unable to allocate bus resource: memory\n");
2302 adapter->osdep.mem_bus_space_tag =
2303 rman_get_bustag(adapter->pci_mem);
2304 adapter->osdep.mem_bus_space_handle =
2305 rman_get_bushandle(adapter->pci_mem);
2306 adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
2308 /* Legacy defaults */
2309 adapter->num_queues = 1;
2310 adapter->hw.back = &adapter->osdep;
2313 ** Now setup MSI or MSI/X, should
2314 ** return us the number of supported
2315 ** vectors. (Will be 1 for MSI)
2317 adapter->msix = ixgbe_setup_msix(adapter);
2322 ixgbe_free_pci_resources(struct adapter * adapter)
2324 struct ix_queue *que = adapter->queues;
2325 device_t dev = adapter->dev;
2328 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2329 memrid = PCIR_BAR(MSIX_82598_BAR);
2331 memrid = PCIR_BAR(MSIX_82599_BAR);
2334 ** There is a slight possibility of a failure mode
2335 ** in attach that will result in entering this function
2336 ** before interrupt resources have been initialized, and
2337 ** in that case we do not want to execute the loops below
2338 ** We can detect this reliably by the state of the
2339 ** adapter res pointer.
2341 if (adapter->res == NULL)
2345 ** Release all msix queue resources:
2347 for (int i = 0; i < adapter->num_queues; i++, que++) {
2348 rid = que->msix + 1;
2349 if (que->tag != NULL) {
2350 bus_teardown_intr(dev, que->res, que->tag);
2353 if (que->res != NULL)
2354 bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2358 /* Clean the Legacy or Link interrupt last */
2359 if (adapter->linkvec) /* we are doing MSIX */
2360 rid = adapter->linkvec + 1;
2361 else
2362 rid = (adapter->msix != 0) ? 1 : 0;
2364 if (adapter->tag != NULL) {
2365 bus_teardown_intr(dev, adapter->res, adapter->tag);
2366 adapter->tag = NULL;
2368 if (adapter->res != NULL)
2369 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2370 if (adapter->intr_type == PCI_INTR_TYPE_MSI)
2371 pci_release_msi(adapter->dev);
2375 pci_release_msi(dev);
2377 if (adapter->msix_mem != NULL)
2378 bus_release_resource(dev, SYS_RES_MEMORY,
2379 memrid, adapter->msix_mem);
2381 if (adapter->pci_mem != NULL)
2382 bus_release_resource(dev, SYS_RES_MEMORY,
2383 PCIR_BAR(0), adapter->pci_mem);
2388 /*********************************************************************
2390 * Setup networking device structure and register an interface.
2392 **********************************************************************/
2394 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2396 struct ixgbe_hw *hw = &adapter->hw;
2399 INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2401 ifp = adapter->ifp = if_alloc(IFT_ETHER);
2402 if (ifp == NULL) {
2403 device_printf(dev, "cannot allocate ifnet structure\n");
2404 return (-1);
2405 }
2406 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2407 ifp->if_baudrate = 1000000000;
2408 ifp->if_init = ixgbe_init;
2409 ifp->if_softc = adapter;
2410 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2411 ifp->if_ioctl = ixgbe_ioctl;
2412 ifp->if_start = ixgbe_start;
2413 #if 0 /* __FreeBSD_version >= 800000 */
2414 ifp->if_transmit = ixgbe_mq_start;
2415 ifp->if_qflush = ixgbe_qflush;
2417 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 2);
2419 ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
2421 adapter->max_frame_size =
2422 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
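/* e.g. the default MTU of 1500 yields 1500 + 14 + 4 = 1518 bytes */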
2425 * Tell the upper layer(s) we support long frames.
2427 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2429 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2430 ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2431 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2432 | IFCAP_VLAN_MTU;
2436 ifp->if_capenable = ifp->if_capabilities;
2438 /* Don't enable LRO by default */
2440 ifp->if_capabilities |= IFCAP_LRO;
2444 ** Don't turn this on by default, if vlans are
2445 ** created on another pseudo device (eg. lagg)
2446 ** then vlan events are not passed thru, breaking
2447 ** operation, but with HW FILTER off it works. If
2448 ** using vlans directly on the ixgbe driver you can
2449 ** enable this and get full hardware tag filtering.
2451 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2454 * Specify the media types supported by this adapter and register
2455 * callbacks to update media and link information
2457 ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2458 ixgbe_media_status);
2459 ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
2460 ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
2461 if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2462 ifmedia_add(&adapter->media,
2463 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2464 ifmedia_add(&adapter->media,
2465 IFM_ETHER | IFM_1000_T, 0, NULL);
2467 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2468 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2474 ixgbe_config_link(struct adapter *adapter)
2476 struct ixgbe_hw *hw = &adapter->hw;
2477 u32 autoneg, err = 0;
2478 bool sfp, negotiate;
2480 sfp = ixgbe_is_sfp(hw);
2482 if (sfp) {
2483 if (hw->phy.multispeed_fiber) {
2484 hw->mac.ops.setup_sfp(hw);
2485 ixgbe_enable_tx_laser(hw);
2486 taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2487 } else
2488 taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2489 } else {
2490 if (hw->mac.ops.check_link)
2491 err = ixgbe_check_link(hw, &autoneg,
2492 &adapter->link_up, FALSE);
2495 autoneg = hw->phy.autoneg_advertised;
2496 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2497 err = hw->mac.ops.get_link_capabilities(hw,
2498 &autoneg, &negotiate);
2501 if (hw->mac.ops.setup_link)
2502 err = hw->mac.ops.setup_link(hw, autoneg,
2503 negotiate, adapter->link_up);
2509 /********************************************************************
2510 * Manage DMA'able memory.
2511 *******************************************************************/
2512 static void
2513 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2514 {
2515 if (error)
2516 return;
2517 *(bus_addr_t *) arg = segs->ds_addr;
2522 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2523 struct ixgbe_dma_alloc *dma, int mapflags)
2525 device_t dev = adapter->dev;
2528 r = bus_dma_tag_create(NULL, /* parent */
2529 DBA_ALIGN, 0, /* alignment, bounds */
2530 BUS_SPACE_MAXADDR, /* lowaddr */
2531 BUS_SPACE_MAXADDR, /* highaddr */
2532 NULL, NULL, /* filter, filterarg */
2535 size, /* maxsegsize */
2536 BUS_DMA_ALLOCNOW, /* flags */
2539 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2543 r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2544 BUS_DMA_NOWAIT, &dma->dma_map);
2546 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2550 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2554 mapflags | BUS_DMA_NOWAIT);
2556 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2560 dma->dma_size = size;
2563 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2565 bus_dma_tag_destroy(dma->dma_tag);
2567 dma->dma_map = NULL;
2568 dma->dma_tag = NULL;
2573 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2575 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2576 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2577 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2578 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2579 bus_dma_tag_destroy(dma->dma_tag);
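/*
 * Illustrative pairing (a sketch, assuming ixgbe_dma_malloc() returns 0
 * on success as its error paths suggest, and with tsize standing in for
 * some byte count): memory obtained with ixgbe_dma_malloc() must be
 * returned with ixgbe_dma_free() on teardown.
 */
#if 0
	struct ixgbe_dma_alloc dma;

	if (ixgbe_dma_malloc(adapter, tsize, &dma, BUS_DMA_NOWAIT) == 0) {
		/* ... use dma.dma_vaddr / dma.dma_paddr ... */
		ixgbe_dma_free(adapter, &dma);
	}
#endif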
2583 /*********************************************************************
2585 * Allocate memory for the transmit and receive rings, and then
2586 * the descriptors associated with each, called only once at attach.
2588 **********************************************************************/
2590 ixgbe_allocate_queues(struct adapter *adapter)
2592 device_t dev = adapter->dev;
2593 struct ix_queue *que;
2594 struct tx_ring *txr;
2595 struct rx_ring *rxr;
2596 int rsize, tsize, error = IXGBE_SUCCESS;
2597 int txconf = 0, rxconf = 0;
2599 /* First allocate the top level queue structs */
2600 if (!(adapter->queues =
2601 (struct ix_queue *) kmalloc(sizeof(struct ix_queue) *
2602 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2603 device_printf(dev, "Unable to allocate queue memory\n");
2608 /* First allocate the TX ring struct memory */
2609 if (!(adapter->tx_rings =
2610 (struct tx_ring *) kmalloc(sizeof(struct tx_ring) *
2611 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2612 device_printf(dev, "Unable to allocate TX ring memory\n");
2617 /* Next allocate the RX */
2618 if (!(adapter->rx_rings =
2619 (struct rx_ring *) kmalloc(sizeof(struct rx_ring) *
2620 adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2621 device_printf(dev, "Unable to allocate RX ring memory\n");
2626 /* For the ring itself */
2627 tsize = roundup2(adapter->num_tx_desc *
2628 sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2631 * Now set up the TX queues, txconf is needed to handle the
2632 * possibility that things fail midcourse and we need to
2633 * undo memory gracefully
2635 for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2636 /* Set up some basics */
2637 txr = &adapter->tx_rings[i];
2638 txr->adapter = adapter;
2641 /* Initialize the TX side lock */
2642 IXGBE_TX_LOCK_INIT(txr);
2644 if (ixgbe_dma_malloc(adapter, tsize,
2645 &txr->txdma, BUS_DMA_NOWAIT)) {
2647 "Unable to allocate TX Descriptor memory\n");
2651 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2652 bzero((void *)txr->tx_base, tsize);
2654 /* Now allocate transmit buffers for the ring */
2655 if (ixgbe_allocate_transmit_buffers(txr)) {
2657 "Critical Failure setting up transmit buffers\n");
2661 #if 0 /* __FreeBSD_version >= 800000 */
2662 /* Allocate a buf ring */
2663 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2664 M_WAITOK, &txr->tx_mtx);
2665 if (txr->br == NULL) {
2667 "Critical Failure setting up buf ring\n");
2675 * Next the RX queues...
2677 rsize = roundup2(adapter->num_rx_desc *
2678 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2679 for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2680 rxr = &adapter->rx_rings[i];
2681 /* Set up some basics */
2682 rxr->adapter = adapter;
2685 /* Initialize the RX side lock */
2686 ksnprintf(rxr->lock_name, sizeof(rxr->lock_name), "%s:rx(%d)",
2687 device_get_nameunit(dev), rxr->me);
2688 lockinit(&rxr->rx_lock, rxr->lock_name, 0, LK_CANRECURSE);
2690 if (ixgbe_dma_malloc(adapter, rsize,
2691 &rxr->rxdma, BUS_DMA_NOWAIT)) {
2693 "Unable to allocate RX Descriptor memory\n");
2697 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2698 bzero((void *)rxr->rx_base, rsize);
2700 /* Allocate receive buffers for the ring*/
2701 if (ixgbe_allocate_receive_buffers(rxr)) {
2703 "Critical Failure setting up receive buffers\n");
2710 ** Finally set up the queue holding structs
2712 for (int i = 0; i < adapter->num_queues; i++) {
2713 que = &adapter->queues[i];
2714 que->adapter = adapter;
2715 que->txr = &adapter->tx_rings[i];
2716 que->rxr = &adapter->rx_rings[i];
2722 for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2723 ixgbe_dma_free(adapter, &rxr->rxdma);
2725 for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2726 ixgbe_dma_free(adapter, &txr->txdma);
2727 kfree(adapter->rx_rings, M_DEVBUF);
2729 kfree(adapter->tx_rings, M_DEVBUF);
2731 kfree(adapter->queues, M_DEVBUF);
2736 /*********************************************************************
2738 * Allocate memory for tx_buffer structures. The tx_buffer stores all
2739 * the information needed to transmit a packet on the wire. This is
2740 * called only once at attach, setup is done every reset.
2742 **********************************************************************/
2744 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2746 struct adapter *adapter = txr->adapter;
2747 device_t dev = adapter->dev;
2748 struct ixgbe_tx_buf *txbuf;
2752 * Setup DMA descriptor areas.
2754 if ((error = bus_dma_tag_create(
2756 1, 0, /* alignment, bounds */
2757 BUS_SPACE_MAXADDR, /* lowaddr */
2758 BUS_SPACE_MAXADDR, /* highaddr */
2759 NULL, NULL, /* filter, filterarg */
2760 IXGBE_TSO_SIZE, /* maxsize */
2761 adapter->num_segs, /* nsegments */
2762 PAGE_SIZE, /* maxsegsize */
2765 device_printf(dev,"Unable to allocate TX DMA tag\n");
2769 if (!(txr->tx_buffers =
2770 (struct ixgbe_tx_buf *) kmalloc(sizeof(struct ixgbe_tx_buf) *
2771 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2772 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2777 /* Create the descriptor buffer dma maps */
2778 txbuf = txr->tx_buffers;
2779 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2780 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2782 device_printf(dev, "Unable to create TX DMA map\n");
2789 /* We free all, it handles case where we are in the middle */
2790 ixgbe_free_transmit_structures(adapter);
2794 /*********************************************************************
2796 * Initialize a transmit ring.
2798 **********************************************************************/
2800 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2802 struct adapter *adapter = txr->adapter;
2803 struct ixgbe_tx_buf *txbuf;
2806 struct netmap_adapter *na = NA(adapter->ifp);
2807 struct netmap_slot *slot;
2808 #endif /* DEV_NETMAP */
2810 /* Clear the old ring contents */
2814 * (under lock): if in netmap mode, do some consistency
2815 * checks and set slot to entry 0 of the netmap ring.
2817 slot = netmap_reset(na, NR_TX, txr->me, 0);
2818 #endif /* DEV_NETMAP */
2819 bzero((void *)txr->tx_base,
2820 (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
2822 txr->next_avail_desc = 0;
2823 txr->next_to_clean = 0;
2825 /* Free any existing tx buffers. */
2826 txbuf = txr->tx_buffers;
2827 for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2828 if (txbuf->m_head != NULL) {
2829 bus_dmamap_sync(txr->txtag, txbuf->map,
2830 BUS_DMASYNC_POSTWRITE);
2831 bus_dmamap_unload(txr->txtag, txbuf->map);
2832 m_freem(txbuf->m_head);
2833 txbuf->m_head = NULL;
2837 * In netmap mode, set the map for the packet buffer.
2838 * NOTE: Some drivers (not this one) also need to set
2839 * the physical buffer address in the NIC ring.
2840 * Slots in the netmap ring (indexed by "si") are
2841 * kring->nkr_hwofs positions "ahead" wrt the
2842 * corresponding slot in the NIC ring. In some drivers
2843 * (not here) nkr_hwofs can be negative. Function
2844 * netmap_idx_n2k() handles wraparounds properly.
2847 int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
2848 netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
2850 #endif /* DEV_NETMAP */
2851 /* Clear the EOP index */
2852 txbuf->eop_index = -1;
2856 /* Set the rate at which we sample packets */
2857 if (adapter->hw.mac.type != ixgbe_mac_82598EB)
2858 txr->atr_sample = atr_sample_rate;
2861 /* Set number of descriptors available */
2862 txr->tx_avail = adapter->num_tx_desc;
2864 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2865 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2866 IXGBE_TX_UNLOCK(txr);
2869 /*********************************************************************
2871 * Initialize all transmit rings.
2873 **********************************************************************/
2875 ixgbe_setup_transmit_structures(struct adapter *adapter)
2877 struct tx_ring *txr = adapter->tx_rings;
2879 for (int i = 0; i < adapter->num_queues; i++, txr++)
2880 ixgbe_setup_transmit_ring(txr);
2885 /*********************************************************************
2887 * Enable transmit unit.
2889 **********************************************************************/
2891 ixgbe_initialize_transmit_units(struct adapter *adapter)
2893 struct tx_ring *txr = adapter->tx_rings;
2894 struct ixgbe_hw *hw = &adapter->hw;
2896 /* Setup the Base and Length of the Tx Descriptor Ring */
2898 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2899 u64 tdba = txr->txdma.dma_paddr;
2902 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2903 (tdba & 0x00000000ffffffffULL));
2904 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2905 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2906 adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2908 /* Setup the HW Tx Head and Tail descriptor pointers */
2909 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2910 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2912 /* Setup Transmit Descriptor Cmd Settings */
2913 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2914 txr->queue_status = IXGBE_QUEUE_IDLE;
2916 /* Disable Head Writeback */
2917 switch (hw->mac.type) {
2918 case ixgbe_mac_82598EB:
2919 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2921 case ixgbe_mac_82599EB:
2922 case ixgbe_mac_X540:
2924 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2927 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2928 switch (hw->mac.type) {
2929 case ixgbe_mac_82598EB:
2930 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2932 case ixgbe_mac_82599EB:
2933 case ixgbe_mac_X540:
2935 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2941 if (hw->mac.type != ixgbe_mac_82598EB) {
2942 u32 dmatxctl, rttdcs;
2943 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2944 dmatxctl |= IXGBE_DMATXCTL_TE;
2945 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2946 /* Disable arbiter to set MTQC */
2947 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2948 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2949 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2950 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2951 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2952 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2958 /*********************************************************************
2960 * Free all transmit rings.
2962 **********************************************************************/
2964 ixgbe_free_transmit_structures(struct adapter *adapter)
2966 struct tx_ring *txr = adapter->tx_rings;
2968 for (int i = 0; i < adapter->num_queues; i++, txr++) {
2969 IXGBE_TX_LOCK(txr);
2970 ixgbe_free_transmit_buffers(txr);
2971 ixgbe_dma_free(adapter, &txr->txdma);
2972 IXGBE_TX_UNLOCK(txr);
2973 IXGBE_TX_LOCK_DESTROY(txr);
2975 kfree(adapter->tx_rings, M_DEVBUF);
2978 /*********************************************************************
2980 * Free transmit ring related data structures.
2982 **********************************************************************/
2984 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2986 struct adapter *adapter = txr->adapter;
2987 struct ixgbe_tx_buf *tx_buffer;
2990 INIT_DEBUGOUT("free_transmit_ring: begin");
2992 if (txr->tx_buffers == NULL)
2993 return;
2995 tx_buffer = txr->tx_buffers;
2996 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2997 if (tx_buffer->m_head != NULL) {
2998 bus_dmamap_sync(txr->txtag, tx_buffer->map,
2999 BUS_DMASYNC_POSTWRITE);
3000 bus_dmamap_unload(txr->txtag,
3002 m_freem(tx_buffer->m_head);
3003 tx_buffer->m_head = NULL;
3004 if (tx_buffer->map != NULL) {
3005 bus_dmamap_destroy(txr->txtag,
3007 tx_buffer->map = NULL;
3009 } else if (tx_buffer->map != NULL) {
3010 bus_dmamap_unload(txr->txtag,
3012 bus_dmamap_destroy(txr->txtag,
3014 tx_buffer->map = NULL;
3017 #if 0 /* __FreeBSD_version >= 800000 */
3018 if (txr->br != NULL)
3019 buf_ring_free(txr->br, M_DEVBUF);
3021 if (txr->tx_buffers != NULL) {
3022 kfree(txr->tx_buffers, M_DEVBUF);
3023 txr->tx_buffers = NULL;
3025 if (txr->txtag != NULL) {
3026 bus_dma_tag_destroy(txr->txtag);
3032 /*********************************************************************
3034 * Advanced Context Descriptor setup for VLAN or CSUM
3036 **********************************************************************/
3039 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
3041 struct adapter *adapter = txr->adapter;
3042 struct ixgbe_adv_tx_context_desc *TXD;
3043 struct ixgbe_tx_buf *tx_buffer;
3044 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3045 struct ether_vlan_header *eh;
3047 struct ip6_hdr *ip6;
3048 int ehdrlen, ip_hlen = 0;
3051 bool offload = TRUE;
3052 int ctxd = txr->next_avail_desc;
3056 if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3057 offload = FALSE;
3059 tx_buffer = &txr->tx_buffers[ctxd];
3060 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3063 ** In advanced descriptors the vlan tag must
3064 ** be placed into the descriptor itself.
3066 if (mp->m_flags & M_VLANTAG) {
3067 vtag = htole16(mp->m_pkthdr.ether_vlantag);
3068 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3069 } else if (offload == FALSE)
3070 return FALSE;
3073 * Determine where frame payload starts.
3074 * Jump over vlan headers if already present,
3075 * helpful for QinQ too.
3077 eh = mtod(mp, struct ether_vlan_header *);
3078 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3079 etype = ntohs(eh->evl_proto);
3080 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3082 etype = ntohs(eh->evl_encap_proto);
3083 ehdrlen = ETHER_HDR_LEN;
3086 /* Set the ether header length */
3087 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3091 ip = (struct ip *)(mp->m_data + ehdrlen);
3092 ip_hlen = ip->ip_hl << 2;
3094 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3096 case ETHERTYPE_IPV6:
3097 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3098 ip_hlen = sizeof(struct ip6_hdr);
3099 /* XXX-BZ this will go badly in case of ext hdrs. */
3100 ipproto = ip6->ip6_nxt;
3101 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3108 vlan_macip_lens |= ip_hlen;
3109 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3113 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3114 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3118 if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3119 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
3124 if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
3125 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3133 /* Now copy bits into descriptor */
3134 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3135 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3136 TXD->seqnum_seed = htole32(0);
3137 TXD->mss_l4len_idx = htole32(0);
3139 tx_buffer->m_head = NULL;
3140 tx_buffer->eop_index = -1;
3142 /* We've consumed the first desc, adjust counters */
3143 if (++ctxd == adapter->num_tx_desc)
3145 txr->next_avail_desc = ctxd;
3146 return TRUE;
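/*
 * Illustrative layout note: a context descriptor carries no packet data;
 * vlan_macip_lens packs the VLAN tag (bits 31:16), MAC header length and
 * IP header length, while type_tucmd_mlhl selects the descriptor type
 * plus the IPv4/IPv6 and L4 checksum options.
 */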
3151 /**********************************************************************
3153 * Setup work for hardware segmentation offload (TSO) on
3154 * adapters using advanced tx descriptors
3156 **********************************************************************/
3158 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen,
3161 struct adapter *adapter = txr->adapter;
3162 struct ixgbe_adv_tx_context_desc *TXD;
3163 struct ixgbe_tx_buf *tx_buffer;
3164 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3165 u16 vtag = 0, eh_type;
3166 u32 mss_l4len_idx = 0, len;
3167 int ctxd, ehdrlen, ip_hlen, tcp_hlen;
3168 struct ether_vlan_header *eh;
3169 #if 0 /* IPv6 TSO */
3171 struct ip6_hdr *ip6;
3181 * Determine where frame payload starts.
3182 * Jump over vlan headers if already present
3184 eh = mtod(mp, struct ether_vlan_header *);
3185 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3186 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3187 eh_type = eh->evl_proto;
3189 ehdrlen = ETHER_HDR_LEN;
3190 eh_type = eh->evl_encap_proto;
3193 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3194 len = ehdrlen + sizeof(struct tcphdr);
3195 switch (ntohs(eh_type)) {
3196 #if 0 /* IPv6 TSO */
3198 case ETHERTYPE_IPV6:
3199 if (mp->m_len < len + sizeof(struct ip6_hdr))
3201 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3202 /* XXX-BZ For now we do not pretend to support ext. hdrs. */
3203 if (ip6->ip6_nxt != IPPROTO_TCP)
3205 ip_hlen = sizeof(struct ip6_hdr);
3206 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
3207 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3208 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3214 if (mp->m_len < len + sizeof(struct ip))
3216 ip = (struct ip *)(mp->m_data + ehdrlen);
3217 if (ip->ip_p != IPPROTO_TCP)
3220 ip_hlen = ip->ip_hl << 2;
3221 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3222 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3223 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3224 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3225 /* Tell transmit desc to also do IPv4 checksum. */
3226 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
3230 panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
3231 __func__, ntohs(eh_type));
3235 ctxd = txr->next_avail_desc;
3236 tx_buffer = &txr->tx_buffers[ctxd];
3237 TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3239 tcp_hlen = th->th_off << 2;
3241 /* This is used in the transmit desc in encap */
3242 *paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
3244 /* VLAN MACLEN IPLEN */
3245 if (mp->m_flags & M_VLANTAG) {
3246 vtag = htole16(mp->m_pkthdr.ether_vlantag);
3247 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3250 vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3251 vlan_macip_lens |= ip_hlen;
3252 TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3254 /* ADV DTYPE TUCMD */
3255 type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3256 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3257 TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3260 mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3261 mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3262 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3264 TXD->seqnum_seed = htole32(0);
3265 tx_buffer->m_head = NULL;
3266 tx_buffer->eop_index = -1;
3268 if (++ctxd == adapter->num_tx_desc)
3272 txr->next_avail_desc = ctxd;
3273 return TRUE;
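/*
 * Worked example (illustrative): for a TSO send with a 14 byte ethernet
 * header, 20 byte IPv4 header and 20 byte TCP header,
 * *paylen = m_pkthdr.len - 14 - 20 - 20, and mss_l4len_idx carries
 * tso_segsz so the hardware knows where to slice the payload.
 */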
3278 ** This routine parses packet headers so that Flow
3279 ** Director can make a hashed filter table entry
3280 ** allowing traffic flows to be identified and kept
3281 ** on the same cpu. This would be a performance
3282 ** hit, but we only do it at IXGBE_FDIR_RATE of
3286 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
3288 struct adapter *adapter = txr->adapter;
3289 struct ix_queue *que;
3293 struct ether_vlan_header *eh;
3294 union ixgbe_atr_hash_dword input = {.dword = 0};
3295 union ixgbe_atr_hash_dword common = {.dword = 0};
3296 int ehdrlen, ip_hlen;
3299 eh = mtod(mp, struct ether_vlan_header *);
3300 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3301 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3302 etype = eh->evl_proto;
3304 ehdrlen = ETHER_HDR_LEN;
3305 etype = eh->evl_encap_proto;
3308 /* Only handling IPv4 */
3309 if (etype != htons(ETHERTYPE_IP))
3312 ip = (struct ip *)(mp->m_data + ehdrlen);
3313 ip_hlen = ip->ip_hl << 2;
3315 /* check if we're UDP or TCP */
3318 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3319 /* src and dst are inverted */
3320 common.port.dst ^= th->th_sport;
3321 common.port.src ^= th->th_dport;
3322 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
3325 uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
3326 /* src and dst are inverted */
3327 common.port.dst ^= uh->uh_sport;
3328 common.port.src ^= uh->uh_dport;
3329 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
3335 input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vlantag);
3336 if (mp->m_pkthdr.ether_vlantag)
3337 common.flex_bytes ^= htons(ETHERTYPE_VLAN);
3338 else
3339 common.flex_bytes ^= etype;
3340 common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
3342 que = &adapter->queues[txr->me];
3344 ** This assumes the Rx queue and Tx
3345 ** queue are bound to the same CPU
3347 ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
3348 input, common, que->msix);
3350 #endif /* IXGBE_FDIR */
3352 /**********************************************************************
3354 * Examine each tx_buffer in the used queue. If the hardware is done
3355 * processing the packet then free associated resources. The
3356 * tx_buffer is put back on the free queue.
3358 **********************************************************************/
3360 ixgbe_txeof(struct tx_ring *txr)
3362 struct adapter *adapter = txr->adapter;
3363 struct ifnet *ifp = adapter->ifp;
3364 u32 first, last, done, processed;
3365 struct ixgbe_tx_buf *tx_buffer;
3366 struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3368 IXGBE_TX_LOCK_ASSERT(txr);
3371 if (ifp->if_capenable & IFCAP_NETMAP) {
3372 struct netmap_adapter *na = NA(ifp);
3373 struct netmap_kring *kring = &na->tx_rings[txr->me];
3375 tx_desc = (struct ixgbe_legacy_tx_desc *)txr->tx_base;
3377 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3378 BUS_DMASYNC_POSTREAD);
3380 * In netmap mode, all the work is done in the context
3381 * of the client thread. Interrupt handlers only wake up
3382 * clients, which may be sleeping on individual rings
3383 * or on a global resource for all rings.
3384 * To implement tx interrupt mitigation, we wake up the client
3385 * thread roughly every half ring, even if the NIC interrupts
3386 * more frequently. This is implemented as follows:
3387 * - ixgbe_txsync() sets kring->nr_kflags with the index of
3388 * the slot that should wake up the thread (nkr_num_slots
3389 * means the user thread should not be woken up);
3390 * - the driver ignores tx interrupts unless netmap_mitigate=0
3391 * or the slot has the DD bit set.
3393 * When the driver has separate locks, we need to
3394 * release and re-acquire txlock to avoid deadlocks.
3395 * XXX see if we can find a better way.
3397 if (!netmap_mitigate ||
3398 (kring->nr_kflags < kring->nkr_num_slots &&
3399 tx_desc[kring->nr_kflags].upper.fields.status & IXGBE_TXD_STAT_DD)) {
3400 kring->nr_kflags = kring->nkr_num_slots;
3401 selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
3402 IXGBE_TX_UNLOCK(txr);
3403 IXGBE_CORE_LOCK(adapter);
3404 selwakeuppri(&na->tx_si, PI_NET);
3405 IXGBE_CORE_UNLOCK(adapter);
3410 #endif /* DEV_NETMAP */
3412 if (txr->tx_avail == adapter->num_tx_desc) {
3413 txr->queue_status = IXGBE_QUEUE_IDLE;
3414 return FALSE;
3415 }
3418 first = txr->next_to_clean;
3419 tx_buffer = &txr->tx_buffers[first];
3420 /* For cleanup we just use legacy struct */
3421 tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3422 last = tx_buffer->eop_index;
3423 if (last == -1)
3424 return FALSE;
3425 eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3428 ** Get the index of the first descriptor
3429 ** BEYOND the EOP and call that 'done'.
3430 ** I do this so the comparison in the
3431 ** inner while loop below can be simple
3433 if (++last == adapter->num_tx_desc) last = 0;
3434 done = last;
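/*
 * Worked example (illustrative): with 1024 descriptors, if first == 1020
 * and the packet's EOP sits at 1023, 'done' wraps to 0 and the inner
 * loop cleans slots 1020..1023 before looking for another completed
 * packet.
 */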
3436 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3437 BUS_DMASYNC_POSTREAD);
3439 ** Only the EOP descriptor of a packet now has the DD
3440 ** bit set, this is what we look for...
3442 while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3443 /* We clean the range of the packet */
3444 while (first != done) {
3445 tx_desc->upper.data = 0;
3446 tx_desc->lower.data = 0;
3447 tx_desc->buffer_addr = 0;
3448 ++txr->tx_avail;
3449 ++processed;
3451 if (tx_buffer->m_head) {
3452 ifp->if_obytes +=
3453 tx_buffer->m_head->m_pkthdr.len;
3454 bus_dmamap_sync(txr->txtag,
3456 BUS_DMASYNC_POSTWRITE);
3457 bus_dmamap_unload(txr->txtag,
3459 m_freem(tx_buffer->m_head);
3460 tx_buffer->m_head = NULL;
3461 tx_buffer->map = NULL;
3463 tx_buffer->eop_index = -1;
3464 txr->watchdog_time = ticks;
3466 if (++first == adapter->num_tx_desc)
3467 first = 0;
3469 tx_buffer = &txr->tx_buffers[first];
3471 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3475 /* See if there is more work now */
3476 last = tx_buffer->eop_index;
3477 if (last != -1) {
3478 eop_desc =
3479 (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3480 /* Get next done point */
3481 if (++last == adapter->num_tx_desc) last = 0;
3482 done = last;
3483 } else
3484 break;
3485 }
3486 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3487 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3489 txr->next_to_clean = first;
3492 ** Watchdog calculation, we know there's
3493 ** work outstanding or the first return
3494 ** would have been taken, so none processed
3495 ** for too long indicates a hang.
3497 if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
3498 txr->queue_status = IXGBE_QUEUE_HUNG;
3500 /* With a minimum free clear the depleted state bit. */
3501 if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD)
3502 txr->queue_status &= ~IXGBE_QUEUE_DEPLETED;
3504 if (txr->tx_avail == adapter->num_tx_desc) {
3505 txr->queue_status = IXGBE_QUEUE_IDLE;
3506 return FALSE;
3507 }
3508 return TRUE;
3509 /*********************************************************************
3511 * Refresh mbuf buffers for RX descriptor rings
3512 * - now keeps its own state so discards due to resource
3513 * exhaustion are unnecessary; if an mbuf cannot be obtained
3514 * it just returns, keeping its placeholder, and thus it can
3515 * simply be recalled to try again.
3517 **********************************************************************/
3519 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
3521 struct adapter *adapter = rxr->adapter;
3522 bus_dma_segment_t hseg[1];
3523 bus_dma_segment_t pseg[1];
3524 struct ixgbe_rx_buf *rxbuf;
3525 struct mbuf *mh, *mp;
3526 int i, j, nsegs, error;
3527 bool refreshed = FALSE;
3529 i = j = rxr->next_to_refresh;
3530 /* Control the loop with one beyond */
3531 if (++j == adapter->num_rx_desc)
3532 j = 0;
3534 while (j != limit) {
3535 rxbuf = &rxr->rx_buffers[i];
3536 if (rxr->hdr_split == FALSE)
3537 goto no_split;
3539 if (rxbuf->m_head == NULL) {
3540 mh = m_gethdr(MB_DONTWAIT, MT_DATA);
3541 if (mh == NULL)
3542 goto update;
3543 } else
3544 mh = rxbuf->m_head;
3546 mh->m_pkthdr.len = mh->m_len = MHLEN;
3548 mh->m_flags |= M_PKTHDR;
3549 /* Get the memory mapping */
3550 error = bus_dmamap_load_mbuf_segment(rxr->htag,
3551 rxbuf->hmap, mh, hseg, 1, &nsegs, BUS_DMA_NOWAIT);
3553 kprintf("Refresh mbufs: hdr dmamap load"
3554 " failure - %d\n", error);
3556 rxbuf->m_head = NULL;
3560 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3561 BUS_DMASYNC_PREREAD);
3562 rxr->rx_base[i].read.hdr_addr =
3563 htole64(hseg[0].ds_addr);
3565 no_split:
3566 if (rxbuf->m_pack == NULL) {
3567 mp = m_getjcl(MB_DONTWAIT, MT_DATA,
3568 M_PKTHDR, adapter->rx_mbuf_sz);
3569 if (mp == NULL)
3570 goto update;
3571 } else
3572 mp = rxbuf->m_pack;
3574 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3575 /* Get the memory mapping */
3576 error = bus_dmamap_load_mbuf_segment(rxr->ptag,
3577 rxbuf->pmap, mp, pseg, 1, &nsegs, BUS_DMA_NOWAIT);
3579 kprintf("Refresh mbufs: payload dmamap load"
3580 " failure - %d\n", error);
3582 rxbuf->m_pack = NULL;
3586 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3587 BUS_DMASYNC_PREREAD);
3588 rxr->rx_base[i].read.pkt_addr =
3589 htole64(pseg[0].ds_addr);
3592 /* Next is precalculated */
3593 i = j;
3594 rxr->next_to_refresh = i;
3595 if (++j == adapter->num_rx_desc)
3596 j = 0;
3597 }
3598 update:
3599 if (refreshed) /* Update hardware tail index */
3600 IXGBE_WRITE_REG(&adapter->hw,
3601 IXGBE_RDT(rxr->me), rxr->next_to_refresh);
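/*
 * Illustrative note: the refresh window runs from next_to_refresh up to,
 * but not including, 'limit'; the rx cleanup path is assumed to pass its
 * current descriptor index so only consumed slots are restocked.
 */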
3605 /*********************************************************************
3607 * Allocate memory for rx_buffer structures. Since we use one
3608 * rx_buffer per received packet, the maximum number of rx_buffer's
3609 * that we'll need is equal to the number of receive descriptors
3610 * that we've allocated.
3612 **********************************************************************/
3614 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3616 struct adapter *adapter = rxr->adapter;
3617 device_t dev = adapter->dev;
3618 struct ixgbe_rx_buf *rxbuf;
3619 int i, bsize, error;
3621 bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3622 if (!(rxr->rx_buffers =
3623 (struct ixgbe_rx_buf *) kmalloc(bsize,
3624 M_DEVBUF, M_NOWAIT | M_ZERO))) {
3625 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3630 if ((error = bus_dma_tag_create(NULL, /* parent */
3631 1, 0, /* alignment, bounds */
3632 BUS_SPACE_MAXADDR, /* lowaddr */
3633 BUS_SPACE_MAXADDR, /* highaddr */
3634 NULL, NULL, /* filter, filterarg */
3635 MSIZE, /* maxsize */
3637 MSIZE, /* maxsegsize */
3640 device_printf(dev, "Unable to create RX DMA tag\n");
3644 if ((error = bus_dma_tag_create(NULL, /* parent */
3645 1, 0, /* alignment, bounds */
3646 BUS_SPACE_MAXADDR, /* lowaddr */
3647 BUS_SPACE_MAXADDR, /* highaddr */
3648 NULL, NULL, /* filter, filterarg */
3649 MJUM16BYTES, /* maxsize */
3651 MJUM16BYTES, /* maxsegsize */
3654 device_printf(dev, "Unable to create RX DMA tag\n");
3658 for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
3659 rxbuf = &rxr->rx_buffers[i];
3660 error = bus_dmamap_create(rxr->htag,
3661 BUS_DMA_NOWAIT, &rxbuf->hmap);
3663 device_printf(dev, "Unable to create RX head map\n");
3666 error = bus_dmamap_create(rxr->ptag,
3667 BUS_DMA_NOWAIT, &rxbuf->pmap);
3669 device_printf(dev, "Unable to create RX pkt map\n");
3677 /* Frees all, but can handle partial completion */
3678 ixgbe_free_receive_structures(adapter);
3683 ** Used to detect a descriptor that has
3684 ** been merged by Hardware RSC.
3687 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
3689 return (le32toh(rx->wb.lower.lo_dword.data) &
3690 IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
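/*
 * Example (illustrative, assuming the standard RSCCNT field at bits
 * 20:17 of the low dword): a raw lo_dword of 0x00060000 decodes to a
 * count of 3 descriptors merged by hardware RSC.
 */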
3693 /*********************************************************************
3695 * Initialize Hardware RSC (LRO) feature on 82599
3696 * for an RX ring, this is toggled by the LRO capability
3697 * even though it is transparent to the stack.
3699 **********************************************************************/
3702 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
3704 struct adapter *adapter = rxr->adapter;
3705 struct ixgbe_hw *hw = &adapter->hw;
3706 u32 rscctrl, rdrxctl;
3708 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3709 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3710 #ifdef DEV_NETMAP /* crcstrip is optional in netmap */
3711 if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3712 #endif /* DEV_NETMAP */
3713 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3714 rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
3715 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3717 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
3718 rscctrl |= IXGBE_RSCCTL_RSCEN;
3720 ** Limit the total number of descriptors that
3721 ** can be combined, so it does not exceed 64K
3723 if (adapter->rx_mbuf_sz == MCLBYTES)
3724 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3725 else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
3726 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
3727 else if (adapter->rx_mbuf_sz == MJUM9BYTES)
3728 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
3729 else /* Using 16K cluster */
3730 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
3732 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
3734 /* Enable TCP header recognition */
3735 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
3736 (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
3737 IXGBE_PSRTYPE_TCPHDR));
3739 /* Disable RSC for ACK packets */
3740 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3741 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
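/*
 * Illustrative arithmetic for the MAXDESC choice above: 2KB clusters
 * * 16 = 32KB, page-size (4KB) clusters * 8 = 32KB, 9KB * 4 = 36KB and
 * 16KB * 1 = 16KB, all safely below the 64KB RSC limit.
 */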
3748 ixgbe_free_receive_ring(struct rx_ring *rxr)
3750 struct adapter *adapter;
3751 struct ixgbe_rx_buf *rxbuf;
3754 adapter = rxr->adapter;
3755 for (i = 0; i < adapter->num_rx_desc; i++) {
3756 rxbuf = &rxr->rx_buffers[i];
3757 if (rxbuf->m_head != NULL) {
3758 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3759 BUS_DMASYNC_POSTREAD);
3760 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3761 rxbuf->m_head->m_flags |= M_PKTHDR;
3762 m_freem(rxbuf->m_head);
3764 if (rxbuf->m_pack != NULL) {
3765 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3766 BUS_DMASYNC_POSTREAD);
3767 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3768 rxbuf->m_pack->m_flags |= M_PKTHDR;
3769 m_freem(rxbuf->m_pack);
3771 rxbuf->m_head = NULL;
3772 rxbuf->m_pack = NULL;
3777 /*********************************************************************
3779 * Initialize a receive ring and its buffers.
3781 **********************************************************************/
3783 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3785 struct adapter *adapter;
3790 struct ixgbe_rx_buf *rxbuf;
3791 bus_dma_segment_t pseg[1], hseg[1];
3793 struct lro_ctrl *lro = &rxr->lro;
3795 int rsize, nsegs, error = 0;
3797 struct netmap_adapter *na = NA(rxr->adapter->ifp);
3798 struct netmap_slot *slot;
3799 #endif /* DEV_NETMAP */
3801 adapter = rxr->adapter;
3807 /* Clear the ring contents */
3810 /* same as in ixgbe_setup_transmit_ring() */
3811 slot = netmap_reset(na, NR_RX, rxr->me, 0);
3812 #endif /* DEV_NETMAP */
3813 rsize = roundup2(adapter->num_rx_desc *
3814 sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3815 bzero((void *)rxr->rx_base, rsize);
3817 /* Free current RX buffer structs and their mbufs */
3818 ixgbe_free_receive_ring(rxr);
3820 /* Configure header split? */
3821 if (ixgbe_header_split)
3822 rxr->hdr_split = TRUE;
3824 /* Now replenish the mbufs */
3825 for (int j = 0; j != adapter->num_rx_desc; ++j) {
3826 struct mbuf *mh, *mp;
3828 rxbuf = &rxr->rx_buffers[j];
3831 * In netmap mode, fill the map and set the buffer
3832 * address in the NIC ring, considering the offset
3833 * between the netmap and NIC rings (see comment in
3834 * ixgbe_setup_transmit_ring() ). No need to allocate
3835 * an mbuf, so end the block with a continue;
3838 int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
3842 addr = PNMB(slot + sj, &paddr);
3843 netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
3844 /* Update descriptor */
3845 rxr->rx_base[j].read.pkt_addr = htole64(paddr);
3848 #endif /* DEV_NETMAP */
3850 ** Don't allocate mbufs if not
3851 ** doing header split, its wasteful
3853 if (rxr->hdr_split == FALSE)
3856 /* First the header */
3857 rxbuf->m_head = m_gethdr(MB_DONTWAIT, MT_DATA);
3858 if (rxbuf->m_head == NULL) {
3859 error = ENOBUFS;
3860 goto fail;
3861 }
3862 m_adj(rxbuf->m_head, ETHER_ALIGN);
3863 mh = rxbuf->m_head;
3864 mh->m_len = mh->m_pkthdr.len = MHLEN;
3865 mh->m_flags |= M_PKTHDR;
3866 /* Get the memory mapping */
3867 error = bus_dmamap_load_mbuf_segment(rxr->htag,
3868 rxbuf->hmap, rxbuf->m_head, hseg, 1,
3869 &nsegs, BUS_DMA_NOWAIT);
3871 if (error != 0) /* Nothing elegant to do here */
3873 bus_dmamap_sync(rxr->htag,
3874 rxbuf->hmap, BUS_DMASYNC_PREREAD);
3875 /* Update descriptor */
3876 rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
3879 /* Now the payload cluster */
3880 rxbuf->m_pack = m_getjcl(MB_DONTWAIT, MT_DATA,
3881 M_PKTHDR, adapter->rx_mbuf_sz);
3882 if (rxbuf->m_pack == NULL) {
3883 error = ENOBUFS;
3884 goto fail;
3885 }
3886 mp = rxbuf->m_pack;
3887 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3888 /* Get the memory mapping */
3889 error = bus_dmamap_load_mbuf_segment(rxr->ptag,
3890 rxbuf->pmap, mp, pseg, 1,
3891 &nsegs, BUS_DMA_NOWAIT);
3894 bus_dmamap_sync(rxr->ptag,
3895 rxbuf->pmap, BUS_DMASYNC_PREREAD);
3896 /* Update descriptor */
3897 rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
3901 /* Setup our descriptor indices */
3902 rxr->next_to_check = 0;
3903 rxr->next_to_refresh = 0;
3904 rxr->lro_enabled = FALSE;
3905 rxr->rx_split_packets = 0;
3907 rxr->discard = FALSE;
3908 rxr->vtag_strip = FALSE;
3910 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3911 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3914 ** Now set up the LRO interface:
3915 ** 82598 uses software LRO, the
3916 ** 82599 and X540 use a hardware assist.
3919 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
3920 (ifp->if_capenable & IFCAP_RXCSUM) &&
3921 (ifp->if_capenable & IFCAP_LRO))
3922 ixgbe_setup_hw_rsc(rxr);
3923 else if (ifp->if_capenable & IFCAP_LRO) {
3924 int err = tcp_lro_init(lro);
3926 device_printf(dev, "LRO Initialization failed!\n");
3929 INIT_DEBUGOUT("RX Soft LRO Initialized\n");
3930 rxr->lro_enabled = TRUE;
3931 lro->ifp = adapter->ifp;
3935 IXGBE_RX_UNLOCK(rxr);
3936 return (0);
3938 fail:
3939 ixgbe_free_receive_ring(rxr);
3940 IXGBE_RX_UNLOCK(rxr);
3944 /*********************************************************************
3946 * Initialize all receive rings.
3948 **********************************************************************/
3950 ixgbe_setup_receive_structures(struct adapter *adapter)
3952 struct rx_ring *rxr = adapter->rx_rings;
3955 for (j = 0; j < adapter->num_queues; j++, rxr++)
3956 if (ixgbe_setup_receive_ring(rxr))
3962 * Free RX buffers allocated so far, we will only handle
3963 * the rings that completed, the failing case will have
3964 * cleaned up for itself. 'j' failed, so it's the terminus.
3966 for (int i = 0; i < j; ++i) {
3967 rxr = &adapter->rx_rings[i];
3968 ixgbe_free_receive_ring(rxr);
3974 /*********************************************************************
3976 * Setup receive registers and features.
3978 **********************************************************************/
3979 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
3981 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
3984 ixgbe_initialize_receive_units(struct adapter *adapter)
3986 struct rx_ring *rxr = adapter->rx_rings;
3987 struct ixgbe_hw *hw = &adapter->hw;
3988 struct ifnet *ifp = adapter->ifp;
3989 u32 bufsz, rxctrl, fctrl, srrctl, rxcsum;
3990 u32 reta, mrqc = 0, hlreg, random[10];
3994 * Make sure receives are disabled while
3995 * setting up the descriptor ring
3997 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3998 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
3999 rxctrl & ~IXGBE_RXCTRL_RXEN);
4001 /* Enable broadcasts */
4002 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4003 fctrl |= IXGBE_FCTRL_BAM;
4004 fctrl |= IXGBE_FCTRL_DPF;
4005 fctrl |= IXGBE_FCTRL_PMCF;
4006 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4008 /* Set for Jumbo Frames? */
4009 hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4010 if (ifp->if_mtu > ETHERMTU)
4011 hlreg |= IXGBE_HLREG0_JUMBOEN;
4013 hlreg &= ~IXGBE_HLREG0_JUMBOEN;
4014 #ifdef DEV_NETMAP
4015 /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
4016 if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
4017 hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
4018 else
4019 hlreg |= IXGBE_HLREG0_RXCRCSTRP;
4020 #endif /* DEV_NETMAP */
4021 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
4023 bufsz = (adapter->rx_mbuf_sz +
4024 BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
4026 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4027 u64 rdba = rxr->rxdma.dma_paddr;
4029 /* Setup the Base and Length of the Rx Descriptor Ring */
4030 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
4031 (rdba & 0x00000000ffffffffULL));
4032 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
4033 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
4034 adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
4036 /* Set up the SRRCTL register */
4037 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
4038 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
4039 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
4041 if (rxr->hdr_split) {
4042 /* Use a standard mbuf for the header */
4043 srrctl |= ((IXGBE_RX_HDR <<
4044 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
4045 & IXGBE_SRRCTL_BSIZEHDR_MASK);
4046 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
4048 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
4049 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
4051 /* Setup the HW Rx Head and Tail Descriptor Pointers */
4052 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
4053 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
4056 if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
4057 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
4058 IXGBE_PSRTYPE_UDPHDR |
4059 IXGBE_PSRTYPE_IPV4HDR |
4060 IXGBE_PSRTYPE_IPV6HDR;
4061 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
4064 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
4067 if (adapter->num_queues > 1) {
4071 /* set up random bits */
4072 karc4rand(&random, sizeof(random));
4074 /* Set up the redirection table */
4075 for (i = 0, j = 0; i < 128; i++, j++) {
4076 if (j == adapter->num_queues) j = 0;
4077 reta = (reta << 8) | (j * 0x11);
4078 if ((i & 3) == 3)
4079 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
4080 }
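/*
 * Worked example (illustrative): with 4 queues the table bytes cycle
 * 0x00, 0x11, 0x22, 0x33, so the 32 RETA registers (128 entries, four
 * per register) spread hash buckets evenly across queues 0..3.
 */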
4082 /* Now fill our hash function seeds */
4083 for (int i = 0; i < 10; i++)
4084 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
4086 /* Perform hash on these packet types */
4087 mrqc = IXGBE_MRQC_RSSEN
4088 | IXGBE_MRQC_RSS_FIELD_IPV4
4089 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
4090 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
4091 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
4092 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
4093 | IXGBE_MRQC_RSS_FIELD_IPV6
4094 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
4095 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
4096 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
4097 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
4099 /* RSS and RX IPP Checksum are mutually exclusive */
4100 rxcsum |= IXGBE_RXCSUM_PCSD;
4103 if (ifp->if_capenable & IFCAP_RXCSUM)
4104 rxcsum |= IXGBE_RXCSUM_PCSD;
4106 if (!(rxcsum & IXGBE_RXCSUM_PCSD))
4107 rxcsum |= IXGBE_RXCSUM_IPPCSE;
4109 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
4114 /*********************************************************************
4116 * Free all receive rings.
4118 **********************************************************************/
4120 ixgbe_free_receive_structures(struct adapter *adapter)
4122 struct rx_ring *rxr = adapter->rx_rings;
4124 for (int i = 0; i < adapter->num_queues; i++, rxr++) {
4126 struct lro_ctrl *lro = &rxr->lro;
4128 ixgbe_free_receive_buffers(rxr);
4129 /* Free LRO memory */
4133 /* Free the ring memory as well */
4134 ixgbe_dma_free(adapter, &rxr->rxdma);
4137 kfree(adapter->rx_rings, M_DEVBUF);
4141 /*********************************************************************
4143 * Free receive ring data structures
4145 **********************************************************************/
4147 ixgbe_free_receive_buffers(struct rx_ring *rxr)
4149 struct adapter *adapter = rxr->adapter;
4150 struct ixgbe_rx_buf *rxbuf;
4152 INIT_DEBUGOUT("free_receive_structures: begin");
4154 /* Cleanup any existing buffers */
4155 if (rxr->rx_buffers != NULL) {
4156 for (int i = 0; i < adapter->num_rx_desc; i++) {
4157 rxbuf = &rxr->rx_buffers[i];
4158 if (rxbuf->m_head != NULL) {
4159 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
4160 BUS_DMASYNC_POSTREAD);
4161 bus_dmamap_unload(rxr->htag, rxbuf->hmap);
4162 rxbuf->m_head->m_flags |= M_PKTHDR;
4163 m_freem(rxbuf->m_head);
4165 if (rxbuf->m_pack != NULL) {
4166 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
4167 BUS_DMASYNC_POSTREAD);
4168 bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
4169 rxbuf->m_pack->m_flags |= M_PKTHDR;
4170 m_freem(rxbuf->m_pack);
4172 rxbuf->m_head = NULL;
4173 rxbuf->m_pack = NULL;
4174 if (rxbuf->hmap != NULL) {
4175 bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
4178 if (rxbuf->pmap != NULL) {
4179 bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
4183 if (rxr->rx_buffers != NULL) {
4184 kfree(rxr->rx_buffers, M_DEVBUF);
4185 rxr->rx_buffers = NULL;
4189 if (rxr->htag != NULL) {
4190 bus_dma_tag_destroy(rxr->htag);
4193 if (rxr->ptag != NULL) {
4194 bus_dma_tag_destroy(rxr->ptag);
4201 static __inline void
4202 ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
4206 * At the moment LRO is only for IP/TCP packets and the TCP checksum
4207 * of the packet should be computed by hardware. Also it should not
4208 * have a VLAN tag in its ethernet header. In case of IPv6 we do not
4209 * yet support ext. hdrs.
4211 if (rxr->lro_enabled &&
4212 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
4213 (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
4214 ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
4215 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
4216 (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
4217 (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
4218 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
4219 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
4221 * Send to the stack if:
4222 ** - LRO not enabled, or
4223 ** - no LRO resources, or
4224 ** - lro enqueue fails
4226 if (rxr->lro.lro_cnt != 0)
4227 if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
4231 IXGBE_RX_UNLOCK(rxr);
4232 (*ifp->if_input)(ifp, m);
4236 static __inline void
4237 ixgbe_rx_discard(struct rx_ring *rxr, int i)
4239 struct ixgbe_rx_buf *rbuf;
4241 rbuf = &rxr->rx_buffers[i];
4243 if (rbuf->fmp != NULL) {/* Partial chain ? */
4244 rbuf->fmp->m_flags |= M_PKTHDR;
4245 m_freem(rbuf->fmp);
4246 rbuf->fmp = NULL;
4247 }
4250 ** With advanced descriptors the writeback
4251 ** clobbers the buffer addrs, so it's easier
4252 ** to just free the existing mbufs and take
4253 ** the normal refresh path to get new buffers
4256 if (rbuf->m_head) {
4257 m_free(rbuf->m_head);
4258 rbuf->m_head = NULL;
4259 }
4261 if (rbuf->m_pack) {
4262 m_free(rbuf->m_pack);
4263 rbuf->m_pack = NULL;
4264 }
static void
ixgbe_add_sysctl(struct adapter *adapter)
{
	sysctl_ctx_init(&adapter->sysctl_ctx);
	adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(adapter->dev), CTLFLAG_RD, 0, "");
	if (adapter->sysctl_tree == NULL) {
		device_printf(adapter->dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
	    SYSCTL_CHILDREN(adapter->sysctl_tree),
	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");

	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
	    SYSCTL_CHILDREN(adapter->sysctl_tree),
	    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_sysctl_intr_rate, "I", "interrupt rate");

	/*
	** Allow a kind of speed control by forcing the autoneg
	** advertised speed list to only a certain value; this
	** supports 1G on 82599 devices and 100Mb on X540.
	*/
	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
	    SYSCTL_CHILDREN(adapter->sysctl_tree),
	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
	    adapter, 0, ixgbe_set_advertise, "I", "Link Speed");

	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
	    SYSCTL_CHILDREN(adapter->sysctl_tree),
	    OID_AUTO, "ts", CTLTYPE_INT | CTLFLAG_RW, adapter,
	    0, ixgbe_set_thermal_test, "I", "Thermal Test");

	/* Sysctl for limiting the amount of work done in the taskqueue */
	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
	    "max number of rx packets to process", &adapter->rx_process_limit,
	    ixgbe_rx_process_limit);
}
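/*
** Usage example (assuming a hypothetical unit "ix0"): the nodes
** created above hang off hw.<nameunit>, so from userland the flow
** control mode and advertised speed could be set with:
**
**   # sysctl hw.ix0.fc=3
**   # sysctl hw.ix0.advertise_speed=1
*/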
/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and sends data which has been
 *  dma'ed into host memory to the upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *  Return TRUE for more work, FALSE for all clean.
 *
 *********************************************************************/
static bool
ixgbe_rxeof(struct ix_queue *que, int count)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
	int			i, nextp, processed = 0;
	u32			staterr = 0;
	union ixgbe_adv_rx_desc	*cur;
	struct ixgbe_rx_buf	*rbuf, *nbuf;

	IXGBE_RX_LOCK(rxr);

#ifdef DEV_NETMAP
	if (ifp->if_capenable & IFCAP_NETMAP) {
		/*
		 * Same as the txeof routine: only wakeup clients on intr.
		 * NKR_PENDINTR in nr_kflags is used to implement interrupt
		 * mitigation (ixgbe_rxsync() will not look for new packets
		 * unless NKR_PENDINTR is set).
		 */
		struct netmap_adapter *na = NA(ifp);

		na->rx_rings[rxr->me].nr_kflags |= NKR_PENDINTR;
		selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET);
		IXGBE_RX_UNLOCK(rxr);
		IXGBE_CORE_LOCK(adapter);
		selwakeuppri(&na->rx_si, PI_NET);
		IXGBE_CORE_UNLOCK(adapter);
		return (FALSE);
	}
#endif /* DEV_NETMAP */
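	/*
	** Main cleanup loop: each pass syncs the ring for the CPU
	** (POSTREAD/POSTWRITE), examines one writeback descriptor, and
	** on the way out syncs it back for the device (PREREAD/PREWRITE)
	** before the refresh path eventually bumps the tail register.
	*/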
	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf	*sendmp, *mh, *mp;
		u32		rsc, ptype;
		u16		hlen, plen, hdr;
		u16		vtag = 0;
		bool		eop;

		/* Sync the ring. */
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);

		if ((staterr & IXGBE_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;

		count--;
		sendmp = NULL;
		nbuf = NULL;
		rsc = 0;
		cur->wb.upper.status_error = 0;
		rbuf = &rxr->rx_buffers[i];
		mh = rbuf->m_head;
		mp = rbuf->m_pack;

		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) &
		    IXGBE_RXDADV_PKTTYPE_MASK;
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);

		/* Process vlan info */
		if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
			vtag = le16toh(cur->wb.upper.vlan);

		/* Make sure bad packets are discarded */
		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			rxr->rx_discarded++;
			if (eop)
				rxr->discard = FALSE;
			else
				rxr->discard = TRUE;
			ixgbe_rx_discard(rxr, i);
			goto next_desc;
		}
		/*
		** On the 82599, which supports hardware LRO (called
		** HW RSC), packets need not be fragmented across
		** sequential descriptors; rather, the next descriptor
		** is indicated in bits of the current one.  This also
		** means we might process more than one packet at a
		** time, which required eliminating the global chain
		** pointers in favor of what we are doing here. -jfv
		*/
		if (!eop) {
			/*
			** Figure out the next descriptor
			** of this frame.
			*/
			if (rxr->hw_rsc == TRUE) {
				rsc = ixgbe_rsc_count(cur);
				rxr->rsc_num += (rsc - 1);
			}
			if (rsc) { /* Get hardware index */
				nextp = ((staterr &
				    IXGBE_RXDADV_NEXTP_MASK) >>
				    IXGBE_RXDADV_NEXTP_SHIFT);
			} else { /* Just sequential */
				nextp = i + 1;
				if (nextp == adapter->num_rx_desc)
					nextp = 0;
			}
			nbuf = &rxr->rx_buffers[nextp];
			prefetch(nbuf);
		}
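		/*
		** Example: with HW RSC active, if descriptor 5 reports
		** NEXTP == 42 in its writeback bits, nbuf above points
		** at rx_buffers[42] rather than rx_buffers[6], so an
		** aggregated chain may hop around the ring instead of
		** occupying consecutive slots.
		*/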
		/*
		** The header mbuf is ONLY used when header
		** split is enabled, otherwise we get normal
		** behavior, ie, both header and payload
		** are DMA'd into the payload buffer.
		**
		** Rather than using the fmp/lmp global pointers
		** we now keep the head of a packet chain in the
		** buffer struct and pass this along from one
		** descriptor to the next, until we get EOP.
		*/
		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
			/* This must be an initial descriptor */
			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IXGBE_RX_HDR)
				hlen = IXGBE_RX_HDR;
			mh->m_len = hlen;
			mh->m_flags |= M_PKTHDR;
			mh->m_next = NULL;
			mh->m_pkthdr.len = mh->m_len;
			/* Null buf pointer so it is refreshed */
			rbuf->m_head = NULL;
			/*
			** Check the payload length, this
			** could be zero if it's a small
			** packet.
			*/
			if (plen > 0) {
				mp->m_len = plen;
				mp->m_next = NULL;
				mp->m_flags &= ~M_PKTHDR;
				mh->m_next = mp;
				mh->m_pkthdr.len += mp->m_len;
				/* Null buf pointer so it is refreshed */
				rbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
			/*
			** Now create the forward
			** chain so when complete
			** we won't have to.
			*/
			if (eop == 0) {
				/* stash the chain head */
				nbuf->fmp = mh;
				/* Make forward chain */
				if (plen)
					mp->m_next = nbuf->m_pack;
				else
					mh->m_next = nbuf->m_pack;
			} else {
				/* Singlet, prepare to send */
				sendmp = mh;
				/* If hardware handled vtag */
				if (vtag) {
					sendmp->m_pkthdr.ether_vlantag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** packet.
			*/
			mp->m_len = plen;
			/*
			** See if there is a stored head
			** that determines what chain we
			** are continuing.
			*/
			sendmp = rbuf->fmp;
			rbuf->m_pack = rbuf->fmp = NULL;

			if (sendmp != NULL) {	/* secondary frag */
				mp->m_flags &= ~M_PKTHDR;
				sendmp->m_pkthdr.len += mp->m_len;
			} else {
				/* first desc of a non-ps chain */
				sendmp = mp;
				sendmp->m_flags |= M_PKTHDR;
				sendmp->m_pkthdr.len = mp->m_len;
				if (staterr & IXGBE_RXD_STAT_VP) {
					sendmp->m_pkthdr.ether_vlantag = vtag;
					sendmp->m_flags |= M_VLANTAG;
				}
			}
			/* Pass the head pointer on */
			if (eop == 0) {
				nbuf->fmp = sendmp;
				sendmp = NULL;
				mp->m_next = nbuf->m_pack;
			}
		}
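		/*
		** Chain hand-off sketch for a 3-descriptor frame
		** (header split off):
		**
		**   desc i   : fmp == NULL -> sendmp = m_pack(i),
		**              nbuf->fmp = sendmp, link m_pack(i+1)
		**   desc i+1 : fmp != NULL -> append m_pack(i+1),
		**              nbuf->fmp = sendmp, link m_pack(i+2)
		**   desc i+2 : EOP         -> sendmp complete, sent up
		*/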
		++processed;
		/* Sending this frame? */
		if (eop) {
			sendmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->bytes += sendmp->m_pkthdr.len;
			rxr->rx_bytes += sendmp->m_pkthdr.len;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				ixgbe_rx_checksum(staterr, sendmp, ptype);
#if 0 /* __FreeBSD_version >= 800000 */
			sendmp->m_pkthdr.flowid = que->msix;
			sendmp->m_flags |= M_FLOWID;
#endif
		}
next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;

		/* Now send to the stack or do LRO */
		if (sendmp != NULL) {
			rxr->next_to_check = i;
			ixgbe_rx_input(rxr, ifp, sendmp, ptype);
			i = rxr->next_to_check;
		}

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			ixgbe_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Refresh any remaining buf structs */
	if (ixgbe_rx_unrefreshed(rxr))
		ixgbe_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IXGBE_RX_UNLOCK(rxr);

	/*
	** Still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
		ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
		return (TRUE);
	}

	return (FALSE);
}
/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it.
 *
 *********************************************************************/
static void
ixgbe_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
{
	u16	status = (u16) staterr;
	u8	errors = (u8) (staterr >> 24);
	bool	sctp = FALSE;

	if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = TRUE;

	if (status & IXGBE_RXD_STAT_IPCS) {
		if (!(errors & IXGBE_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}
	if (status & IXGBE_RXD_STAT_L4CS) {
		u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		if (sctp)
			type = CSUM_SCTP_VALID;
		if (!(errors & IXGBE_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
			if (!sctp)
				mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}
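/*
** Worked example (bit values per the 82599 descriptor layout):
** staterr 0x00000063 decodes to status 0x0063, i.e. DD | EOP |
** L4CS | IPCS with a clean error byte, so both CSUM_IP_VALID and
** CSUM_DATA_VALID | CSUM_PSEUDO_HDR end up in csum_flags.  A TCP
** checksum failure would instead raise IXGBE_RXD_ERR_TCPE (0x40)
** in the error byte and leave the L4 flags unset.
*/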
** This routine is run via a vlan config EVENT; it enables us to
** use the HW Filter table since we can get the vlan id.  This
** just creates the entry in the soft version of the VFTA; init
** will repopulate the real table.
*/
static void
ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
/*
** This routine is run via a vlan
** unconfig EVENT; remove our entry
** in the soft vfta.
*/
static void
ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u16		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IXGBE_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	ixgbe_init_locked(adapter);
	IXGBE_CORE_UNLOCK(adapter);
}
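/*
** VFTA indexing example: vlan id 100 lands in shadow_vfta[3]
** (100 >> 5 == 3) with bit 4 set (100 & 0x1F == 4); the 128
** 32-bit words together cover all 4096 possible vlan ids.
*/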
static void
ixgbe_setup_vlan_hw_support(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	struct rx_ring	*rxr;
	u32		ctrl;

	/*
	** We get here thru init_locked, meaning
	** a soft reset; that has already cleared
	** the VFTA and other state, so if there
	** have been no vlans registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
			    adapter->shadow_vfta[i]);

	ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	/* Enable the Filter Table if enabled */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		ctrl |= IXGBE_VLNCTRL_VFE;
	}
	if (hw->mac.type == ixgbe_mac_82598EB)
		ctrl |= IXGBE_VLNCTRL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);

	/* Setup the queues for vlans */
	for (int i = 0; i < adapter->num_queues; i++) {
		rxr = &adapter->rx_rings[i];
		/* On 82599 the VLAN enable is per/queue in RXDCTL */
		if (hw->mac.type != ixgbe_mac_82598EB) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
		}
		rxr->vtag_strip = TRUE;
	}
}
static void
ixgbe_enable_intr(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	struct ix_queue	*que = adapter->queues;
	u32		mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	/* Enable Fan Failure detection */
	if (hw->device_id == IXGBE_DEV_ID_82598AT)
		mask |= IXGBE_EIMS_GPI_SDP1;
	else {
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_GPI_SDP0;
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
#ifdef IXGBE_FDIR
		mask |= IXGBE_EIMS_FLOW_DIR;
#endif
	}

	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);

	/* With RSS we use auto clear */
	if (adapter->msix_mem) {
		mask = IXGBE_EIMS_ENABLE_MASK;
		/* Don't autoclear Link */
		mask &= ~IXGBE_EIMS_OTHER;
		mask &= ~IXGBE_EIMS_LSC;
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
	}

	/*
	** Now enable all queues; this is done separately to
	** allow for handling the extended (beyond 32) MSIX
	** vectors that can be used by 82599.
	*/
	for (int i = 0; i < adapter->num_queues; i++, que++)
		ixgbe_enable_queue(adapter, que->msix);

	IXGBE_WRITE_FLUSH(hw);

	return;
}
static void
ixgbe_disable_intr(struct adapter *adapter)
{
	if (adapter->msix_mem)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);

	return;
}
u16
ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
{
	u16 value;

	value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
	    reg, 2);

	return (value);
}

void
ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
{
	pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
	    reg, value, 2);
}
/*
** Setup the correct IVAR register for a particular MSIX interrupt
**   (yes this is all very magic and confusing :)
**  - entry is the register array entry
**  - vector is the MSIX vector for this queue
**  - type is RX/TX/MISC
*/
static void
ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ivar, index;

	vector |= IXGBE_IVAR_ALLOC_VAL;

	switch (hw->mac.type) {

	case ixgbe_mac_82598EB:
		if (type == -1)
			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
		else
			entry += (type * 64);
		index = (entry >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (entry & 0x3)));
		ivar |= (vector << (8 * (entry & 0x3)));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (type == -1) { /* MISC IVAR */
			index = (entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {	/* RX/TX IVARS */
			index = (16 * (entry & 1)) + (8 * type);
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
		}
		break;

	default:
		break;
	}
}
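/*
** IVAR layout example (82599/X540): for queue entry 3, RX (type 0),
** the vector byte lives in IVAR(3 >> 1) == IVAR(1) at bit offset
** (16 * (3 & 1)) + (8 * 0) == 16, i.e. the third byte of that
** register; the ALLOC_VAL bit marks the entry as valid.
*/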
static void
ixgbe_configure_ivars(struct adapter *adapter)
{
	struct ix_queue *que = adapter->queues;

	for (int i = 0; i < adapter->num_queues; i++, que++) {
		/* First the RX queue entry */
		ixgbe_set_ivar(adapter, i, que->msix, 0);
		/* ... and the TX */
		ixgbe_set_ivar(adapter, i, que->msix, 1);
		/* Set an Initial EITR value */
		ixgbe_set_eitr(adapter, que->msix, IXGBE_INTR_RATE);
	}

	/* For the Link interrupt */
	ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
}
/*
** ixgbe_sfp_probe - called in the local timer to
** determine if a port had optics inserted.
*/
static bool ixgbe_sfp_probe(struct adapter *adapter)
{
	struct ixgbe_hw	*hw = &adapter->hw;
	device_t	dev = adapter->dev;
	bool		result = FALSE;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret)
			goto out;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			device_printf(dev, "Unsupported SFP+ module detected!");
			kprintf(" Reload driver with supported module.\n");
			adapter->sfp_probe = FALSE;
			goto out;
		} else
			device_printf(dev, "SFP+ module detected!\n");
		/* We now have supported optics */
		adapter->sfp_probe = FALSE;
		/* Set the optics type so system reports correctly */
		ixgbe_setup_optics(adapter);
		result = TRUE;
	}
out:
	return (result);
}
/*
** Tasklet for handling SFP module interrupts
*/
static void
ixgbe_handle_mod(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	device_t	dev = adapter->dev;
	u32		err;

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Unsupported SFP+ module type was detected.\n");
		return;
	}
	err = hw->mac.ops.setup_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		device_printf(dev,
		    "Setup failure - unsupported SFP+ module type.\n");
		return;
	}
	taskqueue_enqueue(adapter->tq, &adapter->msf_task);
	return;
}
/*
** Tasklet for handling MSF (multispeed fiber) interrupts
*/
static void
ixgbe_handle_msf(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		autoneg;
	bool		negotiate;

	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
	return;
}
#ifdef IXGBE_FDIR
/*
** Tasklet for reinitializing the Flow Director filter table
*/
static void
ixgbe_reinit_fdir(void *context, int pending)
{
	struct adapter	*adapter = context;
	struct ifnet	*ifp = adapter->ifp;

	if (adapter->fdir_reinit != 1) /* Shouldn't happen */
		return;
	ixgbe_reinit_fdir_tables_82599(&adapter->hw);
	adapter->fdir_reinit = 0;
	/* re-enable flow director interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
	/* Restart the interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	return;
}
#endif
/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
ixgbe_update_stats_counters(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	struct ixgbe_hw *hw = &adapter->hw;
	u32		missed_rx = 0, bprc, lxon, lxoff, total;
	u64		total_missed_rx = 0;

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);

	for (int i = 0; i < 8; i++) {
		u32 mp;
		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		/* missed_rx tallies misses for the gprc workaround */
		missed_rx += mp;
		/* global total per queue */
		adapter->stats.mpc[i] += mp;
		/* Running comprehensive total for stats display */
		total_missed_rx += adapter->stats.mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			adapter->stats.rnbc[i] +=
			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		adapter->stats.pxontxc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		adapter->stats.pxonrxc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
		adapter->stats.pxofftxc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		adapter->stats.pxoffrxc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		adapter->stats.pxon2offc[i] +=
		    IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
	}
	for (int i = 0; i < 16; i++) {
		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		adapter->stats.qbrc[i] +=
		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		adapter->stats.qbtc[i] +=
		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
		adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
	}
	adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
	adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Hardware workaround, gprc counts missed packets */
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	adapter->stats.gprc -= missed_rx;

	if (hw->mac.type != ixgbe_mac_82598EB) {
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
		    ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		/* 82598 only has a counter in the high register */
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
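	/*
	** The octet counters above are wider than 32 bits, so on
	** 82599/X540 the low register is read first and the high
	** register is shifted left 32 and added; e.g. GORCH:GORCL of
	** 0x1:0x00000200 accumulates as 0x100000200 octets.
	*/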
	/*
	 * Workaround: mprc hardware is incorrectly counting
	 * broadcasts, so for now we subtract those.
	 */
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		adapter->stats.mprc -= bprc;

	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);

	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	total = lxon + lxoff;

	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.gptc -= total;
	adapter->stats.mptc -= total;
	adapter->stats.ptc64 -= total;
	adapter->stats.gotc -= total * ETHER_MIN_LEN;

	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
	adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
	adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
	adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
	/* Only read FCOE on 82599 */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
	}

	/* Fill out the OS statistics structure */
	ifp->if_ipackets = adapter->stats.gprc;
	ifp->if_opackets = adapter->stats.gptc;
	ifp->if_ibytes = adapter->stats.gorc;
	ifp->if_obytes = adapter->stats.gotc;
	ifp->if_imcasts = adapter->stats.mprc;
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
	    adapter->stats.rlec;
}
/** ixgbe_sysctl_tdh_handler - Handler function
 *  Retrieves the TDH value from the hardware
 */
static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	if (!txr)
		return 0;

	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;
	return 0;
}

/** ixgbe_sysctl_tdt_handler - Handler function
 *  Retrieves the TDT value from the hardware
 */
static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
	if (!txr)
		return 0;

	unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;
	return 0;
}

/** ixgbe_sysctl_rdh_handler - Handler function
 *  Retrieves the RDH value from the hardware
 */
static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
	if (!rxr)
		return 0;

	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;
	return 0;
}

/** ixgbe_sysctl_rdt_handler - Handler function
 *  Retrieves the RDT value from the hardware
 */
static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
{
	int error;

	struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
	if (!rxr)
		return 0;

	unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return error;
	return 0;
}
/**
 *  Add sysctl variables, one per statistic, to the system.
 */
static void
ixgbe_add_hw_stats(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;

	struct sysctl_ctx_list *ctx = &adapter->sysctl_ctx;
	struct sysctl_oid *tree = adapter->sysctl_tree;
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct ixgbe_hw_stats *stats = &adapter->stats;

	struct sysctl_oid *stat_node, *queue_node;
	struct sysctl_oid_list *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts,
	    "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
	    CTLFLAG_RD, &adapter->mbuf_defrag_failed,
	    "m_defrag() failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
	    CTLFLAG_RD, &adapter->no_tx_dma_setup,
	    "Driver tx dma failure in xmit");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &adapter->watchdog_events,
	    "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
	    CTLFLAG_RD, &adapter->tso_tx,
	    "TSO");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq,
	    "Link MSIX IRQ Handled");

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		ksnprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(adapter->queues[i].irqs), 0,
		    "irqs on this queue");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
		    ixgbe_sysctl_tdh_handler, "IU",
		    "Transmit Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
		    ixgbe_sysctl_tdt_handler, "IU",
		    "Transmit Descriptor Tail");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &txr->no_desc_avail, 0,
		    "Queue No Descriptor Available");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &txr->total_packets, 0,
		    "Queue Packets Transmitted");
	}

	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		struct lro_ctrl *lro = &rxr->lro;

		ksnprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
		    ixgbe_sysctl_rdh_handler, "IU",
		    "Receive Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
		    CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
		    ixgbe_sysctl_rdt_handler, "IU",
		    "Receive Descriptor Tail");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &rxr->rx_packets, 0,
		    "Queue Packets Received");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &rxr->rx_bytes, 0,
		    "Queue Bytes Received");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
	}
	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs, 0,
	    "CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
	    CTLFLAG_RD, &stats->illerrc, 0,
	    "Illegal Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
	    CTLFLAG_RD, &stats->errbc, 0,
	    "Byte Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
	    CTLFLAG_RD, &stats->mspdc, 0,
	    "MAC Short Packets Discarded");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
	    CTLFLAG_RD, &stats->mlfc, 0,
	    "MAC Local Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
	    CTLFLAG_RD, &stats->mrfc, 0,
	    "MAC Remote Faults");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
	    CTLFLAG_RD, &stats->rlec, 0,
	    "Receive Length Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
	    CTLFLAG_RD, &stats->lxontxc, 0,
	    "Link XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
	    CTLFLAG_RD, &stats->lxonrxc, 0,
	    "Link XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
	    CTLFLAG_RD, &stats->lxofftxc, 0,
	    "Link XOFF Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
	    CTLFLAG_RD, &stats->lxoffrxc, 0,
	    "Link XOFF Received");
	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
	    CTLFLAG_RD, &stats->tor, 0,
	    "Total Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->gorc, 0,
	    "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
	    CTLFLAG_RD, &stats->tpr, 0,
	    "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->gprc, 0,
	    "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->mprc, 0,
	    "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->bprc, 0,
	    "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64, 0,
	    "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127, 0,
	    "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255, 0,
	    "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511, 0,
	    "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023, 0,
	    "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522, 0,
	    "1024-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
	    CTLFLAG_RD, &stats->ruc, 0,
	    "Receive Undersized");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &stats->rfc, 0,
	    "Fragmented Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
	    CTLFLAG_RD, &stats->roc, 0,
	    "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
	    CTLFLAG_RD, &stats->rjc, 0,
	    "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
	    CTLFLAG_RD, &stats->mngprc, 0,
	    "Management Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
	    CTLFLAG_RD, &stats->mngpdc, 0,
	    "Management Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
	    CTLFLAG_RD, &stats->xec, 0,
	    "Checksum Errors");
	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc, 0,
	    "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt, 0,
	    "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc, 0,
	    "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc, 0,
	    "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc, 0,
	    "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
	    CTLFLAG_RD, &stats->mngptc, 0,
	    "Management Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64, 0,
	    "64 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127, 0,
	    "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255, 0,
	    "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511, 0,
	    "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023, 0,
	    "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522, 0,
	    "1024-1522 byte frames transmitted");

	/* FC Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc",
	    CTLFLAG_RD, &stats->fccrc, 0,
	    "FC CRC Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last",
	    CTLFLAG_RD, &stats->fclast, 0,
	    "FC Last Error");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
	    CTLFLAG_RD, &stats->fcoerpdc, 0,
	    "FCoE Packets Dropped");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
	    CTLFLAG_RD, &stats->fcoeprc, 0,
	    "FCoE Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
	    CTLFLAG_RD, &stats->fcoeptc, 0,
	    "FCoE Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
	    CTLFLAG_RD, &stats->fcoedwrc, 0,
	    "FCoE DWords Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
	    CTLFLAG_RD, &stats->fcoedwtc, 0,
	    "FCoE DWords Transmitted");
}
/*
** Set flow control using sysctl:
** Flow control values:
**	0 - off
**	1 - rx pause
**	2 - tx pause
**	3 - full
*/
static int
ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int error, last;
	struct adapter *adapter = (struct adapter *) arg1;

	last = adapter->fc;
	error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	/* Don't bother if it's not changed */
	if (adapter->fc == last)
		return (0);

	switch (adapter->fc) {
	case ixgbe_fc_rx_pause:
	case ixgbe_fc_tx_pause:
	case ixgbe_fc_full:
		adapter->hw.fc.requested_mode = adapter->fc;
		break;
	case ixgbe_fc_none:
	default:
		adapter->hw.fc.requested_mode = ixgbe_fc_none;
	}
	/* Don't autoneg if forcing a value */
	adapter->hw.fc.disable_fc_autoneg = TRUE;
	ixgbe_fc_enable(&adapter->hw);
	return error;
}
static void
ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(&adapter->sysctl_ctx,
	    SYSCTL_CHILDREN(adapter->sysctl_tree),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
}
/*
** Control link advertise speed:
**	1 - advertise only 1G
**	2 - advertise 100Mb
**	3 - advertise normal
*/
static int
ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
{
	int			error = 0;
	struct adapter		*adapter;
	device_t		dev;
	struct ixgbe_hw		*hw;
	ixgbe_link_speed	speed, last;

	adapter = (struct adapter *) arg1;
	dev = adapter->dev;
	hw = &adapter->hw;
	last = adapter->advertise;

	error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
	if ((error) || (adapter->advertise == -1))
		return (error);

	if (adapter->advertise == last) /* no change */
		return (0);

	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)))
		return (error);

	if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
		device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
		return (error);
	}

	if (adapter->advertise == 1)
		speed = IXGBE_LINK_SPEED_1GB_FULL;
	else if (adapter->advertise == 2)
		speed = IXGBE_LINK_SPEED_100_FULL;
	else if (adapter->advertise == 3)
		speed = IXGBE_LINK_SPEED_1GB_FULL |
		    IXGBE_LINK_SPEED_10GB_FULL;
	else	/* bogus value */
		return (error);

	hw->mac.autotry_restart = TRUE;
	hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);

	return (error);
}
/*
** Thermal Shutdown Trigger
**   - cause a Thermal Overtemp IRQ
*/
static int
ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS)
{
	int		error, fire = 0;
	struct adapter	*adapter = (struct adapter *) arg1;
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->mac.type != ixgbe_mac_X540)
		return (0);

	error = sysctl_handle_int(oidp, &fire, 0, req);
	if ((error) || (req->newptr == NULL))
		return (error);

	if (fire) {
		u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
		reg |= IXGBE_EICR_TS;
		IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
	}

	return (0);
}
static void
ixgbe_set_eitr(struct adapter *sc, int idx, int rate)
{
	uint32_t eitr = 0;

	/* convert rate in intr/s to hw representation */
	if (rate > 0) {
		eitr = 1000000 / rate;
		eitr <<= IXGBE_EITR_INTVL_SHIFT;

		if (eitr == 0) {
			/* Don't disable it */
			eitr = 1 << IXGBE_EITR_INTVL_SHIFT;
		} else if (eitr > IXGBE_EITR_INTVL_MASK) {
			/* Don't allow it to be too large */
			eitr = IXGBE_EITR_INTVL_MASK;
		}
	}
	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(idx), eitr);
}
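/*
** Worked example: rate = 8000 intr/s gives 1000000 / 8000 == 125,
** i.e. a 125us minimum gap between interrupts, which is then shifted
** into the INTVL field of EITR.  A rate of 0 writes EITR as 0 and so
** disables moderation entirely; rates high enough to truncate the
** division to 0 are clamped to the smallest non-zero interval.
*/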
static int
ixgbe_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = (void *)arg1;
	struct ifnet *ifp = sc->ifp;
	int error, intr_rate, running;
	struct ix_queue *que = sc->queues;

	intr_rate = sc->intr_rate;
	error = sysctl_handle_int(oidp, &intr_rate, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (intr_rate < 0)
		return EINVAL;

	ifnet_serialize_all(ifp);

	sc->intr_rate = intr_rate;
	running = ifp->if_flags & IFF_RUNNING;
	if (running)
		ixgbe_set_eitr(sc, 0, sc->intr_rate);

	if (running && (sc->intr_type == PCI_INTR_TYPE_MSIX)) {
		for (int i = 0; i < sc->num_queues; i++, que++)
			ixgbe_set_eitr(sc, que->msix, sc->intr_rate);
	}

	if (bootverbose)
		if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate);

	ifnet_deserialize_all(ifp);

	return 0;
}
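/*
** Usage example (hypothetical unit "ix0"): on a running interface,
**   # sysctl hw.ix0.intr_rate=4000
** reprograms EITR for every MSI-X vector while the interface is
** serialized, so the change takes effect without a reinit.
*/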
/* rearrange mbuf chain to get contiguous bytes */
static int
ixgbe_tso_pullup(struct tx_ring *txr, struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);