ixgbe: Import Intel PRO/10GbE driver from FreeBSD
author     François Tigeot <ftigeot@wolfpond.org>
           Sat, 30 Jun 2012 14:50:07 +0000 (16:50 +0200)
committer  François Tigeot <ftigeot@wolfpond.org>
           Sat, 30 Jun 2012 16:24:56 +0000 (18:24 +0200)
Local changes:

* Disable LRO and TSO hardware optimizations, commenting out the code
  with #if 0 directives

* Disable VLAN hardware acceleration code as well

* Disable MSI-X code, only use one queue per port for now

* Use code from Sascha Wildner to create a per-port sysctl tree
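
  The per-port tree lands under hw.ixN (following device_get_nameunit()
  in the attach code); for example, the flow control setting of the
  first port can be inspected with:

      sysctl hw.ix0.fc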

Tested-with: 82599EB

32 files changed:
share/man/man4/Makefile
share/man/man4/ixgbe.4 [new file with mode: 0644]
sys/conf/files
sys/config/GENERIC
sys/config/LINT
sys/config/LINT64
sys/config/X86_64_GENERIC
sys/dev/netif/Makefile
sys/dev/netif/ixgbe/LICENSE [new file with mode: 0644]
sys/dev/netif/ixgbe/Makefile [new file with mode: 0644]
sys/dev/netif/ixgbe/README [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_82598.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_82598.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_82599.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_82599.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_api.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_api.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_common.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_common.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_defines.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_mbx.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_mbx.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_osdep.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_phy.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_phy.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_type.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_vf.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_vf.h [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_x540.c [new file with mode: 0644]
sys/dev/netif/ixgbe/ixgbe_x540.h [new file with mode: 0644]

index 60cc8a7..98ef9cc 100644
@@ -137,6 +137,7 @@ MAN=        aac.4 \
        iwifw.4 \
        iwn.4 \
        iwnfw.4 \
+       ixgbe.4 \
        jme.4 \
        joy.4 \
        kate.4 \
diff --git a/share/man/man4/ixgbe.4 b/share/man/man4/ixgbe.4
new file mode 100644
index 0000000..7188cda
--- /dev/null
@@ -0,0 +1,125 @@
+.\" Copyright (c) 2001-2008, Intel Corporation
+.\" All rights reserved.
+.\" 
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright notice,
+.\"    this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\"    notice, this list of conditions and the following disclaimer in the
+.\"    documentation and/or other materials provided with the distribution.
+.\"
+.\" 3. Neither the name of the Intel Corporation nor the names of its
+.\"    contributors may be used to endorse or promote products derived from
+.\"    this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.\" $FreeBSD: src/share/man/man4/ixgbe.4,v 1.2 2008/06/17 21:14:02 brueffer Exp $
+.\"
+.Dd June 30, 2012
+.Dt IXGBE 4
+.Os
+.Sh NAME
+.Nm ixgbe
+.Nd "Intel(R) 10Gb Ethernet driver for the DragonFly BSD operating system"
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following line in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device ixgbe"
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following line in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+if_ixgbe_load="YES"
+.Ed
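+.Pp
+The module can also be loaded manually at runtime:
+.Bd -literal -offset indent
+kldload if_ixgbe
+.Ed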
+.Sh DESCRIPTION
+The
+.Nm
+driver provides support for PCI 10Gb Ethernet adapters based on
+the Intel(R) 82598EB family of network connections.
+The driver supports Jumbo Frames, MSI-X, TSO, and RSS.
+.Pp
+For questions related to hardware requirements,
+refer to the documentation supplied with your Intel 10GbE adapter.
+All hardware requirements listed apply to use with
+.Dx .
+.Pp
+Support for Jumbo Frames is provided via the interface MTU setting.
+Selecting an MTU larger than 1500 bytes with the
+.Xr ifconfig 8
+utility configures the adapter to receive and transmit Jumbo Frames.
+The maximum MTU size for Jumbo Frames is 16114.
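+.Pp
+For example, to enable Jumbo Frames on the first port (interfaces are
+named
+.Dq ix0 ,
+.Dq ix1 ,
+and so on):
+.Bd -literal -offset indent
+ifconfig ix0 mtu 9000
+.Ed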
+.Pp
+This driver version supports VLANs.
+For information on enabling VLANs, see
+.Xr ifconfig 8 .
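+.Pp
+For example, a minimal sketch of creating VLAN 10 on the first port,
+mirroring the syntax in the driver README:
+.Bd -literal -offset indent
+ifconfig vlan10 create
+ifconfig vlan10 vlan 10 vlandev ix0
+.Ed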
+.Sh HARDWARE
+The
+.Nm
+driver supports the following cards:
+.Pp
+.Bl -bullet -compact
+.It
+Intel(R) 10 Gigabit XF SR/AF Dual Port Server Adapter 
+.It
+Intel(R) 10 Gigabit XF SR/LR Server Adapter
+.It
+Intel(R) 82598EB 10 Gigabit AF Network Connection
+.It
+Intel(R) 82598EB 10 Gigabit AT CX4 Network Connection                  
+.El
+.Sh DIAGNOSTICS
+.Bl -diag
+.It "ix%d: Unable to allocate bus resource: memory"
+A fatal initialization error has occurred.
+.It "ix%d: Unable to allocate bus resource: interrupt"
+A fatal initialization error has occurred.
+.It "ix%d: watchdog timeout -- resetting"
+The device has stopped responding to the network, or there is a problem with
+the network connection (cable).
+.El
+.Sh SUPPORT
+For general information and support,
+go to the Intel support website at:
+.Pa http://support.intel.com .
+.Pp
+If an issue is identified with the released source code on the supported kernel
+with a supported adapter, email the specific information related to the
+issue to
+.Aq freebsdnic@mailbox.intel.com .
+.Sh SEE ALSO
+.Xr arp 4 ,
+.Xr netintro 4 ,
+.Xr ng_ether 4 ,
+.Xr polling 4 ,
+.Xr vlan 4 ,
+.Xr ifconfig 8
+.Sh HISTORY
+The
+.Nm
+device driver first appeared in
+.Dx 3.1 .
+.Sh AUTHORS
+The
+.Nm
+driver was written by
+.An Intel Corporation Aq freebsdnic@mailbox.intel.com .
index 5487749..c585873 100644
@@ -301,6 +301,15 @@ dev/netif/ep/if_ep_pccard.c        optional ep pccard
 dev/netif/em/if_em.c           optional em
 dev/netif/emx/if_emx.c         optional emx
 dev/netif/igb/if_igb.c         optional igb
+dev/netif/ixgbe/ixgbe.c                optional ixgbe
+dev/netif/ixgbe/ixgbe_82598.c  optional ixgbe
+dev/netif/ixgbe/ixgbe_82599.c  optional ixgbe
+dev/netif/ixgbe/ixgbe_api.c    optional ixgbe
+dev/netif/ixgbe/ixgbe_common.c optional ixgbe
+dev/netif/ixgbe/ixgbe_mbx.c    optional ixgbe
+dev/netif/ixgbe/ixgbe_phy.c    optional ixgbe
+dev/netif/ixgbe/ixgbe_vf.c     optional ixgbe
+dev/netif/ixgbe/ixgbe_x540.c   optional ixgbe
 dev/netif/ig_hal/e1000_80003es2lan.c   optional ig_hal
 dev/netif/ig_hal/e1000_82540.c optional ig_hal
 dev/netif/ig_hal/e1000_82541.c optional ig_hal
index a91d6d8..166298e 100644
@@ -230,6 +230,7 @@ device              em              # Intel PRO/1000 adapter Gigabit Ethernet Card (``Wiseman'')
 device         igb             # Intel Pro/1000 (82575, 82576, 82580, i350)
                                # Requires ig_hal
 device         ig_hal          # Intel PRO/1000 hardware abstraction layer
+device         ixgbe           # Intel PRO/10GbE PCIE Ethernet Family
 
 # PCI Ethernet NICs that use the common MII bus controller code.
 # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs!
index 78d4a8e..4400202 100644
@@ -1886,6 +1886,7 @@ device            emx             # Intel Pro/1000 (8257{1,2,3,4})
 device         igb             # Intel Pro/1000 (82575, 82576, 82580, i350)
                                # Requires ig_hal
 device         ig_hal          # Intel Pro/1000 hardware abstraction layer
+device         ixgbe           # Intel PRO/10GbE PCIE Ethernet Family
 device         et              # Agere ET1310 10/100/1000 Ethernet
 device         lge             # Level 1 LXT1001 (``Mercury'')
 device         mxge            # Myricom Myri-10G 10GbE NIC
index 9c865c3..252bc3f 100644
@@ -1669,6 +1669,7 @@ device            emx             # Intel Pro/1000 (8257{1,2,3,4})
 device         igb             # Intel Pro/1000 (82575, 82576, 82580, i350)
                                # Requires ig_hal
 device         ig_hal          # Intel Pro/1000 hardware abstraction layer
+device         ixgbe           # Intel PRO/10GbE PCIE Ethernet Family
 device         et              # Agere ET1310 10/100/1000 Ethernet
 device         lge             # Level 1 LXT1001 (``Mercury'')
 device         mxge            # Myricom Myri-10G 10GbE NIC
index 9ed45fe..ecc8827 100644
@@ -212,6 +212,7 @@ device              em              # Intel PRO/1000 adapter Gigabit Ethernet Card (``Wiseman'')
 device         igb             # Intel Pro/1000 (82575, 82576, 82580, i350)
                                # Requires ig_hal
 device         ig_hal          # Intel PRO/1000 hardware abstraction layer
+device         ixgbe           # Intel PRO/10GbE PCIE Ethernet Family
 
 # PCI Ethernet NICs that use the common MII bus controller code.
 # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs!
index 331ad44..41e961c 100644
@@ -3,7 +3,7 @@ SUBDIR= an age alc ale ar ath aue axe bce bfe bge \
        fwe fxp ic iwi iwn jme kue lge lgue lnc \
        mii_layer my msk mxge ndis nfe nge pcn \
        ral re rl rue sbni sbsh sf sis sk sln sr ste stge ti tl tx txp \
-       vge vr vx wb wi wpi xe xl ig_hal emx ae igb
+       vge vr vx wb wi wpi xe xl ig_hal emx ae igb ixgbe
 
 # XXX need to be updated to the new net80211 stack
 # SUBDIR= acx bwi iwl rtw rum ural
diff --git a/sys/dev/netif/ixgbe/LICENSE b/sys/dev/netif/ixgbe/LICENSE
new file mode 100644
index 0000000..b2cee60
--- /dev/null
@@ -0,0 +1,33 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: src/sys/dev/ixgbe/LICENSE,v 1.5 2011/06/02 00:34:57 jfv Exp $*/
diff --git a/sys/dev/netif/ixgbe/Makefile b/sys/dev/netif/ixgbe/Makefile
new file mode 100644
index 0000000..9992228
--- /dev/null
@@ -0,0 +1,22 @@
+#$FreeBSD: src/sys/modules/ixgbe/Makefile,v 1.12 2012/05/30 13:55:43 uqs Exp $
+
+.include <bsd.own.mk>
+
+KMOD    = if_ixgbe
+SRCS    = device_if.h bus_if.h pci_if.h
+SRCS    += opt_inet.h opt_inet6.h
+SRCS    += ixgbe.c #ixv.c
+# Shared source
+SRCS    += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
+SRCS    += ixgbe_82599.c ixgbe_82598.c ixgbe_x540.c
+CFLAGS+= -DSMP #-DIXGBE_FDIR
+
+.if !defined(BUILDING_WITH_KERNEL)
+opt_inet.h:
+       @echo "#define INET 1" > ${.TARGET}
+
+opt_inet6.h:
+       @echo "#define INET6 1" > ${.TARGET}
+.endif
+
+.include <bsd.kmod.mk>
diff --git a/sys/dev/netif/ixgbe/README b/sys/dev/netif/ixgbe/README
new file mode 100644
index 0000000..5d64d06
--- /dev/null
@@ -0,0 +1,323 @@
+FreeBSD Driver for Intel(R) Ethernet 10 Gigabit PCI Express Server Adapters
+============================================================================
+/*$FreeBSD: src/sys/dev/ixgbe/README,v 1.3 2011/06/02 00:34:57 jfv Exp $*/
+
+November 12, 2010
+
+
+Contents
+========
+
+- Overview
+- Supported Adapters
+- Configuration and Tuning
+- Known Limitations
+- Support
+- License
+
+
+Overview
+========
+
+This file describes the FreeBSD* driver for the Intel(R) Ethernet 10 Gigabit 
+Family of Adapters.  The driver has been developed for use with FreeBSD 7.2 or later.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel 10GbE adapter.  All hardware requirements listed
+apply to use with FreeBSD.
+
+
+Supported Adapters
+==================
+
+The driver in this release is compatible with 82598 and 82599-based Intel 
+Network Connections.
+              
+SFP+ Devices with Pluggable Optics
+----------------------------------
+
+82599-BASED ADAPTERS  
+
+NOTE: If your 82599-based Intel(R) Ethernet Network Adapter came with Intel 
+optics, or is an Intel(R) Ethernet Server Adapter X520-2, then it only supports
+Intel optics and/or the direct attach cables listed below.
+
+When 82599-based SFP+ devices are connected back to back, they should be set to
+the same Speed setting via Ethtool. Results may vary if you mix speed settings. 
+Supplier    Type                                             Part Numbers
+
+SR Modules
+Intel      DUAL RATE 1G/10G SFP+ SR (bailed)                FTLX8571D3BCV-IT
+Intel      DUAL RATE 1G/10G SFP+ SR (bailed)                AFBR-703SDZ-IN2
+Intel      DUAL RATE 1G/10G SFP+ SR (bailed)                AFBR-703SDDZ-IN1
+LR Modules
+Intel      DUAL RATE 1G/10G SFP+ LR (bailed)                FTLX1471D3BCV-IT
+Intel      DUAL RATE 1G/10G SFP+ LR (bailed)                AFCT-701SDZ-IN2
+Intel      DUAL RATE 1G/10G SFP+ LR (bailed)                AFCT-701SDDZ-IN1
+
+The following is a list of 3rd party SFP+ modules and direct attach cables that
+have received some testing. Not all modules are applicable to all devices.
+
+Supplier   Type                                              Part Numbers
+
+Finisar    SFP+ SR bailed, 10g single rate                   FTLX8571D3BCL
+Avago      SFP+ SR bailed, 10g single rate                   AFBR-700SDZ
+Finisar    SFP+ LR bailed, 10g single rate                   FTLX8571D3BCV-IT
+
+Finisar    DUAL RATE 1G/10G SFP+ SR (No Bail)                FTLX8571D3QCV-IT
+Avago      DUAL RATE 1G/10G SFP+ SR (No Bail)                AFBR-703SDZ-IN1
+Finisar    DUAL RATE 1G/10G SFP+ LR (No Bail)                FTLX1471D3QCV-IT
+Avago      DUAL RATE 1G/10G SFP+ LR (No Bail)                AFCT-701SDZ-IN1
+Finisar    1000BASE-T SFP                                    FCLF8522P2BTL
+Avago      1000BASE-T SFP                                    ABCU-5710RZ
+               
+82599-based adapters support all passive and active limiting direct attach 
+cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
+
+Laser turns off for SFP+ when ifconfig down
+-------------------------------------------
+"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
+"ifconfig up" turns on the laser.
+
+82598-BASED ADAPTERS
+
+NOTES for 82598-Based Adapters: 
+- Intel(R) Ethernet Network Adapters that support removable optical modules 
+  only support their original module type (i.e., the Intel(R) 10 Gigabit SR 
+  Dual Port Express Module only supports SR optical modules). If you plug 
+  in a different type of module, the driver will not load.
+- Hot Swapping/hot plugging optical modules is not supported.  
+- Only single speed, 10 gigabit modules are supported.  
+- LAN on Motherboard (LOMs) may support DA, SR, or LR modules. Other module 
+  types are not supported. Please see your system documentation for details.  
+
+The following is a list of 3rd party SFP+ modules and direct attach cables that have 
+received some testing. Not all modules are applicable to all devices.
+
+Supplier   Type                                              Part Numbers
+
+Finisar    SFP+ SR bailed, 10g single rate                   FTLX8571D3BCL
+Avago      SFP+ SR bailed, 10g single rate                   AFBR-700SDZ
+Finisar    SFP+ LR bailed, 10g single rate                   FTLX1471D3BCL
+       
+82598-based adapters support all passive direct attach cables that comply 
+with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach 
+cables are not supported.
+
+Third party optic modules and cables referred to above are listed only for the 
+purpose of highlighting third party specifications and potential compatibility, 
+and are not recommendations or endorsements or sponsorship of any third party's
+product by Intel. Intel is not endorsing or promoting products made by any 
+third party and the third party reference is provided only to share information
+regarding certain optic modules and cables with the above specifications. There
+may be other manufacturers or suppliers, producing or supplying optic modules 
+and cables with similar or matching descriptions. Customers must use their own 
+discretion and diligence to purchase optic modules and cables from any third 
+party of their choice. Customer are solely responsible for assessing the 
+suitability of the product and/or devices and for the selection of the vendor 
+for purchasing any product. INTEL ASSUMES NO LIABILITY WHATSOEVER, AND INTEL
+DISCLAIMS ANY EXPRESS OR IMPLIED WARRANTY, RELATING TO SALE AND/OR USE OF 
+SUCH THIRD PARTY PRODUCTS OR SELECTION OF VENDOR BY CUSTOMERS.
+
+Configuration and Tuning
+========================
+
+The driver supports Transmit/Receive Checksum Offload and Jumbo Frames on
+all 10 Gigabit adapters. 
+
+  Jumbo Frames
+  ------------
+  To enable Jumbo Frames, use the ifconfig utility to increase the MTU
+  beyond 1500 bytes.
+
+  NOTES:
+
+       - The Jumbo Frames setting on the switch must be set to at least
+         22 bytes larger than that of the adapter.
+
+       - There are known performance issues with this driver when running 
+         UDP traffic with Jumbo Frames. 
+
+  The Jumbo Frames MTU range for Intel Adapters is 1500 to 16114.  The
+  default MTU is 1500.  To modify the setting, enter the following:
+
+        ifconfig ix<interface_num> <hostname or IP address> mtu 9000
+
+  To confirm an interface's MTU value, use the ifconfig command. To confirm
+  the MTU used between two specific devices, use:
+
+        route get <destination_IP_address>
+
+  VLANs
+  -----
+  To create a new VLAN pseudo-interface:
+
+        ifconfig <vlan_name> create
+
+  To associate the VLAN pseudo-interface with a physical interface and
+  assign a VLAN ID, IP address, and netmask:
+
+        ifconfig <vlan_name> <ip_address> netmask <subnet_mask> vlan
+           <vlan_id> vlandev <physical_interface>
+
+  Example:
+
+        ifconfig vlan10 10.0.0.1 netmask 255.255.255.0 vlan 10 vlandev ix0
+
+  In this example, all packets will be marked on egress with 802.1Q VLAN 
+  tags, specifying a VLAN ID of 10.
+
+  To remove a VLAN pseudo-interface:
+
+        ifconfig <vlan_name> destroy
+
+
+  Checksum Offload
+  ----------------
+  
+  Checksum offloading supports both TCP and UDP packets and is 
+  supported for both transmit and receive. 
+
+  Checksum offloading can be enabled or disabled using ifconfig. 
+  Both transmit and receive offloading will be either enabled or 
+  disabled together. You cannot enable/disable one without the other.
+
+  To enable checksum offloading:
+
+         ifconfig <interface_num> rxcsum 
+
+  To disable checksum offloading:
+
+         ifconfig <interface_num> -rxcsum 
+
+  To confirm the current setting:
+
+         ifconfig <interface_num>
+
+  
+  TSO
+  ---
+
+  TSO is enabled by default.
+
+  To disable:
+
+         ifconfig <interface_num> -tso 
+
+  To re-enable:
+
+         ifconfig <interface_num> tso
+
+  LRO
+  ---
+  
+  Large Receive Offload is available in the driver; it is on by default. 
+  It can be disabled by using:
+         ifconfig <interface_num> -lro
+  To enable:
+         ifconfig <interface_num> lro
+
+
+  Important system configuration changes:
+  ---------------------------------------
+
+  When there is a choice, run on a 64-bit OS rather than a 32-bit one; it
+  makes a significant difference in performance.
+  
+  The default scheduler SCHED_4BSD is not smart about SMP locality issues. 
+  Significant improvement can be achieved by switching to the ULE scheduler.
+
+  This is done by changing the entry in the config file from SCHED_4BSD to 
+  SCHED_ULE. Note that this is only advisable on FreeBSD 7; on 6.X there have
+  been stability problems with ULE.
+
+  The interface can generate a high number of interrupts. To avoid running
+  into the limit set by the kernel, adjust the hw.intr_storm_threshold
+  setting using sysctl:
+       sysctl hw.intr_storm_threshold=9000 (the default is 1000)
+
+  For this change to take effect on boot, edit /etc/sysctl.conf and add the 
+  line:  
+       hw.intr_storm_threshold=9000
+
+  If you still see Interrupt Storm detected messages, increase the limit to a
+  higher number.
+
+  Best throughput results are seen with a large MTU; use 9000 if possible. 
+
+  The default number of descriptors is 1024; increasing this to 2K or even
+  4K may improve performance in some workloads, but change carefully.
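+
+  For example, the driver exposes the hw.ixgbe.txd and hw.ixgbe.rxd loader
+  tunables; a sketch of raising both to 2K in /boot/loader.conf:
+
+        hw.ixgbe.txd=2048
+        hw.ixgbe.rxd=2048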
+
+
+Known Limitations
+=================
+
+For known hardware and troubleshooting issues, refer to the following website.
+
+    http://support.intel.com/support/go/network/adapter/home.htm
+
+Either select the link for your adapter or perform a search for the adapter 
+number. The adapter's page lists many issues. For a complete list of hardware
+issues download your adapter's user guide and read the Release Notes. 
+
+  UDP stress test with 10GbE driver
+  ---------------------------------  
+  Under a small-packet UDP stress test with the 10GbE driver, the FreeBSD
+  system may drop UDP packets because the socket buffers fill up. You may
+  want to change the driver's Flow Control variables to the minimum value
+  for controlling packet reception.
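+
+  For example, using the per-port "fc" sysctl this driver adds (where a
+  value of 0 is assumed to select ixgbe_fc_none, i.e. no flow control):
+
+        sysctl hw.ix0.fc=0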
+
+  Attempting to configure larger MTUs with a large number of processors may
+  generate the error message "ix0:could not setup receive structures"
+  --------------------------------------------------------------------------
+  When using the ixgbe driver with RSS autoconfigured based on the number of 
+  cores (the default setting) and that number is larger than 4, increase the 
+  memory resources allocated for the mbuf pool as follows:
+
+  Add to the sysctl.conf file for the system:
+
+  kern.ipc.nmbclusters=262144
+  kern.ipc.nmbjumbop=262144
+
+  Lower than expected performance on dual port 10GbE devices
+  ----------------------------------------------------------
+  Some PCI-E x8 slots are actually configured as x4 slots. These slots have 
+  insufficient bandwidth for full 10GbE line rate with dual port 10GbE devices.
+  The driver can detect this situation and will write the following message in
+  the system log: "PCI-Express bandwidth available for this card is not 
+  sufficient for optimal performance. For optimal performance a x8 PCI-Express 
+  slot is required."
+
+  If this error occurs, moving your adapter to a true x8 slot will resolve the 
+  issue.
+
+
+
+Support
+=======
+
+For general information and support, go to the Intel support website at:
+
+        www.intel.com/support/
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related to 
+the issue to freebsd@intel.com
+
+
+
+License
+=======
+
+This software program is released under the terms of a license agreement 
+between you ('Licensee') and Intel. Do not use or load this software or any 
+associated materials (collectively, the 'Software') until you have carefully 
+read the full terms and conditions of the LICENSE located in this software 
+package. By loading or using the Software, you agree to the terms of this 
+Agreement. If you do not agree with the terms of this Agreement, do not 
+install or use the Software.
+
+* Other names and brands may be claimed as the property of others.
+
+
diff --git a/sys/dev/netif/ixgbe/ixgbe.c b/sys/dev/netif/ixgbe/ixgbe.c
new file mode 100644
index 0000000..91edb57
--- /dev/null
@@ -0,0 +1,5827 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2012, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.69 2012/06/07 22:57:26 emax Exp $*/
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include "ixgbe.h"
+
+/*********************************************************************
+ *  Set this to one to display debug statistics
+ *********************************************************************/
+int             ixgbe_display_debug_stats = 0;
+
+/*********************************************************************
+ *  Driver version
+ *********************************************************************/
+char ixgbe_driver_version[] = "2.4.5";
+
+/*********************************************************************
+ *  PCI Device ID Table
+ *
+ *  Used by probe to select devices to load on
+ *  Last field stores an index into ixgbe_strings
+ *  Last entry must be all 0s
+ *
+ *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
+{
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
+       /* required last entry */
+       {0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ *  Table of branding strings
+ *********************************************************************/
+
+static char    *ixgbe_strings[] = {
+       "Intel(R) PRO/10GbE PCI-Express Network Driver"
+};
+
+/*********************************************************************
+ *  Function prototypes
+ *********************************************************************/
+static int      ixgbe_probe(device_t);
+static int      ixgbe_attach(device_t);
+static int      ixgbe_detach(device_t);
+static int      ixgbe_shutdown(device_t);
+static void     ixgbe_start(struct ifnet *);
+static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
+#if 0 /* __FreeBSD_version >= 800000 */
+static int     ixgbe_mq_start(struct ifnet *, struct mbuf *);
+static int     ixgbe_mq_start_locked(struct ifnet *,
+                    struct tx_ring *, struct mbuf *);
+static void    ixgbe_qflush(struct ifnet *);
+#endif
+static int     ixgbe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
+static void    ixgbe_init(void *);
+static void    ixgbe_init_locked(struct adapter *);
+static void     ixgbe_stop(void *);
+static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
+static int      ixgbe_media_change(struct ifnet *);
+static void     ixgbe_identify_hardware(struct adapter *);
+static int      ixgbe_allocate_pci_resources(struct adapter *);
+static int      ixgbe_allocate_msix(struct adapter *);
+static int      ixgbe_allocate_legacy(struct adapter *);
+static int     ixgbe_allocate_queues(struct adapter *);
+#if 0  /* HAVE_MSIX */
+static int     ixgbe_setup_msix(struct adapter *);
+#endif
+static void    ixgbe_free_pci_resources(struct adapter *);
+static void    ixgbe_local_timer(void *);
+static int     ixgbe_setup_interface(device_t, struct adapter *);
+static void    ixgbe_config_link(struct adapter *);
+
+static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
+static int     ixgbe_setup_transmit_structures(struct adapter *);
+static void    ixgbe_setup_transmit_ring(struct tx_ring *);
+static void     ixgbe_initialize_transmit_units(struct adapter *);
+static void     ixgbe_free_transmit_structures(struct adapter *);
+static void     ixgbe_free_transmit_buffers(struct tx_ring *);
+
+static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
+static int      ixgbe_setup_receive_structures(struct adapter *);
+static int     ixgbe_setup_receive_ring(struct rx_ring *);
+static void     ixgbe_initialize_receive_units(struct adapter *);
+static void     ixgbe_free_receive_structures(struct adapter *);
+static void     ixgbe_free_receive_buffers(struct rx_ring *);
+#if 0  /* NET_LRO */
+static void    ixgbe_setup_hw_rsc(struct rx_ring *);
+#endif
+
+static void     ixgbe_enable_intr(struct adapter *);
+static void     ixgbe_disable_intr(struct adapter *);
+static void     ixgbe_update_stats_counters(struct adapter *);
+static bool    ixgbe_txeof(struct tx_ring *);
+static bool    ixgbe_rxeof(struct ix_queue *, int);
+static void    ixgbe_rx_checksum(u32, struct mbuf *, u32);
+static void     ixgbe_set_promisc(struct adapter *);
+static void     ixgbe_set_multi(struct adapter *);
+static void     ixgbe_update_link_status(struct adapter *);
+static void    ixgbe_refresh_mbufs(struct rx_ring *, int);
+static int      ixgbe_xmit(struct tx_ring *, struct mbuf **);
+static int     ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int     ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
+static int     ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS);
+static int     ixgbe_dma_malloc(struct adapter *, bus_size_t,
+                   struct ixgbe_dma_alloc *, int);
+static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
+static void    ixgbe_add_rx_process_limit(struct adapter *, const char *,
+                   const char *, int *, int);
+static bool    ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
+#if 0  /* NET_TSO */
+static bool    ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *);
+#endif
+static void    ixgbe_set_ivar(struct adapter *, u8, u8, s8);
+static void    ixgbe_configure_ivars(struct adapter *);
+static u8 *    ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
+
+static void    ixgbe_setup_vlan_hw_support(struct adapter *);
+static void    ixgbe_register_vlan(void *, struct ifnet *, u16);
+static void    ixgbe_unregister_vlan(void *, struct ifnet *, u16);
+
+static void     ixgbe_add_hw_stats(struct adapter *adapter);
+
+static __inline void ixgbe_rx_discard(struct rx_ring *, int);
+static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
+                   struct mbuf *, u32);
+
+/* Support for pluggable optic modules */
+static bool    ixgbe_sfp_probe(struct adapter *);
+static void    ixgbe_setup_optics(struct adapter *);
+
+/* Legacy (single vector) interrupt handler */
+static void    ixgbe_legacy_irq(void *);
+
+/* The MSI/X Interrupt handlers */
+static void    ixgbe_msix_que(void *);
+static void    ixgbe_msix_link(void *);
+
+/* Deferred interrupt tasklets */
+static void    ixgbe_handle_que(void *, int);
+static void    ixgbe_handle_link(void *, int);
+static void    ixgbe_handle_msf(void *, int);
+static void    ixgbe_handle_mod(void *, int);
+
+#ifdef IXGBE_FDIR
+static void    ixgbe_atr(struct tx_ring *, struct mbuf *);
+static void    ixgbe_reinit_fdir(void *, int);
+#endif
+
+/*********************************************************************
+ *  FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixgbe_methods[] = {
+       /* Device interface */
+       DEVMETHOD(device_probe, ixgbe_probe),
+       DEVMETHOD(device_attach, ixgbe_attach),
+       DEVMETHOD(device_detach, ixgbe_detach),
+       DEVMETHOD(device_shutdown, ixgbe_shutdown),
+       {0, 0}
+};
+
+static driver_t ixgbe_driver = {
+       "ix", ixgbe_methods, sizeof(struct adapter),
+};
+
+devclass_t ixgbe_devclass;
+DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
+
+MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
+MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
+
+/*
+** TUNEABLE PARAMETERS:
+*/
+
+/*
+** AIM: Adaptive Interrupt Moderation
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic for that interrupt vector
+*/
+static int ixgbe_enable_aim = TRUE;
+TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
+
+static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
+TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
+
+/* How many packets rxeof tries to clean at a time */
+static int ixgbe_rx_process_limit = 128;
+TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
+
+/* Flow control setting, default to full */
+static int ixgbe_flow_control = ixgbe_fc_full;
+TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control);
+
+/*
+** Smart speed setting, default to on.
+** This only works as a compile-time option
+** right now since it is applied during attach;
+** set this to 'ixgbe_smart_speed_off' to
+** disable.
+*/
+static int ixgbe_smart_speed = ixgbe_smart_speed_on;
+
+/*
+ * MSIX should be the default for best performance,
+ * but this allows it to be forced off for testing.
+ */
+static int ixgbe_enable_msix = 1;
+TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
+
+/*
+ * Header split: this causes the hardware to DMA
+ * the header into a separate mbuf from the payload;
+ * it can be a performance win in some workloads, but
+ * in others it actually hurts.  It's off by default.
+ */
+static int ixgbe_header_split = FALSE;
+TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
+
+/*
+ * Number of queues; if set to 0 the
+ * driver autoconfigures based on the
+ * number of cpus with a max of 8.  This
+ * can be overridden manually here.
+ */
+static int ixgbe_num_queues = 0;
+TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
+
+/*
+** Number of TX descriptors per ring,
+** setting higher than RX as this seems
+** the better performing choice.
+*/
+static int ixgbe_txd = PERFORM_TXD;
+TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
+
+/* Number of RX descriptors per ring */
+static int ixgbe_rxd = PERFORM_RXD;
+TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
+
+/* Keep a running tab on them for sanity checks */
+static int ixgbe_total_ports;
+
+#ifdef IXGBE_FDIR
+/*
+** For Flow Director: this is the
+** number of TX packets we sample
+** for the filter pool; this means
+** every 20th packet will be probed.
+**
+** This feature can be disabled by 
+** setting this to 0.
+*/
+static int atr_sample_rate = 20;
+/* 
+** Flow Director actually 'steals'
+** part of the packet buffer as its
+** filter pool; this variable controls
+** how much it uses:
+**  0 = 64K, 1 = 128K, 2 = 256K
+*/
+static int fdir_pballoc = 1;
+#endif
+
+#ifdef DEV_NETMAP
+/*
+ * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
+ * be a reference on how to implement netmap support in a driver.
+ * Additional comments are in ixgbe_netmap.h .
+ *
+ * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
+ * that extend the standard driver.
+ */
+#include <dev/netmap/ixgbe_netmap.h>
+#endif /* DEV_NETMAP */
+
+/*********************************************************************
+ *  Device identification routine
+ *
+ *  ixgbe_probe determines if the driver should be loaded on an
+ *  adapter based on the PCI vendor/device id of the adapter.
+ *
+ *  return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+ixgbe_probe(device_t dev)
+{
+       ixgbe_vendor_info_t *ent;
+
+       u16     pci_vendor_id = 0;
+       u16     pci_device_id = 0;
+       u16     pci_subvendor_id = 0;
+       u16     pci_subdevice_id = 0;
+       char    adapter_name[256];
+
+       INIT_DEBUGOUT("ixgbe_probe: begin");
+
+       pci_vendor_id = pci_get_vendor(dev);
+       if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
+               return (ENXIO);
+
+       pci_device_id = pci_get_device(dev);
+       pci_subvendor_id = pci_get_subvendor(dev);
+       pci_subdevice_id = pci_get_subdevice(dev);
+
+       ent = ixgbe_vendor_info_array;
+       while (ent->vendor_id != 0) {
+               if ((pci_vendor_id == ent->vendor_id) &&
+                   (pci_device_id == ent->device_id) &&
+
+                   ((pci_subvendor_id == ent->subvendor_id) ||
+                    (ent->subvendor_id == 0)) &&
+
+                   ((pci_subdevice_id == ent->subdevice_id) ||
+                    (ent->subdevice_id == 0))) {
+                       ksprintf(adapter_name, "%s, Version - %s",
+                               ixgbe_strings[ent->index],
+                               ixgbe_driver_version);
+                       device_set_desc_copy(dev, adapter_name);
+                       ++ixgbe_total_ports;
+                       return (BUS_PROBE_DEFAULT);
+               }
+               ent++;
+       }
+       return (ENXIO);
+}
+
+/*********************************************************************
+ *  Device initialization routine
+ *
+ *  The attach entry point is called when the driver is being loaded.
+ *  This routine identifies the type of hardware, allocates all resources
+ *  and initializes the hardware.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixgbe_attach(device_t dev)
+{
+       struct adapter *adapter;
+       struct ixgbe_hw *hw;
+       int             error = 0;
+       u16             csum;
+       u32             ctrl_ext;
+
+       INIT_DEBUGOUT("ixgbe_attach: begin");
+
+       if (resource_disabled("ixgbe", device_get_unit(dev))) {
+               device_printf(dev, "Disabled by device hint\n");
+               return (ENXIO);
+       }
+
+       /* Allocate, clear, and link in our adapter structure */
+       adapter = device_get_softc(dev);
+       adapter->dev = adapter->osdep.dev = dev;
+       hw = &adapter->hw;
+
+       /* Core Lock Init*/
+       IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
+       spin_init(&adapter->mcast_spin);
+
+       /* SYSCTL APIs */
+
+       sysctl_ctx_init(&adapter->sysctl_ctx);
+       adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
+           SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
+           device_get_nameunit(adapter->dev), CTLFLAG_RD, 0, "");
+       if (adapter->sysctl_tree == NULL) {
+               device_printf(adapter->dev, "can't add sysctl node\n");
+               return (EINVAL);
+       }
+       SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
+                       SYSCTL_CHILDREN(adapter->sysctl_tree),
+                       OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
+                       adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
+
+        SYSCTL_ADD_INT(&adapter->sysctl_ctx,
+                       SYSCTL_CHILDREN(adapter->sysctl_tree),
+                       OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
+                       &ixgbe_enable_aim, 1, "Interrupt Moderation");
+
+       /*
+       ** Allow a kind of speed control by forcing the autoneg
+       ** advertised speed list to only a certain value, this
+       ** supports 1G on 82599 devices, and 100Mb on x540.
+       */
+       SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
+                       SYSCTL_CHILDREN(adapter->sysctl_tree),
+                       OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
+                       adapter, 0, ixgbe_set_advertise, "I", "Link Speed");
+
+       SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
+                       SYSCTL_CHILDREN(adapter->sysctl_tree),
+                       OID_AUTO, "ts", CTLTYPE_INT | CTLFLAG_RW, adapter,
+                       0, ixgbe_set_thermal_test, "I", "Thermal Test");
+
+       /* Set up the timer callout */
+       /* XXX: shouldn't this be a spin lock ? */
+       lockinit(&adapter->core_lock, "ixgbe core lock", 0, LK_CANRECURSE);
+       callout_init(&adapter->timer);
+
+       /* Determine hardware revision */
+       ixgbe_identify_hardware(adapter);
+
+       /* Do base PCI setup - map BAR0 */
+       if (ixgbe_allocate_pci_resources(adapter)) {
+               device_printf(dev, "Allocation of PCI resources failed\n");
+               error = ENXIO;
+               goto err_out;
+       }
+
+       /* Do descriptor calc and sanity checks */
+       if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
+           ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
+               device_printf(dev, "TXD config issue, using default!\n");
+               adapter->num_tx_desc = DEFAULT_TXD;
+       } else
+               adapter->num_tx_desc = ixgbe_txd;
+
+       /*
+       ** With many RX rings it is easy to exceed the
+       ** system mbuf allocation. Tuning nmbclusters
+       ** can alleviate this.
+       */
+       if (nmbclusters > 0 ) {
+               int s;
+               s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
+               if (s > nmbclusters) {
+                       device_printf(dev, "RX Descriptors exceed "
+                           "system mbuf max, using default instead!\n");
+                       ixgbe_rxd = DEFAULT_RXD;
+               }
+       }
+
+       if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
+           ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
+               device_printf(dev, "RXD config issue, using default!\n");
+               adapter->num_rx_desc = DEFAULT_RXD;
+       } else
+               adapter->num_rx_desc = ixgbe_rxd;
+
+       /* Allocate our TX/RX Queues */
+       if (ixgbe_allocate_queues(adapter)) {
+               error = ENOMEM;
+               goto err_out;
+       }
+
+       /* Allocate multicast array memory. */
+       adapter->mta = kmalloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+           MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
+       if (adapter->mta == NULL) {
+               device_printf(dev, "Can not allocate multicast setup array\n");
+               error = ENOMEM;
+               goto err_late;
+       }
+
+       /* Initialize the shared code */
+       error = ixgbe_init_shared_code(hw);
+       if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
+               /*
+               ** No optics in this port, set up
+               ** so the timer routine will probe 
+               ** for later insertion.
+               */
+               adapter->sfp_probe = TRUE;
+               error = 0;
+       } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               device_printf(dev,"Unsupported SFP+ module detected!\n");
+               error = EIO;
+               goto err_late;
+       } else if (error) {
+               device_printf(dev,"Unable to initialize the shared code\n");
+               error = EIO;
+               goto err_late;
+       }
+
+       /* Make sure we have a good EEPROM before we read from it */
+       if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
+               device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
+               error = EIO;
+               goto err_late;
+       }
+
+       /* Get Hardware Flow Control setting */
+       hw->fc.requested_mode = ixgbe_fc_full;
+       adapter->fc = hw->fc.requested_mode;
+       hw->fc.pause_time = IXGBE_FC_PAUSE;
+       hw->fc.low_water = IXGBE_FC_LO;
+       hw->fc.high_water[0] = IXGBE_FC_HI;
+       hw->fc.send_xon = TRUE;
+
+       error = ixgbe_init_hw(hw);
+       if (error == IXGBE_ERR_EEPROM_VERSION) {
+               device_printf(dev, "This device is a pre-production adapter/"
+                   "LOM.  Please be aware there may be issues associated "
+                   "with your hardware.\n If you are experiencing problems "
+                   "please contact your Intel or hardware representative "
+                   "who provided you with this hardware.\n");
+       } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               device_printf(dev,"Unsupported SFP+ Module\n");
+
+       if (error) {
+               error = EIO;
+               device_printf(dev,"Hardware Initialization Failure\n");
+               goto err_late;
+       }
+
+       /* Detect and set physical type */
+       ixgbe_setup_optics(adapter);
+
+       if ((adapter->msix > 1) && (ixgbe_enable_msix))
+               error = ixgbe_allocate_msix(adapter); 
+       else
+               error = ixgbe_allocate_legacy(adapter); 
+       if (error) 
+               goto err_late;
+
+       /* Setup OS specific network interface */
+       if (ixgbe_setup_interface(dev, adapter) != 0)
+               goto err_late;
+
+       /* Sysctl for limiting the amount of work done in the taskqueue */
+       ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
+           "max number of rx packets to process", &adapter->rx_process_limit,
+           ixgbe_rx_process_limit);
+
+       /* Initialize statistics */
+       ixgbe_update_stats_counters(adapter);
+
+       /* Register for VLAN events */
+       adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+           ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+       adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+           ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+
+        /* Print PCIE bus type/speed/width info */
+       ixgbe_get_bus_info(hw);
+       device_printf(dev,"PCI Express Bus: Speed %s %s\n",
+           ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
+           (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
+           (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
+           (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
+           (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+           ("Unknown"));
+
+       if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
+           (hw->bus.speed == ixgbe_bus_speed_2500)) {
+               device_printf(dev, "PCI-Express bandwidth available"
+                   " for this card\n     is not sufficient for"
+                   " optimal performance.\n");
+               device_printf(dev, "For optimal performance a x8 "
+                   "PCIE, or x4 PCIE 2 slot is required.\n");
+        }
+
+       /* let hardware know driver is loaded */
+       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+       ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+       ixgbe_add_hw_stats(adapter);
+
+#ifdef DEV_NETMAP
+       ixgbe_netmap_attach(adapter);
+#endif /* DEV_NETMAP */
+       INIT_DEBUGOUT("ixgbe_attach: end");
+       return (0);
+err_late:
+       ixgbe_free_transmit_structures(adapter);
+       ixgbe_free_receive_structures(adapter);
+err_out:
+       if (adapter->ifp != NULL)
+               if_free(adapter->ifp);
+       ixgbe_free_pci_resources(adapter);
+       kfree(adapter->mta, M_DEVBUF);
+       return (error);
+
+}
+
+/*********************************************************************
+ *  Device removal routine
+ *
+ *  The detach entry point is called when the driver is being removed.
+ *  This routine stops the adapter and deallocates all the resources
+ *  that were allocated for driver operation.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixgbe_detach(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+       struct ix_queue *que = adapter->queues;
+       u32     ctrl_ext;
+
+       INIT_DEBUGOUT("ixgbe_detach: begin");
+
+#ifdef NET_VLAN
+       /* Make sure VLANS are not using driver */
+       if (adapter->ifp->if_vlantrunk != NULL) {
+               device_printf(dev,"Vlan in use, detach first\n");
+               return (EBUSY);
+       }
+#endif
+
+       IXGBE_CORE_LOCK(adapter);
+       ixgbe_stop(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+
+       for (int i = 0; i < adapter->num_queues; i++, que++) {
+               if (que->tq) {
+                       taskqueue_drain(que->tq, &que->que_task);
+                       taskqueue_free(que->tq);
+               }
+       }
+
+       /* Drain the Link queue */
+       if (adapter->tq) {
+               taskqueue_drain(adapter->tq, &adapter->link_task);
+               taskqueue_drain(adapter->tq, &adapter->mod_task);
+               taskqueue_drain(adapter->tq, &adapter->msf_task);
+#ifdef IXGBE_FDIR
+               taskqueue_drain(adapter->tq, &adapter->fdir_task);
+#endif
+               taskqueue_free(adapter->tq);
+       }
+
+       /* let hardware know driver is unloading */
+       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+       ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+       /* Unregister VLAN events */
+#ifdef NET_VLAN
+       if (adapter->vlan_attach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
+       if (adapter->vlan_detach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+#endif
+
+       ether_ifdetach(adapter->ifp);
+       callout_stop(&adapter->timer);
+       lockuninit(&adapter->core_lock);
+#ifdef DEV_NETMAP
+       netmap_detach(adapter->ifp);
+#endif /* DEV_NETMAP */
+       ixgbe_free_pci_resources(adapter);
+       bus_generic_detach(dev);
+       if_free(adapter->ifp);
+
+       ixgbe_free_transmit_structures(adapter);
+       ixgbe_free_receive_structures(adapter);
+       kfree(adapter->mta, M_DEVBUF);
+       sysctl_ctx_free(&adapter->sysctl_ctx);
+       
+       spin_uninit(&adapter->mcast_spin);
+       IXGBE_CORE_LOCK_DESTROY(adapter);
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+ixgbe_shutdown(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+       IXGBE_CORE_LOCK(adapter);
+       ixgbe_stop(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+       return (0);
+}
+
+
+/*********************************************************************
+ *  Transmit entry point
+ *
+ *  ixgbe_start is called by the stack to initiate a transmit.
+ *  The driver will remain in this routine as long as there are
+ *  packets to transmit and transmit resources are available.
+ *  In case resources are not available, the stack is notified and
+ *  the packet is requeued.
+ **********************************************************************/
+
+static void
+ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
+{
+       struct mbuf    *m_head;
+       struct adapter *adapter = txr->adapter;
+
+       IXGBE_TX_LOCK_ASSERT(txr);
+
+       if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
+               return;
+       if (!adapter->link_active)
+               return;
+
+       while (!ifq_is_empty(&ifp->if_snd)) {
+               if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) {
+                       txr->queue_status |= IXGBE_QUEUE_DEPLETED;
+                       break;
+                }
+
+               m_head = ifq_dequeue(&ifp->if_snd, NULL);
+               if (m_head == NULL)
+                       break;
+
+               if (ixgbe_xmit(txr, &m_head)) {
+#if 0 /* XXX: prepend to an ALTQ queue ? */
+                       if (m_head != NULL)
+                               IF_PREPEND(&ifp->if_snd, m_head);
+#endif
+                       if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
+                               txr->queue_status |= IXGBE_QUEUE_DEPLETED;
+                       break;
+               }
+               /* Send a copy of the frame to the BPF listener */
+               ETHER_BPF_MTAP(ifp, m_head);
+
+               /* Set watchdog on */
+               txr->watchdog_time = ticks;
+               txr->queue_status = IXGBE_QUEUE_WORKING;
+
+       }
+       return;
+}
+
+/*
+ * Legacy TX start - called by the stack; this
+ * always uses the first tx ring, and should
+ * not be used with multiqueue tx enabled.
+ */
+static void
+ixgbe_start(struct ifnet *ifp)
+{
+       struct adapter *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+
+       if (ifp->if_flags & IFF_RUNNING) {
+               IXGBE_TX_LOCK(txr);
+               ixgbe_start_locked(txr, ifp);
+               IXGBE_TX_UNLOCK(txr);
+       }
+       return;
+}
+
+#if 0 /* __FreeBSD_version >= 800000 */
+/*
+** Multiqueue Transmit driver
+**
+*/
+static int
+ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct ix_queue *que;
+       struct tx_ring  *txr;
+       int             i = 0, err = 0;
+
+       /* Which queue to use */
+       if ((m->m_flags & M_FLOWID) != 0)
+               i = m->m_pkthdr.flowid % adapter->num_queues;
+       else
+               i = curcpu % adapter->num_queues;
+
+       txr = &adapter->tx_rings[i];
+       que = &adapter->queues[i];
+
+       if (((txr->queue_status & IXGBE_QUEUE_DEPLETED) == 0) &&
+           IXGBE_TX_TRYLOCK(txr)) {
+               err = ixgbe_mq_start_locked(ifp, txr, m);
+               IXGBE_TX_UNLOCK(txr);
+       } else {
+               err = drbr_enqueue(ifp, txr->br, m);
+               taskqueue_enqueue(que->tq, &que->que_task);
+       }
+
+       return (err);
+}
+
+static int
+ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+{
+       struct adapter  *adapter = txr->adapter;
+        struct mbuf     *next;
+        int             enqueued, err = 0;
+
+       if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
+           (txr->queue_status == IXGBE_QUEUE_DEPLETED) ||
+           adapter->link_active == 0) {
+               if (m != NULL)
+                       err = drbr_enqueue(ifp, txr->br, m);
+               return (err);
+       }
+
+       enqueued = 0;
+       if (m == NULL) {
+               next = drbr_dequeue(ifp, txr->br);
+       } else if (drbr_needs_enqueue(ifp, txr->br)) {
+               if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+                       return (err);
+               next = drbr_dequeue(ifp, txr->br);
+       } else
+               next = m;
+
+       /* Process the queue */
+       while (next != NULL) {
+               if ((err = ixgbe_xmit(txr, &next)) != 0) {
+                       if (next != NULL)
+                               err = drbr_enqueue(ifp, txr->br, next);
+                       break;
+               }
+               enqueued++;
+               drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+               /* Send a copy of the frame to the BPF listener */
+               ETHER_BPF_MTAP(ifp, next);
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+               if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
+                       ixgbe_txeof(txr);
+               if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
+                       txr->queue_status |= IXGBE_QUEUE_DEPLETED;
+                       break;
+               }
+               next = drbr_dequeue(ifp, txr->br);
+       }
+
+       if (enqueued > 0) {
+               /* Set watchdog on */
+               txr->queue_status |= IXGBE_QUEUE_WORKING;
+               txr->watchdog_time = ticks;
+       }
+
+       if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
+               ixgbe_txeof(txr);
+
+       return (err);
+}
+
+/*
+** Flush all ring buffers
+*/
+static void
+ixgbe_qflush(struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct mbuf     *m;
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IXGBE_TX_LOCK(txr);
+               while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+                       m_freem(m);
+               IXGBE_TX_UNLOCK(txr);
+       }
+       if_qflush(ifp);
+}
+#endif /* __FreeBSD_version >= 800000 */
+
+/*********************************************************************
+ *  Ioctl entry point
+ *
+ *  ixgbe_ioctl is called when the user wants to configure the
+ *  interface.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct ifreq    *ifr = (struct ifreq *) data;
+#if defined(INET) || defined(INET6)
+       struct ifaddr *ifa = (struct ifaddr *)data;
+       bool            avoid_reset = FALSE;
+#endif
+       int             error = 0;
+
+       switch (command) {
+
+        case SIOCSIFADDR:
+#ifdef INET
+               if (ifa->ifa_addr->sa_family == AF_INET)
+                       avoid_reset = TRUE;
+#endif
+#ifdef INET6
+               if (ifa->ifa_addr->sa_family == AF_INET6)
+                       avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+               /*
+               ** Calling init results in link renegotiation,
+               ** so we avoid doing it when possible.
+               */
+               if (avoid_reset) {
+                       ifp->if_flags |= IFF_UP;
+                       if (!(ifp->if_flags & IFF_RUNNING))
+                               ixgbe_init(adapter);
+                       if (!(ifp->if_flags & IFF_NOARP))
+                               arp_ifinit(ifp, ifa);
+               } else
+                       error = ether_ioctl(ifp, command, data);
+#endif
+               break;
+       case SIOCSIFMTU:
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+               if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+                       error = EINVAL;
+               } else {
+                       IXGBE_CORE_LOCK(adapter);
+                       ifp->if_mtu = ifr->ifr_mtu;
+                       adapter->max_frame_size =
+                               ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+                       ixgbe_init_locked(adapter);
+                       IXGBE_CORE_UNLOCK(adapter);
+               }
+               break;
+       case SIOCSIFFLAGS:
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+               IXGBE_CORE_LOCK(adapter);
+               if (ifp->if_flags & IFF_UP) {
+                       if ((ifp->if_flags & IFF_RUNNING)) {
+                               if ((ifp->if_flags ^ adapter->if_flags) &
+                                   (IFF_PROMISC | IFF_ALLMULTI)) {
+                                       ixgbe_set_promisc(adapter);
+                                }
+                       } else
+                               ixgbe_init_locked(adapter);
+               } else
+                       if (ifp->if_flags & IFF_RUNNING)
+                               ixgbe_stop(adapter);
+               adapter->if_flags = ifp->if_flags;
+               IXGBE_CORE_UNLOCK(adapter);
+               break;
+       case SIOCADDMULTI:
+       case SIOCDELMULTI:
+               IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
+               if (ifp->if_flags & IFF_RUNNING) {
+                       IXGBE_CORE_LOCK(adapter);
+                       ixgbe_disable_intr(adapter);
+                       ixgbe_set_multi(adapter);
+                       ixgbe_enable_intr(adapter);
+                       IXGBE_CORE_UNLOCK(adapter);
+               }
+               break;
+       case SIOCSIFMEDIA:
+       case SIOCGIFMEDIA:
+               IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+               error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
+               break;
+       case SIOCSIFCAP:
+       {
+               int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+               if (mask & IFCAP_HWCSUM)
+                       ifp->if_capenable ^= IFCAP_HWCSUM;
+#if 0 /* NET_TSO */
+               if (mask & IFCAP_TSO4)
+                       ifp->if_capenable ^= IFCAP_TSO4;
+               if (mask & IFCAP_TSO6)
+                       ifp->if_capenable ^= IFCAP_TSO6;
+#endif
+#if 0 /* NET_LRO */
+               if (mask & IFCAP_LRO)
+                       ifp->if_capenable ^= IFCAP_LRO;
+#endif
+               if (mask & IFCAP_VLAN_HWTAGGING)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+               if (mask & IFCAP_VLAN_HWFILTER)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+#if 0 /* NET_TSO */
+               if (mask & IFCAP_VLAN_HWTSO)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+#endif
+               if (ifp->if_flags & IFF_RUNNING) {
+                       IXGBE_CORE_LOCK(adapter);
+                       ixgbe_init_locked(adapter);
+                       IXGBE_CORE_UNLOCK(adapter);
+               }
+#if 0
+               VLAN_CAPABILITIES(ifp);
+#endif
+               break;
+       }
+
+       default:
+               IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
+               error = ether_ioctl(ifp, command, data);
+               break;
+       }
+
+       return (error);
+}
+
+/*********************************************************************
+ *  Init entry point
+ *
+ *  This routine is used in two ways. It is used by the stack as
+ *  init entry point in network interface structure. It is also used
+ *  by the driver as a hw/sw initialization routine to get to a
+ *  consistent state.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
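+/* The maximum frame size field occupies the upper 16 bits of MHADD */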
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+static void
+ixgbe_init_locked(struct adapter *adapter)
+{
+       struct ifnet   *ifp = adapter->ifp;
+       device_t        dev = adapter->dev;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32             k, txdctl, mhadd, gpie;
+       u32             rxdctl, rxctrl;
+
+       KKASSERT(lockstatus(&adapter->core_lock, curthread) != 0);
+       INIT_DEBUGOUT("ixgbe_init: begin");
+       hw->adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+        callout_stop(&adapter->timer);
+
+        /* reprogram the RAR[0] in case user changed it. */
+        ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+
+       /* Get the latest mac address, User can use a LAA */
+       bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
+             IXGBE_ETH_LENGTH_OF_ADDRESS);
+       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
+       hw->addr_ctrl.rar_used_count = 1;
+
+       /* Set the various hardware offload abilities */
+       ifp->if_hwassist = 0;
+#if 0 /* NET_TSO */
+       if (ifp->if_capenable & IFCAP_TSO)
+               ifp->if_hwassist |= CSUM_TSO;
+#endif
+       if (ifp->if_capenable & IFCAP_TXCSUM) {
+               ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if 0
+               if (hw->mac.type != ixgbe_mac_82598EB)
+                       ifp->if_hwassist |= CSUM_SCTP;
+#endif
+       }
+
+       /* Prepare transmit descriptors and buffers */
+       if (ixgbe_setup_transmit_structures(adapter)) {
+               device_printf(dev,"Could not setup transmit structures\n");
+               ixgbe_stop(adapter);
+               return;
+       }
+
+       ixgbe_init_hw(hw);
+       ixgbe_initialize_transmit_units(adapter);
+
+       /* Setup Multicast table */
+       ixgbe_set_multi(adapter);
+
+       /*
+       ** Determine the correct mbuf pool
+       ** for doing jumbo/headersplit
+       */
+       if (adapter->max_frame_size <= 2048)
+               adapter->rx_mbuf_sz = MCLBYTES;
+       else if (adapter->max_frame_size <= 4096)
+               adapter->rx_mbuf_sz = MJUMPAGESIZE;
+       else if (adapter->max_frame_size <= 9216)
+               adapter->rx_mbuf_sz = MJUM9BYTES;
+       else
+               adapter->rx_mbuf_sz = MJUM16BYTES;
+
+       /* Prepare receive descriptors and buffers */
+       if (ixgbe_setup_receive_structures(adapter)) {
+               device_printf(dev,"Could not setup receive structures\n");
+               ixgbe_stop(adapter);
+               return;
+       }
+
+       /* Configure RX settings */
+       ixgbe_initialize_receive_units(adapter);
+
+       gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+
+       /* Enable Fan Failure Interrupt */
+       gpie |= IXGBE_SDP1_GPIEN;
+
+       /* Add for Module detection */
+       if (hw->mac.type == ixgbe_mac_82599EB)
+               gpie |= IXGBE_SDP2_GPIEN;
+
+       /* Thermal Failure Detection */
+       if (hw->mac.type == ixgbe_mac_X540)
+               gpie |= IXGBE_SDP0_GPIEN;
+
+       if (adapter->msix > 1) {
+               /* Enable Enhanced MSIX mode */
+               gpie |= IXGBE_GPIE_MSIX_MODE;
+               gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
+                   IXGBE_GPIE_OCD;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /* Set MTU size */
+       if (ifp->if_mtu > ETHERMTU) {
+               mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+               mhadd &= ~IXGBE_MHADD_MFS_MASK;
+               mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
+               IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+       }
+       
+       /* Now enable all the queues */
+
+       for (int i = 0; i < adapter->num_queues; i++) {
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+               /* Set WTHRESH to 8, burst writeback */
+               txdctl |= (8 << 16);
+               /*
+                * When the internal queue falls below PTHRESH (32),
+                * start prefetching as long as there are at least
+                * HTHRESH (1) buffers ready. The values are taken
+                * from the Intel linux driver 3.8.21.
+                * Prefetching enables tx line rate even with 1 queue.
+                */
+               txdctl |= (32 << 0) | (1 << 8);
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
+       }
+
+       for (int i = 0; i < adapter->num_queues; i++) {
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+               if (hw->mac.type == ixgbe_mac_82598EB) {
+                       /*
+                       ** PTHRESH = 21
+                       ** HTHRESH = 4
+                       ** WTHRESH = 8
+                       */
+                       rxdctl &= ~0x3FFFFF;
+                       rxdctl |= 0x080420;
+               }
+               rxdctl |= IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
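+               /* Wait up to 10ms for the queue enable bit to latch */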
+               for (k = 0; k < 10; k++) {
+                       if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
+                           IXGBE_RXDCTL_ENABLE)
+                               break;
+                       else
+                               msec_delay(1);
+               }
+               wmb();
+#ifdef DEV_NETMAP
+               /*
+                * In netmap mode, we must preserve the buffers made
+                * available to userspace before the if_init()
+                * (this is true by default on the TX side, because
+                * init makes all buffers available to userspace).
+                *
+                * netmap_reset() and the device specific routines
+                * (e.g. ixgbe_setup_receive_rings()) map these
+                * buffers at the end of the NIC ring, so here we
+                * must set the RDT (tail) register to make sure
+                * they are not overwritten.
+                *
+                * In this driver the NIC ring starts at RDH = 0,
+                * RDT points to the last slot available for reception (?),
+                * so RDT = num_rx_desc - 1 means the whole ring is available.
+                */
+               if (ifp->if_capenable & IFCAP_NETMAP) {
+                       struct netmap_adapter *na = NA(adapter->ifp);
+                       struct netmap_kring *kring = &na->rx_rings[i];
+                       int t = na->num_rx_desc - 1 - kring->nr_hwavail;
+
+                       IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
+               } else
+#endif /* DEV_NETMAP */
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
+       }
+
+       /* Set up VLAN support and filter */
+       ixgbe_setup_vlan_hw_support(adapter);
+
+       /* Enable Receive engine */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               rxctrl |= IXGBE_RXCTRL_DMBYPS;
+       rxctrl |= IXGBE_RXCTRL_RXEN;
+       ixgbe_enable_rx_dma(hw, rxctrl);
+
+       callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
+
+       /* Set up MSI/X routing */
+       if (ixgbe_enable_msix)  {
+               ixgbe_configure_ivars(adapter);
+               /* Set up auto-mask */
+               if (hw->mac.type == ixgbe_mac_82598EB)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+               else {
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+               }
+       } else {  /* Simple settings for Legacy/MSI */
+                ixgbe_set_ivar(adapter, 0, 0, 0);
+                ixgbe_set_ivar(adapter, 0, 0, 1);
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+       }
+
+#ifdef IXGBE_FDIR
+       /* Init Flow director */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               u32 hdrm = 64 << fdir_pballoc;
+
+               hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
+               ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
+       }
+#endif
+
+       /*
+       ** Check on any SFP devices that
+       ** need to be kick-started
+       */
+       if (hw->phy.type == ixgbe_phy_none) {
+               int err = hw->phy.ops.identify(hw);
+               if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+                       device_printf(dev,
+                           "Unsupported SFP+ module type was detected.\n");
+                       return;
+               }
+       }
+
+       /* Set moderation on the Link interrupt */
+       IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
+
+       /* Config/Enable Link */
+       ixgbe_config_link(adapter);
+
+       /* And now turn on interrupts */
+       ixgbe_enable_intr(adapter);
+
+       /* Now inform the stack we're ready */
+       ifp->if_flags |= IFF_RUNNING;
+       ifp->if_flags &= ~IFF_OACTIVE;
+
+       return;
+}
+
+static void
+ixgbe_init(void *arg)
+{
+       struct adapter *adapter = arg;
+
+       IXGBE_CORE_LOCK(adapter);
+       ixgbe_init_locked(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+       return;
+}
+
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+
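+/*
+** Enable one queue's interrupt vector. The 82598 keeps all queue
+** bits in its single EIMS register; newer MACs spread the 64
+** possible queue bits across the two EIMS_EX registers.
+*/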
+static inline void
+ixgbe_enable_queue(struct adapter *adapter, u32 vector)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64     queue = ((u64)1 << vector);
+       u32     mask;
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+                IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+       } else {
+                mask = (queue & 0xFFFFFFFF);
+                if (mask)
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+                mask = (queue >> 32);
+                if (mask)
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+       }
+}
+
+static inline void
+ixgbe_disable_queue(struct adapter *adapter, u32 vector)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64     queue = ((u64)1 << vector);
+       u32     mask;
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+                IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+       } else {
+                mask = (queue & 0xFFFFFFFF);
+                if (mask)
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+                mask = (queue >> 32);
+                if (mask)
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+       }
+}
+
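+/*
+** Writing EICS raises a software interrupt for the given queues;
+** the local timer uses this to rearm queue processing.
+*/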
+static inline void
+ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
+{
+       u32 mask;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & queues);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+       } else {
+               mask = (queues & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+               mask = (queues >> 32);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+       }
+}
+
+
+static void
+ixgbe_handle_que(void *context, int pending)
+{
+       struct ix_queue *que = context;
+       struct adapter  *adapter = que->adapter;
+       struct tx_ring  *txr = que->txr;
+       struct ifnet    *ifp = adapter->ifp;
+       bool            more;
+
+       if (ifp->if_flags & IFF_RUNNING) {
+               more = ixgbe_rxeof(que, adapter->rx_process_limit);
+               IXGBE_TX_LOCK(txr);
+               ixgbe_txeof(txr);
+#if 0 /*__FreeBSD_version >= 800000*/
+               if (!drbr_empty(ifp, txr->br))
+                       ixgbe_mq_start_locked(ifp, txr, NULL);
+#else
+               if (!ifq_is_empty(&ifp->if_snd))
+                       ixgbe_start_locked(txr, ifp);
+#endif
+               IXGBE_TX_UNLOCK(txr);
+               if (more) {
+                       taskqueue_enqueue(que->tq, &que->que_task);
+                       return;
+               }
+       }
+
+       /* Reenable this interrupt */
+       ixgbe_enable_queue(adapter, que->msix);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Legacy Interrupt Service routine
+ *
+ **********************************************************************/
+
+static void
+ixgbe_legacy_irq(void *arg)
+{
+       struct ix_queue *que = arg;
+       struct adapter  *adapter = que->adapter;
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct          tx_ring *txr = adapter->tx_rings;
+       bool            more_tx, more_rx;
+       u32             reg_eicr, loop = MAX_LOOP;
+
+
+       reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+       ++que->irqs;
+       if (reg_eicr == 0) {
+               ixgbe_enable_intr(adapter);
+               return;
+       }
+
+       more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
+
+       IXGBE_TX_LOCK(txr);
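+       /* Bound the TX cleanup loop so the handler cannot spin forever */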
+       do {
+               more_tx = ixgbe_txeof(txr);
+       } while (loop-- && more_tx);
+       IXGBE_TX_UNLOCK(txr);
+
+       if (more_rx || more_tx)
+               taskqueue_enqueue(que->tq, &que->que_task);
+
+       /* Check for fan failure */
+       if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+           (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
+                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
+                   "REPLACE IMMEDIATELY!!\n");
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
+       }
+
+       /* Link status change */
+       if (reg_eicr & IXGBE_EICR_LSC)
+               taskqueue_enqueue(adapter->tq, &adapter->link_task);
+
+       ixgbe_enable_intr(adapter);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  MSIX Queue Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixgbe_msix_que(void *arg)
+{
+       struct ix_queue *que = arg;
+       struct adapter  *adapter = que->adapter;
+       struct tx_ring  *txr = que->txr;
+       struct rx_ring  *rxr = que->rxr;
+       bool            more_tx, more_rx;
+       u32             newitr = 0;
+
+       ixgbe_disable_queue(adapter, que->msix);
+       ++que->irqs;
+
+       more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
+
+       IXGBE_TX_LOCK(txr);
+       more_tx = ixgbe_txeof(txr);
+       /*
+       ** Make certain that if the stack 
+       ** has anything queued the task gets
+       ** scheduled to handle it.
+       */
+#if 0
+#if __FreeBSD_version < 800000
+       if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
+#else
+       if (!drbr_empty(adapter->ifp, txr->br))
+#endif
+#endif
+       if (!ifq_is_empty(&adapter->ifp->if_snd))
+               more_tx = 1;
+       IXGBE_TX_UNLOCK(txr);
+
+       /* Do AIM now? */
+
+       if (ixgbe_enable_aim == FALSE)
+               goto no_calc;
+       /*
+       ** Do Adaptive Interrupt Moderation:
+        **  - Write out last calculated setting
+       **  - Calculate based on average size over
+       **    the last interval.
+       */
+        if (que->eitr_setting)
+                IXGBE_WRITE_REG(&adapter->hw,
+                    IXGBE_EITR(que->msix), que->eitr_setting);
+        que->eitr_setting = 0;
+
+        /* Idle, do nothing */
+        if ((txr->bytes == 0) && (rxr->bytes == 0))
+                goto no_calc;
+                                
+       if ((txr->bytes) && (txr->packets))
+                       newitr = txr->bytes/txr->packets;
+       if ((rxr->bytes) && (rxr->packets))
+               newitr = max(newitr,
+                   (rxr->bytes / rxr->packets));
+       newitr += 24; /* account for hardware frame, crc */
+
+       /* set an upper boundary */
+       newitr = min(newitr, 3000);
+
+       /* Be nice to the mid range */
+       if ((newitr > 300) && (newitr < 1200))
+               newitr = (newitr / 3);
+       else
+               newitr = (newitr / 2);
+
+        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+                newitr |= newitr << 16;
+        else
+                newitr |= IXGBE_EITR_CNT_WDIS;
+                 
+        /* save for next interrupt */
+        que->eitr_setting = newitr;
+
+        /* Reset state */
+        txr->bytes = 0;
+        txr->packets = 0;
+        rxr->bytes = 0;
+        rxr->packets = 0;
+
+no_calc:
+       if (more_tx || more_rx)
+               taskqueue_enqueue(que->tq, &que->que_task);
+       else /* Reenable this interrupt */
+               ixgbe_enable_queue(adapter, que->msix);
+       return;
+}
+
+
+static void
+ixgbe_msix_link(void *arg)
+{
+       struct adapter  *adapter = arg;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32             reg_eicr;
+
+       ++adapter->link_irq;
+
+       /* First get the cause */
+       reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+       /* Clear interrupt with write */
+       IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
+
+       /* Link status change */
+       if (reg_eicr & IXGBE_EICR_LSC)
+               taskqueue_enqueue(adapter->tq, &adapter->link_task);
+
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
+#ifdef IXGBE_FDIR
+               if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
+                       /* This is probably overkill :) */
+                       if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
+                               return;
+                       /* Clear the interrupt */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
+                       /* Turn off the interface */
+                       adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+                       taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
+               } else
+#endif
+               if (reg_eicr & IXGBE_EICR_ECC) {
+                       device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
+                           "Please Reboot!!\n");
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+               } else if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
+                       /* Clear the interrupt */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+                       taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+               } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
+                       /* Clear the interrupt */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+                       taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+               }
+        } 
+
+       /* Check for fan failure */
+       if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
+           (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
+                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
+                   "REPLACE IMMEDIATELY!!\n");
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+       }
+
+       /* Check for over temp condition */
+       if ((hw->mac.type == ixgbe_mac_X540) &&
+           (reg_eicr & IXGBE_EICR_GPI_SDP0)) {
+                device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
+                   "PHY IS SHUT DOWN!!\n");
+                device_printf(adapter->dev, "System shutdown required\n");
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+       }
+
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called whenever the user queries the status of
+ *  the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+       struct adapter *adapter = ifp->if_softc;
+
+       INIT_DEBUGOUT("ixgbe_media_status: begin");
+       IXGBE_CORE_LOCK(adapter);
+       ixgbe_update_link_status(adapter);
+
+       ifmr->ifm_status = IFM_AVALID;
+       ifmr->ifm_active = IFM_ETHER;
+
+       if (!adapter->link_active) {
+               IXGBE_CORE_UNLOCK(adapter);
+               return;
+       }
+
+       ifmr->ifm_status |= IFM_ACTIVE;
+
+       switch (adapter->link_speed) {
+               case IXGBE_LINK_SPEED_100_FULL:
+                       ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
+                       break;
+               case IXGBE_LINK_SPEED_1GB_FULL:
+                       ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+                       break;
+               case IXGBE_LINK_SPEED_10GB_FULL:
+                       ifmr->ifm_active |= adapter->optics | IFM_FDX;
+                       break;
+       }
+
+       IXGBE_CORE_UNLOCK(adapter);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called when the user changes speed/duplex using
+ *  media/mediaopt option with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixgbe_media_change(struct ifnet * ifp)
+{
+       struct adapter *adapter = ifp->if_softc;
+       struct ifmedia *ifm = &adapter->media;
+
+       INIT_DEBUGOUT("ixgbe_media_change: begin");
+
+       if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+               return (EINVAL);
+
+        switch (IFM_SUBTYPE(ifm->ifm_media)) {
+        case IFM_AUTO:
+                adapter->hw.phy.autoneg_advertised =
+                   IXGBE_LINK_SPEED_100_FULL |
+                   IXGBE_LINK_SPEED_1GB_FULL |
+                   IXGBE_LINK_SPEED_10GB_FULL;
+                break;
+        default:
+                device_printf(adapter->dev, "Only auto media type\n");
+               return (EINVAL);
+        }
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  This routine maps the mbufs to tx descriptors, allowing the
+ *  TX engine to transmit the packets. 
+ *     - return 0 on success, positive on failure
+ *
+ **********************************************************************/
+
+static int
+ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
+{
+       struct adapter  *adapter = txr->adapter;
+       u32             olinfo_status = 0, cmd_type_len;
+       u32             paylen = 0;
+       int             i, j, error, nsegs;
+       int             first, last = 0;
+       struct mbuf     *m_head;
+       bus_dma_segment_t segs[1];
+       bus_dmamap_t    map;
+       struct ixgbe_tx_buf *txbuf;
+       union ixgbe_adv_tx_desc *txd = NULL;
+
+       m_head = *m_headp;
+
+       /* Basic descriptor defines */
+        cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
+           IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
+
+       if (m_head->m_flags & M_VLANTAG)
+               cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+        /*
+         * Capture the first descriptor used: it will later hold
+         * the index of the descriptor we ask the hardware to
+         * report back on completion (the EOP index)
+         */
+        first = txr->next_avail_desc;
+       txbuf = &txr->tx_buffers[first];
+       map = txbuf->map;
+
+       /*
+        * Map the packet for DMA.
+        */
+       error = bus_dmamap_load_mbuf_segment(txr->txtag, map,
+           *m_headp, segs, 1, &nsegs, BUS_DMA_NOWAIT);
+
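+       /*
+        * EFBIG means the chain has more segments than the map allows;
+        * defragment it into a smaller chain and retry the load once.
+        */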
+       if (error == EFBIG) {
+               struct mbuf *m;
+
+               m = m_defrag(*m_headp, MB_DONTWAIT);
+               if (m == NULL) {
+                       adapter->mbuf_defrag_failed++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (ENOBUFS);
+               }
+               *m_headp = m;
+
+               /* Try it again */
+               error = bus_dmamap_load_mbuf_segment(txr->txtag, map,
+                   *m_headp, segs, 1, &nsegs, BUS_DMA_NOWAIT);
+
+               if (error == ENOMEM) {
+                       adapter->no_tx_dma_setup++;
+                       return (error);
+               } else if (error != 0) {
+                       adapter->no_tx_dma_setup++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (error);
+               }
+       } else if (error == ENOMEM) {
+               adapter->no_tx_dma_setup++;
+               return (error);
+       } else if (error != 0) {
+               adapter->no_tx_dma_setup++;
+               m_freem(*m_headp);
+               *m_headp = NULL;
+               return (error);
+       }
+
+       /* Make certain there are enough descriptors */
+       if (nsegs > txr->tx_avail - 2) {
+               txr->no_desc_avail++;
+               error = ENOBUFS;
+               goto xmit_fail;
+       }
+       m_head = *m_headp;
+
+       /*
+       ** Set up the appropriate offload context;
+       ** this becomes the first descriptor of
+       ** a packet.
+       */
+#if 0 /* NET_TSO */
+       if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+               if (ixgbe_tso_setup(txr, m_head, &paylen, &olinfo_status)) {
+                       cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+                       olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+                       olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
+                       ++adapter->tso_tx;
+               } else
+                       return (ENXIO);
+       } else if (ixgbe_tx_ctx_setup(txr, m_head))
+#endif
+       if (ixgbe_tx_ctx_setup(txr, m_head))
+               olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+
+#ifdef IXGBE_IEEE1588
+       /*
+        * This is changing soon to an mtag detection; once an mbuf
+        * can be checked for a TSTAMP mtag, set:
+        *
+        *      cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
+        */
+#endif
+
+#ifdef IXGBE_FDIR
+       /* Do the flow director magic */
+       if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
+               ++txr->atr_count;
+               if (txr->atr_count >= atr_sample_rate) {
+                       ixgbe_atr(txr, m_head);
+                       txr->atr_count = 0;
+               }
+       }
+#endif
+        /* Record payload length */
+       if (paylen == 0)
+               olinfo_status |= m_head->m_pkthdr.len <<
+                   IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+       i = txr->next_avail_desc;
+       for (j = 0; j < nsegs; j++) {
+               bus_size_t seglen;
+               bus_addr_t segaddr;
+
+               txbuf = &txr->tx_buffers[i];
+               txd = &txr->tx_base[i];
+               seglen = segs[j].ds_len;
+               segaddr = htole64(segs[j].ds_addr);
+
+               txd->read.buffer_addr = segaddr;
+               txd->read.cmd_type_len = htole32(txr->txd_cmd |
+                   cmd_type_len |seglen);
+               txd->read.olinfo_status = htole32(olinfo_status);
+               last = i; /* descriptor that will get completion IRQ */
+
+               if (++i == adapter->num_tx_desc)
+                       i = 0;
+
+               txbuf->m_head = NULL;
+               txbuf->eop_index = -1;
+       }
+
+       txd->read.cmd_type_len |=
+           htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
+       txr->tx_avail -= nsegs;
+       txr->next_avail_desc = i;
+
+       txbuf->m_head = m_head;
+       /* Swap the dma map between the first and last descriptor */
+       txr->tx_buffers[first].map = txbuf->map;
+       txbuf->map = map;
+       bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
+
+        /* Set the index of the descriptor that will be marked done */
+        txbuf = &txr->tx_buffers[first];
+       txbuf->eop_index = last;
+
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       /*
+        * Advance the Transmit Descriptor Tail (TDT); this tells the
+        * hardware that this frame is available to transmit.
+        */
+       ++txr->total_packets;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
+
+       return (0);
+
+xmit_fail:
+       bus_dmamap_unload(txr->txtag, txbuf->map);
+       return (error);
+
+}
+
+static void
+ixgbe_set_promisc(struct adapter *adapter)
+{
+       u_int32_t       reg_rctl;
+       struct ifnet   *ifp = adapter->ifp;
+
+       reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+       reg_rctl &= (~IXGBE_FCTRL_UPE);
+       reg_rctl &= (~IXGBE_FCTRL_MPE);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
+
+       if (ifp->if_flags & IFF_PROMISC) {
+               reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
+       } else if (ifp->if_flags & IFF_ALLMULTI) {
+               reg_rctl |= IXGBE_FCTRL_MPE;
+               reg_rctl &= ~IXGBE_FCTRL_UPE;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
+       }
+       return;
+}
+
+
+/*********************************************************************
+ *  Multicast Update
+ *
+ *  This routine is called whenever the multicast address list is updated.
+ *
+ **********************************************************************/
+#define IXGBE_RAR_ENTRIES 16
+
+static void
+ixgbe_set_multi(struct adapter *adapter)
+{
+       u32     fctrl;
+       u8      *mta;
+       u8      *update_ptr;
+       struct  ifmultiaddr *ifma;
+       int     mcnt = 0;
+       struct ifnet   *ifp = adapter->ifp;
+
+       IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
+
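+       /*
+        * Flatten the interface multicast list into the mta byte array
+        * that the shared code walks via ixgbe_mc_array_itr().
+        */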
+       mta = adapter->mta;
+       bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+           MAX_NUM_MULTICAST_ADDRESSES);
+
+       fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+       fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+       if (ifp->if_flags & IFF_PROMISC)
+               fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+       else if (ifp->if_flags & IFF_ALLMULTI) {
+               fctrl |= IXGBE_FCTRL_MPE;
+               fctrl &= ~IXGBE_FCTRL_UPE;
+       } else
+               fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+       
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
+
+       spin_lock(&adapter->mcast_spin);
+       TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+               if (ifma->ifma_addr->sa_family != AF_LINK)
+                       continue;
+               bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+                   &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+                   IXGBE_ETH_LENGTH_OF_ADDRESS);
+               mcnt++;
+       }
+       spin_unlock(&adapter->mcast_spin);
+
+       update_ptr = mta;
+       ixgbe_update_mc_addr_list(&adapter->hw,
+           update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
+
+       return;
+}
+
+/*
+ * This is an iterator function needed by the shared multicast
+ * code. It simply feeds the shared code routine the addresses
+ * built up in the ixgbe_set_multi() array, one by one.
+ */
+static u8 *
+ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
+{
+       u8 *addr = *update_ptr;
+       u8 *newptr;
+       *vmdq = 0;
+
+       newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
+       *update_ptr = newptr;
+       return addr;
+}
+
+
+/*********************************************************************
+ *  Timer routine
+ *
+ *  This routine checks the link status, updates statistics,
+ *  and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixgbe_local_timer(void *arg)
+{
+       struct adapter  *adapter = arg;
+       device_t        dev = adapter->dev;
+       struct ifnet    *ifp = adapter->ifp;
+       struct ix_queue *que = adapter->queues;
+       struct tx_ring  *txr = adapter->tx_rings;
+       int             hung, busy, paused;
+
+       lockmgr(&adapter->core_lock, LK_EXCLUSIVE);
+       KKASSERT(lockstatus(&adapter->core_lock, curthread) != 0);
+       hung = busy = paused = 0;
+
+       /* Check for pluggable optics */
+       if (adapter->sfp_probe)
+               if (!ixgbe_sfp_probe(adapter))
+                       goto out; /* Nothing to do */
+
+       ixgbe_update_link_status(adapter);
+       ixgbe_update_stats_counters(adapter);
+
+       /*
+        * If the interface has been paused
+        * then don't do the watchdog check
+        */
+       if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
+               paused = 1;
+
+       /*
+       ** Check the TX queues status
+       **      - central locked handling of OACTIVE
+       **      - watchdog only if all queues show hung
+       */          
+       for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
+               if ((txr->queue_status & IXGBE_QUEUE_HUNG) &&
+                   (paused == 0))
+                       ++hung;
+               if (txr->queue_status & IXGBE_QUEUE_DEPLETED)
+                       ++busy;
+               if ((txr->queue_status & IXGBE_QUEUE_IDLE) == 0)
+                       taskqueue_enqueue(que->tq, &que->que_task);
+        }
+       /* Only truly watchdog if all queues show hung */
+        if (hung == adapter->num_queues)
+                goto watchdog;
+       /* Only turn off the stack flow when ALL are depleted */
+        if (busy == adapter->num_queues)
+                ifp->if_flags |= IFF_OACTIVE;
+        else if ((ifp->if_flags & IFF_OACTIVE) &&
+            (busy < adapter->num_queues))
+                ifp->if_flags &= ~IFF_OACTIVE;
+
+out:
+       ixgbe_rearm_queues(adapter, adapter->que_mask);
+       callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
+       lockmgr(&adapter->core_lock, LK_RELEASE);
+       return;
+
+watchdog:
+       device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+       device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
+           IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
+           IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
+       device_printf(dev, "TX(%d) desc avail = %d, "
+           "Next TX to Clean = %d\n",
+           txr->me, txr->tx_avail, txr->next_to_clean);
+       adapter->ifp->if_flags &= ~IFF_RUNNING;
+       adapter->watchdog_events++;
+       ixgbe_init_locked(adapter);
+
+       lockmgr(&adapter->core_lock, LK_RELEASE);
+}
+
+/*
+** Note: this routine updates the OS on the link state;
+**     the real check of the hardware only happens with
+**     a link interrupt.
+*/
+static void
+ixgbe_update_link_status(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       struct tx_ring *txr = adapter->tx_rings;
+       device_t dev = adapter->dev;
+
+
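+       /* A link_speed of 128 is IXGBE_LINK_SPEED_10GB_FULL (0x80) */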
+       if (adapter->link_up) {
+               if (adapter->link_active == FALSE) {
+                       if (bootverbose)
+                               device_printf(dev, "Link is up %d Gbps %s\n",
+                                   ((adapter->link_speed == 128) ? 10 : 1),
+                                   "Full Duplex");
+                       adapter->link_active = TRUE;
+                       ifp->if_link_state = LINK_STATE_UP;
+                       if_link_state_change(ifp);
+               }
+       } else { /* Link down */
+               if (adapter->link_active == TRUE) {
+                       if (bootverbose)
+                               device_printf(dev,"Link is Down\n");
+                       ifp->if_link_state = LINK_STATE_DOWN;
+                       if_link_state_change(ifp);
+                       adapter->link_active = FALSE;
+                       for (int i = 0; i < adapter->num_queues;
+                           i++, txr++)
+                               txr->queue_status = IXGBE_QUEUE_IDLE;
+               }
+       }
+
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by issuing a
+ *  global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+ixgbe_stop(void *arg)
+{
+       struct ifnet   *ifp;
+       struct adapter *adapter = arg;
+       struct ixgbe_hw *hw = &adapter->hw;
+       ifp = adapter->ifp;
+
+       KKASSERT(lockstatus(&adapter->core_lock, curthread) != 0);
+
+       INIT_DEBUGOUT("ixgbe_stop: begin\n");
+       ixgbe_disable_intr(adapter);
+       callout_stop(&adapter->timer);
+
+       /* Let the stack know...*/
+       ifp->if_flags &= ~IFF_RUNNING;
+       ifp->if_flags |= IFF_OACTIVE;
+
+       ixgbe_reset_hw(hw);
+       hw->adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+       /* Turn off the laser */
+       if (hw->phy.multispeed_fiber)
+               ixgbe_disable_tx_laser(hw);
+
+       /* reprogram the RAR[0] in case user changed it. */
+       ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+ixgbe_identify_hardware(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       /* Save off the information about this board */
+       hw->vendor_id = pci_get_vendor(dev);
+       hw->device_id = pci_get_device(dev);
+       hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+       hw->subsystem_vendor_id =
+           pci_read_config(dev, PCIR_SUBVEND_0, 2);
+       hw->subsystem_device_id =
+           pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+       /* We need this here to set the num_segs below */
+       ixgbe_set_mac_type(hw);
+
+       /* Pick up the 82599 and VF settings */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               hw->phy.smart_speed = ixgbe_smart_speed;
+               adapter->num_segs = IXGBE_82599_SCATTER;
+       } else
+               adapter->num_segs = IXGBE_82598_SCATTER;
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Determine optic type
+ *
+ **********************************************************************/
+static void
+ixgbe_setup_optics(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int             layer;
+       
+       layer = ixgbe_get_supported_physical_layer(hw);
+
+       if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
+               adapter->optics = IFM_10G_T;
+               return;
+       }
+
+       if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
+               adapter->optics = IFM_1000_T;
+               return;
+       }
+
+       if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
+           IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
+               adapter->optics = IFM_10G_LR;
+               return;
+       }
+
+       if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
+               adapter->optics = IFM_10G_SR;
+               return;
+       }
+
+       if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
+               adapter->optics = IFM_10G_TWINAX;
+               return;
+       }
+
+       if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+           IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
+               adapter->optics = IFM_10G_CX4;
+               return;
+       }
+
+       /* If we get here just set the default */
+       adapter->optics = IFM_ETHER | IFM_AUTO;
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Setup the Legacy or MSI Interrupt handler
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_legacy(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       struct          ix_queue *que = adapter->queues;
+       int error, rid = 0;
+
+       /* MSI RID at 1 */
+       if (adapter->msix == 1)
+               rid = 1;
+
+       /* We allocate a single interrupt resource */
+       adapter->res = bus_alloc_resource_any(dev,
+            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (adapter->res == NULL) {
+               device_printf(dev, "Unable to allocate bus resource: "
+                   "interrupt\n");
+               return (ENXIO);
+       }
+
+       /*
+        * Try allocating a fast interrupt and the associated deferred
+        * processing contexts.
+        */
+       TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
+       que->tq = taskqueue_create("ixgbe_que", M_NOWAIT,
+            taskqueue_thread_enqueue, &que->tq);
+       taskqueue_start_threads(&que->tq, 1, PI_NET, -1, "%s ixq",
+            device_get_nameunit(adapter->dev));
+
+       /* Tasklets for Link, SFP and Multispeed Fiber */
+       TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
+       TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
+       TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
+#ifdef IXGBE_FDIR
+       TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
+#endif
+       adapter->tq = taskqueue_create("ixgbe_link", M_NOWAIT,
+           taskqueue_thread_enqueue, &adapter->tq);
+       taskqueue_start_threads(&adapter->tq, 1, PI_NET, -1, "%s linkq",
+           device_get_nameunit(adapter->dev));
+
+       if ((error = bus_setup_intr(dev, adapter->res, INTR_MPSAFE,
+           ixgbe_legacy_irq, que, &adapter->tag, &adapter->serializer)) != 0) {
+               device_printf(dev, "Failed to register fast interrupt "
+                   "handler: %d\n", error);
+               taskqueue_free(que->tq);
+               taskqueue_free(adapter->tq);
+               que->tq = NULL;
+               adapter->tq = NULL;
+               return (error);
+       }
+       /* For simplicity in the handlers */
+       adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
+
+       return (0);
+}
+
+
+/*********************************************************************
+ *
+ *  Setup MSIX Interrupt resources and handlers 
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_msix(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct          ix_queue *que = adapter->queues;
+       int             error, rid, vector = 0;
+
+       for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
+               rid = vector + 1;
+               que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+                   RF_SHAREABLE | RF_ACTIVE);
+               if (que->res == NULL) {
+                       device_printf(dev,"Unable to allocate"
+                           " bus resource: que interrupt [%d]\n", vector);
+                       return (ENXIO);
+               }
+               /* Set the handler function */
+               error = bus_setup_intr(dev, que->res, INTR_MPSAFE,
+                   ixgbe_msix_que, que, &que->tag, &que->serializer);
+               if (error) {
+                       que->res = NULL;
+                       device_printf(dev, "Failed to register QUE handler");
+                       return (error);
+               }
+#if 0 /* __FreeBSD_version >= 800504 */
+               bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+#endif
+               que->msix = vector;
+               adapter->que_mask |= (u64)(1 << que->msix);
+               /*
+               ** Bind the msix vector, and thus the
+               ** ring to the corresponding cpu.
+               */
+#if 0 /* XXX */
+               if (adapter->num_queues > 1)
+                       bus_bind_intr(dev, que->res, i);
+#endif
+
+               TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
+               que->tq = taskqueue_create("ixgbe_que", M_NOWAIT,
+                   taskqueue_thread_enqueue, &que->tq);
+               taskqueue_start_threads(&que->tq, 1, PI_NET, -1, "%s que",
+                   device_get_nameunit(adapter->dev));
+       }
+
+       /* and Link */
+       rid = vector + 1;
+       adapter->res = bus_alloc_resource_any(dev,
+           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (!adapter->res) {
+               device_printf(dev,"Unable to allocate"
+           " bus resource: Link interrupt [%d]\n", rid);
+               return (ENXIO);
+       }
+       /* Set the link handler function */
+       error = bus_setup_intr(dev, adapter->res, INTR_MPSAFE,
+           ixgbe_msix_link, adapter, &adapter->tag, &adapter->serializer);
+       if (error) {
+               adapter->res = NULL;
+               device_printf(dev, "Failed to register LINK handler");
+               return (error);
+       }
+#if 0 /* __FreeBSD_version >= 800504 */
+       bus_describe_intr(dev, adapter->res, adapter->tag, "link");
+#endif
+       adapter->linkvec = vector;
+       /* Tasklets for Link, SFP and Multispeed Fiber */
+       TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
+       TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
+       TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
+#ifdef IXGBE_FDIR
+       TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
+#endif
+       adapter->tq = taskqueue_create("ixgbe_link", M_NOWAIT,
+           taskqueue_thread_enqueue, &adapter->tq);
+       taskqueue_start_threads(&adapter->tq, 1, PI_NET, -1, "%s linkq",
+           device_get_nameunit(adapter->dev));
+
+       return (0);
+}
+
+#if 0  /* HAVE_MSIX */
+/*
+ * Set up either MSI/X or MSI
+ */
+static int
+ixgbe_setup_msix(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       int rid, want, queues, msgs;
+
+       /* Override by tuneable */
+       if (ixgbe_enable_msix == 0)
+               goto msi;
+
+       /* First try MSI/X */
+       rid = PCIR_BAR(MSIX_82598_BAR);
+       adapter->msix_mem = bus_alloc_resource_any(dev,
+           SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       if (!adapter->msix_mem) {
+               rid += 4;       /* 82599 maps in higher BAR */
+               adapter->msix_mem = bus_alloc_resource_any(dev,
+                   SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       }
+       if (!adapter->msix_mem) {
+               /* May not be enabled */
+               device_printf(adapter->dev,
+                   "Unable to map MSIX table\n");
+               goto msi;
+       }
+
+       msgs = pci_msix_count(dev); 
+       if (msgs == 0) { /* system has msix disabled */
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   rid, adapter->msix_mem);
+               adapter->msix_mem = NULL;
+               goto msi;
+       }
+
+       /* Figure out a reasonable auto config value */
+       queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
+
+       if (ixgbe_num_queues != 0)
+               queues = ixgbe_num_queues;
+       /* Set max queues to 8 when autoconfiguring */
+       else if ((ixgbe_num_queues == 0) && (queues > 8))
+               queues = 8;
+
+       /*
+       ** Want one vector (RX/TX pair) per queue
+       ** plus an additional for Link.
+       */
+       want = queues + 1;
+       if (msgs >= want)
+               msgs = want;
+       else {
+               device_printf(adapter->dev,
+                   "MSIX Configuration Problem, "
+                   "%d vectors but %d queues wanted!\n",
+                   msgs, want);
+               return (0); /* Will go to Legacy setup */
+       }
+       if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
+               device_printf(adapter->dev,
+                   "Using MSIX interrupts with %d vectors\n", msgs);
+               adapter->num_queues = queues;
+               return (msgs);
+       }
+msi:
+       msgs = pci_msi_count(dev);
+       if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
+               device_printf(adapter->dev, "Using an MSI interrupt\n");
+       else
+               device_printf(adapter->dev, "Using a Legacy interrupt\n");
+       return (msgs);
+}
+#endif
+
+
+static int
+ixgbe_allocate_pci_resources(struct adapter *adapter)
+{
+       int             rid;
+       device_t        dev = adapter->dev;
+
+       rid = PCIR_BAR(0);
+       adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+           &rid, RF_ACTIVE);
+
+       if (!(adapter->pci_mem)) {
+               device_printf(dev,"Unable to allocate bus resource: memory\n");
+               return (ENXIO);
+       }
+
+       adapter->osdep.mem_bus_space_tag =
+               rman_get_bustag(adapter->pci_mem);
+       adapter->osdep.mem_bus_space_handle =
+               rman_get_bushandle(adapter->pci_mem);
+       adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
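+       /*
+        * (hw_addr is not dereferenced as a virtual address here: the
+        * register access macros in ixgbe_osdep.h go through the
+        * bus-space tag/handle saved above, so this only needs to be
+        * a usable cookie for them.)
+        */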
+
+       /* Legacy defaults */
+       adapter->num_queues = 1;
+       adapter->hw.back = &adapter->osdep;
+
+       /*
+       ** Now set up MSI or MSI-X; this should return
+       ** the number of supported vectors (1 for MSI).
+       */
+#if 0  /* HAVE_MSIX */
+       adapter->msix = ixgbe_setup_msix(adapter);
+#endif
+       return (0);
+}
+
+static void
+ixgbe_free_pci_resources(struct adapter * adapter)
+{
+       struct          ix_queue *que = adapter->queues;
+       device_t        dev = adapter->dev;
+       int             rid, memrid;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+               memrid = PCIR_BAR(MSIX_82598_BAR);
+       else
+               memrid = PCIR_BAR(MSIX_82599_BAR);
+
+       /*
+       ** There is a slight possibility of a failure mode
+       ** in attach that will result in entering this function
+       ** before interrupt resources have been initialized, and
+       ** in that case we do not want to execute the loops below.
+       ** We can detect this reliably by the state of the adapter
+       ** res pointer.
+       */
+       if (adapter->res == NULL)
+               goto mem;
+
+       /*
+       **  Release all msix queue resources:
+       */
+       for (int i = 0; i < adapter->num_queues; i++, que++) {
+               rid = que->msix + 1;
+               if (que->tag != NULL) {
+                       bus_teardown_intr(dev, que->res, que->tag);
+                       que->tag = NULL;
+               }
+               if (que->res != NULL)
+                       bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+       }
+
+
+       /* Clean the Legacy or Link interrupt last */
+       if (adapter->linkvec) /* we are doing MSIX */
+               rid = adapter->linkvec + 1;
+       else
+               rid = (adapter->msix != 0) ? 1 : 0;
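+       /*
+       ** (rid convention: MSI and MSI-X vectors use 1-based IRQ
+       ** rids, while a legacy INTx line is rid 0.)
+       */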
+
+       if (adapter->tag != NULL) {
+               bus_teardown_intr(dev, adapter->res, adapter->tag);
+               adapter->tag = NULL;
+       }
+       if (adapter->res != NULL)
+               bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+mem:
+       if (adapter->msix)
+               pci_release_msi(dev);
+
+       if (adapter->msix_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   memrid, adapter->msix_mem);
+
+       if (adapter->pci_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(0), adapter->pci_mem);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_interface(device_t dev, struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ifnet   *ifp;
+
+       INIT_DEBUGOUT("ixgbe_setup_interface: begin");
+
+       ifp = adapter->ifp = if_alloc(IFT_ETHER);
+       if (ifp == NULL) {
+               device_printf(dev, "can not allocate ifnet structure\n");
+               return (-1);
+       }
+       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+       ifp->if_baudrate = 1000000000;
+       ifp->if_init = ixgbe_init;
+       ifp->if_softc = adapter;
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_ioctl = ixgbe_ioctl;
+       ifp->if_start = ixgbe_start;
+#if 0 /* __FreeBSD_version >= 800000 */
+       ifp->if_transmit = ixgbe_mq_start;
+       ifp->if_qflush = ixgbe_qflush;
+#endif
+       ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
+
+       ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
+
+       adapter->max_frame_size =
+           ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
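+       /*
+        * (With the default 1500-byte MTU this works out to
+        * 1500 + 14 + 4 = 1518 bytes, the classic maximum
+        * Ethernet frame size.)
+        */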
+
+       /*
+        * Tell the upper layer(s) we support long frames.
+        */
+       ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+#if 0 /* NET_TSO */
+       ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
+#endif
+       ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
+       ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+       ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+#if 0 /* NET_TSO */
+                            |  IFCAP_VLAN_HWTSO
+#endif
+                            |  IFCAP_VLAN_MTU;
+       ifp->if_capenable = ifp->if_capabilities;
+
+       /* Don't enable LRO by default */
+#if 0 /* NET_LRO */
+       ifp->if_capabilities |= IFCAP_LRO;
+#endif
+
+       /*
+       ** Don't turn this on by default: if vlans are
+       ** created on another pseudo device (e.g. lagg)
+       ** then vlan events are not passed through, breaking
+       ** operation, whereas with HW FILTER off it works. If
+       ** you use vlans directly on the ixgbe driver you can
+       ** enable this and get full hardware tag filtering.
+       */
+       ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+       /*
+        * Specify the media types supported by this adapter and register
+        * callbacks to update media and link information
+        */
+       ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
+                    ixgbe_media_status);
+       ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
+       ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
+       if (hw->device_id == IXGBE_DEV_ID_82598AT) {
+               ifmedia_add(&adapter->media,
+                   IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+               ifmedia_add(&adapter->media,
+                   IFM_ETHER | IFM_1000_T, 0, NULL);
+       }
+       ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+       ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+
+       return (0);
+}
+
+static void
+ixgbe_config_link(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32     autoneg, err = 0;
+       bool    sfp, negotiate;
+
+       sfp = ixgbe_is_sfp(hw);
+
+       if (sfp) { 
+               if (hw->phy.multispeed_fiber) {
+                       hw->mac.ops.setup_sfp(hw);
+                       ixgbe_enable_tx_laser(hw);
+                       taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+               } else
+                       taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+       } else {
+               if (hw->mac.ops.check_link)
+                       err = ixgbe_check_link(hw, &autoneg,
+                           &adapter->link_up, FALSE);
+               if (err)
+                       goto out;
+               autoneg = hw->phy.autoneg_advertised;
+               if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+                       err  = hw->mac.ops.get_link_capabilities(hw,
+                           &autoneg, &negotiate);
+               if (err)
+                       goto out;
+               if (hw->mac.ops.setup_link)
+                       err = hw->mac.ops.setup_link(hw, autoneg,
+                           negotiate, adapter->link_up);
+       }
+out:
+       return;
+}
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
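+/*
+ * bus_dmamap_load() callback: the tags used here describe a single
+ * segment, so we only record the physical address of segment 0; on
+ * error the caller's address variable is left untouched.
+ */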
+static void
+ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+       if (error)
+               return;
+       *(bus_addr_t *) arg = segs->ds_addr;
+       return;
+}
+
+static int
+ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
+               struct ixgbe_dma_alloc *dma, int mapflags)
+{
+       device_t dev = adapter->dev;
+       int             r;
+
+       r = bus_dma_tag_create(NULL,    /* parent */
+                              DBA_ALIGN, 0,    /* alignment, bounds */
+                              BUS_SPACE_MAXADDR,       /* lowaddr */
+                              BUS_SPACE_MAXADDR,       /* highaddr */
+                              NULL, NULL,      /* filter, filterarg */
+                              size,    /* maxsize */
+                              1,       /* nsegments */
+                              size,    /* maxsegsize */
+                              BUS_DMA_ALLOCNOW,        /* flags */
+                              &dma->dma_tag);
+       if (r != 0) {
+               device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
+                      "error %u\n", r);
+               goto fail_0;
+       }
+       r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
+                            BUS_DMA_NOWAIT, &dma->dma_map);
+       if (r != 0) {
+               device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
+                      "error %u\n", r);
+               goto fail_1;
+       }
+       r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+                           size,
+                           ixgbe_dmamap_cb,
+                           &dma->dma_paddr,
+                           mapflags | BUS_DMA_NOWAIT);
+       if (r != 0) {
+               device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
+                      "error %u\n", r);
+               goto fail_2;
+       }
+       dma->dma_size = size;
+       return (0);
+fail_2:
+       bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+fail_1:
+       bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+       dma->dma_map = NULL;
+       dma->dma_tag = NULL;
+       return (r);
+}
+
+static void
+ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
+{
+       bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+       bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+       bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+       bus_dma_tag_destroy(dma->dma_tag);
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for the transmit and receive rings, and then
+ *  the descriptors associated with each, called only once at attach.
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_queues(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct ix_queue *que;
+       struct tx_ring  *txr;
+       struct rx_ring  *rxr;
+       int rsize, tsize, error = IXGBE_SUCCESS;
+       int txconf = 0, rxconf = 0;
+
+       /* First allocate the top level queue structs */
+       if (!(adapter->queues =
+           (struct ix_queue *) kmalloc(sizeof(struct ix_queue) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate queue memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       /* Then allocate the TX ring struct memory */
+       if (!(adapter->tx_rings =
+           (struct tx_ring *) kmalloc(sizeof(struct tx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate TX ring memory\n");
+               error = ENOMEM;
+               goto tx_fail;
+       }
+
+       /* Next allocate the RX */
+       if (!(adapter->rx_rings =
+           (struct rx_ring *) kmalloc(sizeof(struct rx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate RX ring memory\n");
+               error = ENOMEM;
+               goto rx_fail;
+       }
+
+       /* For the ring itself */
+       tsize = roundup2(adapter->num_tx_desc *
+           sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
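+       /*
+        * (Each advanced TX descriptor is 16 bytes, so e.g. a ring
+        * of 1024 descriptors needs 16KB, already a multiple of
+        * DBA_ALIGN; roundup2() only matters for odd counts.)
+        */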
+
+       /*
+        * Now set up the TX queues; txconf is needed to handle the
+        * possibility that things fail midcourse and we need to
+        * undo the allocations gracefully.
+        */
+       for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+               /* Set up some basics */
+               txr = &adapter->tx_rings[i];
+               txr->adapter = adapter;
+               txr->me = i;
+
+               /* Initialize the TX side lock */
+               ksnprintf(txr->lock_name, sizeof(txr->lock_name), "%s:tx(%d)",
+                   device_get_nameunit(dev), txr->me);
+               lockinit(&txr->tx_lock, txr->lock_name, 0, LK_CANRECURSE);
+
+               if (ixgbe_dma_malloc(adapter, tsize,
+                       &txr->txdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate TX Descriptor memory\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+               txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
+               bzero((void *)txr->tx_base, tsize);
+
+               /* Now allocate transmit buffers for the ring */
+               if (ixgbe_allocate_transmit_buffers(txr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up transmit buffers\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+#if 0 /* __FreeBSD_version >= 800000 */
+               /* Allocate a buf ring */
+               txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
+                   M_WAITOK, &txr->tx_mtx);
+               if (txr->br == NULL) {
+                       device_printf(dev,
+                           "Critical Failure setting up buf ring\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+#endif
+       }
+
+       /*
+        * Next the RX queues...
+        */ 
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+       for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+               rxr = &adapter->rx_rings[i];
+               /* Set up some basics */
+               rxr->adapter = adapter;
+               rxr->me = i;
+
+               /* Initialize the RX side lock */
+               ksnprintf(rxr->lock_name, sizeof(rxr->lock_name), "%s:rx(%d)",
+                   device_get_nameunit(dev), rxr->me);
+               lockinit(&rxr->rx_lock, rxr->lock_name, 0, LK_CANRECURSE);
+
+               if (ixgbe_dma_malloc(adapter, rsize,
+                       &rxr->rxdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate RX Descriptor memory\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+               rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
+               bzero((void *)rxr->rx_base, rsize);
+
+               /* Allocate receive buffers for the ring */
+               if (ixgbe_allocate_receive_buffers(rxr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up receive buffers\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+       }
+
+       /*
+       ** Finally set up the queue holding structs
+       */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               que = &adapter->queues[i];
+               que->adapter = adapter;
+               que->txr = &adapter->tx_rings[i];
+               que->rxr = &adapter->rx_rings[i];
+       }
+
+       return (0);
+
+err_rx_desc:
+       for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+               ixgbe_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+       for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+               ixgbe_dma_free(adapter, &txr->txdma);
+       kfree(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+       kfree(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+       kfree(adapter->queues, M_DEVBUF);
+fail:
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for tx_buffer structures. The tx_buffer stores all
+ *  the information needed to transmit a packet on the wire. This is
+ *  called only once at attach, setup is done every reset.
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       device_t dev = adapter->dev;
+       struct ixgbe_tx_buf *txbuf;
+       int error, i;
+
+       /*
+        * Setup DMA descriptor areas.
+        */
+       if ((error = bus_dma_tag_create(
+                              NULL,    /* parent */
+                              1, 0,            /* alignment, bounds */
+                              BUS_SPACE_MAXADDR,       /* lowaddr */
+                              BUS_SPACE_MAXADDR,       /* highaddr */
+                              NULL, NULL,              /* filter, filterarg */
+                              IXGBE_TSO_SIZE,          /* maxsize */
+                              adapter->num_segs,       /* nsegments */
+                              PAGE_SIZE,               /* maxsegsize */
+                              0,                       /* flags */
+                              &txr->txtag))) {
+               device_printf(dev,"Unable to allocate TX DMA tag\n");
+               goto fail;
+       }
+
+       if (!(txr->tx_buffers =
+           (struct ixgbe_tx_buf *) kmalloc(sizeof(struct ixgbe_tx_buf) *
+           adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate tx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+        /* Create the descriptor buffer dma maps */
+       txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
+               if (error != 0) {
+                       device_printf(dev, "Unable to create TX DMA map\n");
+                       goto fail;
+               }
+       }
+
+       return 0;
+fail:
+       /* We free everything; this handles the case where we failed partway. */
+       ixgbe_free_transmit_structures(adapter);
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Initialize a transmit ring.
+ *
+ **********************************************************************/
+static void
+ixgbe_setup_transmit_ring(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_tx_buf *txbuf;
+       int i;
+#ifdef DEV_NETMAP
+       struct netmap_adapter *na = NA(adapter->ifp);
+       struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+
+       /* Clear the old ring contents */
+       IXGBE_TX_LOCK(txr);
+#ifdef DEV_NETMAP
+       /*
+        * (under lock): if in netmap mode, do some consistency
+        * checks and set slot to entry 0 of the netmap ring.
+        */
+       slot = netmap_reset(na, NR_TX, txr->me, 0);
+#endif /* DEV_NETMAP */
+       bzero((void *)txr->tx_base,
+             (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
+       /* Reset indices */
+       txr->next_avail_desc = 0;
+       txr->next_to_clean = 0;
+
+       /* Free any existing tx buffers. */
+        txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               if (txbuf->m_head != NULL) {
+                       bus_dmamap_sync(txr->txtag, txbuf->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag, txbuf->map);
+                       m_freem(txbuf->m_head);
+                       txbuf->m_head = NULL;
+               }
+#ifdef DEV_NETMAP
+               /*
+                * In netmap mode, set the map for the packet buffer.
+                * NOTE: Some drivers (not this one) also need to set
+                * the physical buffer address in the NIC ring.
+                * Slots in the netmap ring (indexed by "si") are
+                * kring->nkr_hwofs positions "ahead" wrt the
+                * corresponding slot in the NIC ring. In some drivers
+                * (not here) nkr_hwofs can be negative. Function
+                * netmap_idx_n2k() handles wraparounds properly.
+                */
+               if (slot) {
+                       int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
+                       netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
+               }
+#endif /* DEV_NETMAP */
+               /* Clear the EOP index */
+               txbuf->eop_index = -1;
+        }
+
+#ifdef IXGBE_FDIR
+       /* Set the rate at which we sample packets */
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB)
+               txr->atr_sample = atr_sample_rate;
+#endif
+
+       /* Set number of descriptors available */
+       txr->tx_avail = adapter->num_tx_desc;
+
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       IXGBE_TX_UNLOCK(txr);
+}
+
+/*********************************************************************
+ *
+ *  Initialize all transmit rings.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_transmit_structures(struct adapter *adapter)
+{
+       struct tx_ring *txr = adapter->tx_rings;
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++)
+               ixgbe_setup_transmit_ring(txr);
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Enable transmit unit.
+ *
+ **********************************************************************/
+static void
+ixgbe_initialize_transmit_units(struct adapter *adapter)
+{
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       /* Setup the Base and Length of the Tx Descriptor Ring */
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               u64     tdba = txr->txdma.dma_paddr;
+               u32     txctrl;
+
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
+                      (tdba & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
+                   adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
+
+               /* Setup the HW Tx Head and Tail descriptor pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+
+               /* Setup Transmit Descriptor Cmd Settings */
+               txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
+               txr->queue_status = IXGBE_QUEUE_IDLE;
+
+               /* Disable Head Writeback */
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               default:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+                       break;
+                }
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               default:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
+                       break;
+               }
+
+       }
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               u32 dmatxctl, rttdcs;
+               dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+               dmatxctl |= IXGBE_DMATXCTL_TE;
+               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+               /* Disable arbiter to set MTQC */
+               rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               rttdcs |= IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
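+               /*
+               ** (IXGBE_MTQC_64Q_1PB selects the plain 64 TX queue,
+               ** single packet-buffer layout, i.e. no DCB or VT
+               ** partitioning.)
+               */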
+               IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+               rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+       }
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Free all transmit rings.
+ *
+ **********************************************************************/
+static void
+ixgbe_free_transmit_structures(struct adapter *adapter)
+{
+       struct tx_ring *txr = adapter->tx_rings;
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IXGBE_TX_LOCK(txr);
+               ixgbe_free_transmit_buffers(txr);
+               ixgbe_dma_free(adapter, &txr->txdma);
+               IXGBE_TX_UNLOCK(txr);
+               IXGBE_TX_LOCK_DESTROY(txr);
+       }
+       kfree(adapter->tx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ *  Free transmit ring related data structures.
+ *
+ **********************************************************************/
+static void
+ixgbe_free_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_tx_buf *tx_buffer;
+       int             i;
+
+       INIT_DEBUGOUT("free_transmit_ring: begin");
+
+       if (txr->tx_buffers == NULL)
+               return;
+
+       tx_buffer = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
+               if (tx_buffer->m_head != NULL) {
+                       bus_dmamap_sync(txr->txtag, tx_buffer->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       m_freem(tx_buffer->m_head);
+                       tx_buffer->m_head = NULL;
+                       if (tx_buffer->map != NULL) {
+                               bus_dmamap_destroy(txr->txtag,
+                                   tx_buffer->map);
+                               tx_buffer->map = NULL;
+                       }
+               } else if (tx_buffer->map != NULL) {
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       bus_dmamap_destroy(txr->txtag,
+                           tx_buffer->map);
+                       tx_buffer->map = NULL;
+               }
+       }
+#if 0 /* __FreeBSD_version >= 800000 */
+       if (txr->br != NULL)
+               buf_ring_free(txr->br, M_DEVBUF);
+#endif
+       if (txr->tx_buffers != NULL) {
+               kfree(txr->tx_buffers, M_DEVBUF);
+               txr->tx_buffers = NULL;
+       }
+       if (txr->txtag != NULL) {
+               bus_dma_tag_destroy(txr->txtag);
+               txr->txtag = NULL;
+       }
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Advanced Context Descriptor setup for VLAN or CSUM
+ *
+ **********************************************************************/
+
+static bool
+ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_adv_tx_context_desc *TXD;
+       struct ixgbe_tx_buf        *tx_buffer;
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       struct ether_vlan_header *eh;
+       struct ip *ip;
+       struct ip6_hdr *ip6;
+       int  ehdrlen, ip_hlen = 0;
+       u16     etype;
+       u8      ipproto = 0;
+       bool    offload = TRUE;
+       int ctxd = txr->next_avail_desc;
+#ifdef NET_VLAN
+       u16 vtag = 0;
+#endif
+
+
+       if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+               offload = FALSE;
+
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       /*
+       ** In advanced descriptors the vlan tag must 
+       ** be placed into the descriptor itself.
+       */
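+       /*
+       ** (Layout of vlan_macip_lens in the context descriptor:
+       ** VLAN tag in bits 31:16, MAC header length in 15:9,
+       ** IP header length in 8:0.)
+       */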
+#ifdef NET_VLAN
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+               vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+       } else if (offload == FALSE)
+               return FALSE;
+#endif
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present,
+        * helpful for QinQ too.
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+               etype = ntohs(eh->evl_proto);
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       } else {
+               etype = ntohs(eh->evl_encap_proto);
+               ehdrlen = ETHER_HDR_LEN;
+       }
+
+       /* Set the ether header length */
+       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+
+       switch (etype) {
+               case ETHERTYPE_IP:
+                       ip = (struct ip *)(mp->m_data + ehdrlen);
+                       ip_hlen = ip->ip_hl << 2;
+                       ipproto = ip->ip_p;
+                       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+                       break;
+               case ETHERTYPE_IPV6:
+                       ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+                       ip_hlen = sizeof(struct ip6_hdr);
+                       /* XXX-BZ this will go badly in case of ext hdrs. */
+                       ipproto = ip6->ip6_nxt;
+                       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+                       break;
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       vlan_macip_lens |= ip_hlen;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+       switch (ipproto) {
+               case IPPROTO_TCP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                       break;
+
+               case IPPROTO_UDP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+                       break;
+
+#if 0
+               case IPPROTO_SCTP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+                       break;
+#endif
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       /* Now copy bits into descriptor */
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+       TXD->seqnum_seed = htole32(0);
+       TXD->mss_l4len_idx = htole32(0);
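+       /*
+        * (seqnum_seed and mss_l4len_idx stay zero for a pure
+        * checksum-offload context; they only carry data for TSO.)
+        */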
+
+       tx_buffer->m_head = NULL;
+       tx_buffer->eop_index = -1;
+
+       /* We've consumed the first desc, adjust counters */
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+       txr->next_avail_desc = ctxd;
+       --txr->tx_avail;
+
+        return (offload);
+}
+
+/**********************************************************************
+ *
+ *  Setup work for hardware segmentation offload (TSO) on
+ *  adapters using advanced tx descriptors
+ *
+ **********************************************************************/
+#if 0  /* NET_TSO */
+static bool
+ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen,
+    u32 *olinfo_status)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_adv_tx_context_desc *TXD;
+       struct ixgbe_tx_buf        *tx_buffer;
+#ifdef NET_VLAN
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       u16 vtag = 0, eh_type;
+#else
+       u16 eh_type;
+       u32 type_tucmd_mlhl = 0;
+#endif
+       u32 mss_l4len_idx = 0, len;
+       int ctxd, ehdrlen, ip_hlen, tcp_hlen;
+       struct ether_vlan_header *eh;
+#ifdef INET6
+       struct ip6_hdr *ip6;
+#endif
+#ifdef INET
+       struct ip *ip;
+#endif
+       struct tcphdr *th;
+
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+               eh_type = eh->evl_proto;
+       } else {
+               ehdrlen = ETHER_HDR_LEN;
+               eh_type = eh->evl_encap_proto;
+       }
+
+        /* Ensure we have at least the IP+TCP header in the first mbuf. */
+       len = ehdrlen + sizeof(struct tcphdr);
+       switch (ntohs(eh_type)) {
+#ifdef INET6
+       case ETHERTYPE_IPV6:
+               if (mp->m_len < len + sizeof(struct ip6_hdr))
+                       return FALSE;
+               ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+               /* XXX-BZ For now we do not pretend to support ext. hdrs. */
+               if (ip6->ip6_nxt != IPPROTO_TCP)
+                       return FALSE;
+               ip_hlen = sizeof(struct ip6_hdr);
+               th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
+               th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+               break;
+#endif
+#ifdef INET
+       case ETHERTYPE_IP:
+               if (mp->m_len < len + sizeof(struct ip))
+                       return FALSE;
+               ip = (struct ip *)(mp->m_data + ehdrlen);
+               if (ip->ip_p != IPPROTO_TCP)
+                       return FALSE;
+               ip->ip_sum = 0;
+               ip_hlen = ip->ip_hl << 2;
+               th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+               th->th_sum = in_pseudo(ip->ip_src.s_addr,
+                   ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+               /* Tell transmit desc to also do IPv4 checksum. */
+               *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+               break;
+#endif
+       default:
+               panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
+                   __func__, ntohs(eh_type));
+               break;
+       }
+
+       ctxd = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       tcp_hlen = th->th_off << 2;
+
+       /* This is used in the transmit desc in encap */
+       *paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
+
+       /* VLAN MACLEN IPLEN */
+#ifdef NET_VLAN
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+       }
+
+       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens |= ip_hlen;
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+#endif
+
+       /* ADV DTYPE TUCMD */
+       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+
+       /* MSS L4LEN IDX */
+       mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+       mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+       TXD->seqnum_seed = htole32(0);
+       tx_buffer->m_head = NULL;
+       tx_buffer->eop_index = -1;
+
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+
+       txr->tx_avail--;
+       txr->next_avail_desc = ctxd;
+       return TRUE;
+}
+#endif
+
+#ifdef IXGBE_FDIR
+/*
+** This routine parses packet headers so that Flow
+** Director can make a hashed filter table entry
+** allowing traffic flows to be identified and kept
+** on the same cpu. This would be a performance
+** hit, but we only do it for one packet in every
+** IXGBE_FDIR_RATE.
+*/
+static void
+ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
+{
+       struct adapter                  *adapter = txr->adapter;
+       struct ix_queue                 *que;
+       struct ip                       *ip;
+       struct tcphdr                   *th;
+       struct udphdr                   *uh;
+       struct ether_vlan_header        *eh;
+       union ixgbe_atr_hash_dword      input = {.dword = 0}; 
+       union ixgbe_atr_hash_dword      common = {.dword = 0}; 
+       int                             ehdrlen, ip_hlen;
+       u16                             etype;
+
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+               etype = eh->evl_proto;
+       } else {
+               ehdrlen = ETHER_HDR_LEN;
+               etype = eh->evl_encap_proto;
+       }
+
+       /* Only handling IPv4 */
+       if (etype != htons(ETHERTYPE_IP))
+               return;
+
+       ip = (struct ip *)(mp->m_data + ehdrlen);
+       ip_hlen = ip->ip_hl << 2;
+
+       /* check if we're UDP or TCP */
+       switch (ip->ip_p) {
+       case IPPROTO_TCP:
+               th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+               /* src and dst are inverted */
+               common.port.dst ^= th->th_sport;
+               common.port.src ^= th->th_dport;
+               input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
+               break;
+       case IPPROTO_UDP:
+               uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
+               /* src and dst are inverted */
+               common.port.dst ^= uh->uh_sport;
+               common.port.src ^= uh->uh_dport;
+               input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
+               break;
+       default:
+               return;
+       }
+
+       input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
+       if (mp->m_pkthdr.ether_vtag)
+               common.flex_bytes ^= htons(ETHERTYPE_VLAN);
+       else
+               common.flex_bytes ^= etype;
+       common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
+
+       que = &adapter->queues[txr->me];
+       /*
+       ** This assumes the Rx queue and Tx
+       ** queue are bound to the same CPU
+       */
+       ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
+           input, common, que->msix);
+}
+#endif /* IXGBE_FDIR */
+
+/**********************************************************************
+ *
+ *  Examine each tx_buffer in the used queue. If the hardware is done
+ *  processing the packet then free associated resources. The
+ *  tx_buffer is put back on the free queue.
+ *
+ **********************************************************************/
+static bool
+ixgbe_txeof(struct tx_ring *txr)
+{
+       struct adapter  *adapter = txr->adapter;
+       struct ifnet    *ifp = adapter->ifp;
+       u32     first, last, done, processed;
+       struct ixgbe_tx_buf *tx_buffer;
+       struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
+
+       KKASSERT(lockstatus(&txr->tx_lock, curthread) != 0);
+
+#ifdef DEV_NETMAP
+       if (ifp->if_capenable & IFCAP_NETMAP) {
+               struct netmap_adapter *na = NA(ifp);
+               struct netmap_kring *kring = &na->tx_rings[txr->me];
+
+               tx_desc = (struct ixgbe_legacy_tx_desc *)txr->tx_base;
+
+               bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+                   BUS_DMASYNC_POSTREAD);
+               /*
+                * In netmap mode, all the work is done in the context
+                * of the client thread. Interrupt handlers only wake up
+                * clients, which may be sleeping on individual rings
+                * or on a global resource for all rings.
+                * To implement tx interrupt mitigation, we wake up the client
+                * thread roughly every half ring, even if the NIC interrupts
+                * more frequently. This is implemented as follows:
+                * - ixgbe_txsync() sets kring->nr_kflags with the index of
+                *   the slot that should wake up the thread (nkr_num_slots
+                *   means the user thread should not be woken up);
+                * - the driver ignores tx interrupts unless netmap_mitigate=0
+                *   or the slot has the DD bit set.
+                *
+                * When the driver has separate locks, we need to
+                * release and re-acquire txlock to avoid deadlocks.
+                * XXX see if we can find a better way.
+                */
+               if (!netmap_mitigate ||
+                   (kring->nr_kflags < kring->nkr_num_slots &&
+                    tx_desc[kring->nr_kflags].upper.fields.status & IXGBE_TXD_STAT_DD)) {
+                       kring->nr_kflags = kring->nkr_num_slots;
+                       selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
+                       IXGBE_TX_UNLOCK(txr);
+                       IXGBE_CORE_LOCK(adapter);
+                       selwakeuppri(&na->tx_si, PI_NET);
+                       IXGBE_CORE_UNLOCK(adapter);
+                       IXGBE_TX_LOCK(txr);
+               }
+               return FALSE;
+       }
+#endif /* DEV_NETMAP */
+
+       if (txr->tx_avail == adapter->num_tx_desc) {
+               txr->queue_status = IXGBE_QUEUE_IDLE;
+               return FALSE;
+       }
+
+       processed = 0;
+       first = txr->next_to_clean;
+       tx_buffer = &txr->tx_buffers[first];
+       /* For cleanup we just use legacy struct */
+       tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+       last = tx_buffer->eop_index;
+       if (last == -1)
+               return FALSE;
+       eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+
+       /*
+       ** Get the index of the first descriptor
+       ** BEYOND the EOP and call that 'done'.
+       ** I do this so the comparison in the
+       ** inner while loop below can be simple
+       */
+       if (++last == adapter->num_tx_desc) last = 0;
+       done = last;
+
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_POSTREAD);
+       /*
+       ** Only the EOP descriptor of a packet now has the DD
+       ** bit set; this is what we look for...
+       */
+       while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
+               /* We clean the range of the packet */
+               while (first != done) {
+                       tx_desc->upper.data = 0;
+                       tx_desc->lower.data = 0;
+                       tx_desc->buffer_addr = 0;
+                       ++txr->tx_avail;
+                       ++processed;
+
+                       if (tx_buffer->m_head) {
+                               txr->bytes +=
+                                   tx_buffer->m_head->m_pkthdr.len;
+                               bus_dmamap_sync(txr->txtag,
+                                   tx_buffer->map,
+                                   BUS_DMASYNC_POSTWRITE);
+                               bus_dmamap_unload(txr->txtag,
+                                   tx_buffer->map);
+                               m_freem(tx_buffer->m_head);
+                               tx_buffer->m_head = NULL;
+                               tx_buffer->map = NULL;
+                       }
+                       tx_buffer->eop_index = -1;
+                       txr->watchdog_time = ticks;
+
+                       if (++first == adapter->num_tx_desc)
+                               first = 0;
+
+                       tx_buffer = &txr->tx_buffers[first];
+                       tx_desc =
+                           (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+               }
+               ++txr->packets;
+               ++ifp->if_opackets;
+               /* See if there is more work now */
+               last = tx_buffer->eop_index;
+               if (last != -1) {
+                       eop_desc =
+                           (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+                       /* Get next done point */
+                       if (++last == adapter->num_tx_desc) last = 0;
+                       done = last;
+               } else
+                       break;
+       }
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+       txr->next_to_clean = first;
+
+       /*
+       ** Watchdog calculation: we know there's
+       ** work outstanding or the first return
+       ** would have been taken, so nothing processed
+       ** for too long indicates a hang.
+       */
+       if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
+               txr->queue_status = IXGBE_QUEUE_HUNG;
+
+       /* With a minimum free clear the depleted state bit.  */
+       if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD)
+               txr->queue_status &= ~IXGBE_QUEUE_DEPLETED;
+
+       if (txr->tx_avail == adapter->num_tx_desc) {
+               txr->queue_status = IXGBE_QUEUE_IDLE;
+               return (FALSE);
+       }
+
+       return TRUE;
+}
+
+/*********************************************************************
+ *
+ *  Refresh mbuf buffers for RX descriptor rings
+ *   - now keeps its own state so discards due to resource
+ *     exhaustion are unnecessary; if an mbuf cannot be obtained
+ *     it just returns, keeping its placeholder, and can simply
+ *     be called again to retry.
+ *
+ **********************************************************************/
+static void
+ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+       struct adapter          *adapter = rxr->adapter;
+       bus_dma_segment_t       hseg[1];
+       bus_dma_segment_t       pseg[1];
+       struct ixgbe_rx_buf     *rxbuf;
+       struct mbuf             *mh, *mp;
+       int                     i, j, nsegs, error;
+       bool                    refreshed = FALSE;
+
+       i = j = rxr->next_to_refresh;
+       /* Control the loop with one beyond */
+       if (++j == adapter->num_rx_desc)
+               j = 0;
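+       /*
+        * (i is the slot being refreshed while j runs one ahead and
+        * ends the loop at "limit"; next_to_refresh only advances
+        * past slots whose buffers were successfully replenished.)
+        */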
+
+       while (j != limit) {
+               rxbuf = &rxr->rx_buffers[i];
+               if (rxr->hdr_split == FALSE)
+                       goto no_split;
+
+               if (rxbuf->m_head == NULL) {
+                       mh = m_gethdr(MB_DONTWAIT, MT_DATA);
+                       if (mh == NULL)
+                               goto update;
+               } else
+                       mh = rxbuf->m_head;
+
+               mh->m_pkthdr.len = mh->m_len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_segment(rxr->htag,
+                   rxbuf->hmap, mh, hseg, 1, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       kprintf("Refresh mbufs: hdr dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mh);
+                       rxbuf->m_head = NULL;
+                       goto update;
+               }
+               rxbuf->m_head = mh;
+               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.hdr_addr =
+                   htole64(hseg[0].ds_addr);
+
+no_split:
+               if (rxbuf->m_pack == NULL) {
+                       mp = m_getjcl(MB_DONTWAIT, MT_DATA,
+                           M_PKTHDR, adapter->rx_mbuf_sz);
+                       if (mp == NULL)
+                               goto update;
+               } else
+                       mp = rxbuf->m_pack;
+
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_segment(rxr->ptag,
+                   rxbuf->pmap, mp, pseg, 1, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       kprintf("Refresh mbufs: payload dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mp);
+                       rxbuf->m_pack = NULL;
+                       goto update;
+               }
+               rxbuf->m_pack = mp;
+               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.pkt_addr =
+                   htole64(pseg[0].ds_addr);
+
+               refreshed = TRUE;
+               /* Next is precalculated */
+               i = j;
+               rxr->next_to_refresh = i;
+               if (++j == adapter->num_rx_desc)
+                       j = 0;
+       }
+update:
+       if (refreshed) /* Update hardware tail index */
+               IXGBE_WRITE_REG(&adapter->hw,
+                   IXGBE_RDT(rxr->me), rxr->next_to_refresh);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for rx_buffer structures. Since we use one
+ *  rx_buffer per received packet, the maximum number of rx_buffer's
+ *  that we'll need is equal to the number of receive descriptors
+ *  that we've allocated.
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       device_t                dev = adapter->dev;
+       struct ixgbe_rx_buf     *rxbuf;
+       int                     i, bsize, error;
+
+       bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
+       if (!(rxr->rx_buffers =
+           (struct ixgbe_rx_buf *) kmalloc(bsize,
+           M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate rx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       if ((error = bus_dma_tag_create(NULL,   /* parent */
+                                  1, 0,        /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MSIZE,               /* maxsize */
+                                  1,                   /* nsegments */
+                                  MSIZE,               /* maxsegsize */
+                                  0,                   /* flags */
+                                  &rxr->htag))) {
+               device_printf(dev, "Unable to create RX DMA tag\n");
+               goto fail;
+       }
+
+       if ((error = bus_dma_tag_create(NULL,   /* parent */
+                                  1, 0,        /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MJUM16BYTES,         /* maxsize */
+                                  1,                   /* nsegments */
+                                  MJUM16BYTES,         /* maxsegsize */
+                                  0,                   /* flags */
+                                  &rxr->ptag))) {
+               device_printf(dev, "Unable to create RX DMA tag\n");
+               goto fail;
+       }
+
+       for (i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               error = bus_dmamap_create(rxr->htag,
+                   BUS_DMA_NOWAIT, &rxbuf->hmap);
+               if (error) {
+                       device_printf(dev, "Unable to create RX head map\n");
+                       goto fail;
+               }
+               error = bus_dmamap_create(rxr->ptag,
+                   BUS_DMA_NOWAIT, &rxbuf->pmap);
+               if (error) {
+                       device_printf(dev, "Unable to create RX pkt map\n");
+                       goto fail;
+               }
+       }
+
+       return (0);
+
+fail:
+       /* Frees all, but can handle partial completion */
+       ixgbe_free_receive_structures(adapter);
+       return (error);
+}
+
+/*
+** Used to detect a descriptor that has
+** been merged by Hardware RSC.
+*/
+static inline u32
+ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
+{
+       return (le32toh(rx->wb.lower.lo_dword.data) &
+           IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+/*********************************************************************
+ *
+ *  Initialize Hardware RSC (LRO) feature on 82599
+ *  for an RX ring; this is toggled by the LRO capability
+ *  even though it is transparent to the stack.
+ *
+ **********************************************************************/
+#if 0  /* NET_LRO */
+static void
+ixgbe_setup_hw_rsc(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       struct  ixgbe_hw        *hw = &adapter->hw;
+       u32                     rscctrl, rdrxctl;
+
+       rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+       rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+#ifdef DEV_NETMAP /* crcstrip is optional in netmap */
+       if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
+#endif /* DEV_NETMAP */
+       rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+       rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+       IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+       rscctrl |= IXGBE_RSCCTL_RSCEN;
+       /*
+       ** Limit the total number of descriptors that
+       ** can be combined, so it does not exceed 64K
+       */
+       if (adapter->rx_mbuf_sz == MCLBYTES)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+       else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+       else if (adapter->rx_mbuf_sz == MJUM9BYTES)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+       else  /* Using 16K cluster */
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
+
+       /* Enable TCP header recognition */
+       IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
+           (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
+           IXGBE_PSRTYPE_TCPHDR));
+
+       /* Disable RSC for ACK packets */
+       IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+           (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+
+       rxr->hw_rsc = TRUE;
+}
+#endif
+
+static void     
+ixgbe_free_receive_ring(struct rx_ring *rxr)
+{ 
+       struct  adapter         *adapter;
+       struct ixgbe_rx_buf       *rxbuf;
+       int i;
+
+       adapter = rxr->adapter;
+       for (i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               if (rxbuf->m_head != NULL) {
+                       bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                       rxbuf->m_head->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_head);
+               }
+               if (rxbuf->m_pack != NULL) {
+                       bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                       rxbuf->m_pack->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_pack);
+               }
+               rxbuf->m_head = NULL;
+               rxbuf->m_pack = NULL;
+       }
+}
+
+
+/*********************************************************************
+ *
+ *  Initialize a receive ring and its buffers.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_receive_ring(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter;
+       struct ifnet            *ifp;
+       device_t                dev;
+       struct ixgbe_rx_buf     *rxbuf;
+       bus_dma_segment_t       pseg[1], hseg[1];
+#if 0  /* NET_LRO */
+       struct lro_ctrl         *lro = &rxr->lro;
+#endif
+       int                     rsize, nsegs, error = 0;
+#ifdef DEV_NETMAP
+       struct netmap_adapter *na = NA(rxr->adapter->ifp);
+       struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+
+       adapter = rxr->adapter;
+       ifp = adapter->ifp;
+       dev = adapter->dev;
+
+       /* Clear the ring contents */
+       IXGBE_RX_LOCK(rxr);
+#ifdef DEV_NETMAP
+       /* same as in ixgbe_setup_transmit_ring() */
+       slot = netmap_reset(na, NR_RX, rxr->me, 0);
+#endif /* DEV_NETMAP */
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+       bzero((void *)rxr->rx_base, rsize);
+
+       /* Free current RX buffer structs and their mbufs */
+       ixgbe_free_receive_ring(rxr);
+
+       /* Configure header split? */
+       if (ixgbe_header_split)
+               rxr->hdr_split = TRUE;
+
+       /* Now replenish the mbufs */
+       for (int j = 0; j != adapter->num_rx_desc; ++j) {
+               struct mbuf     *mh, *mp;
+
+               rxbuf = &rxr->rx_buffers[j];
+#ifdef DEV_NETMAP
+               /*
+                * In netmap mode, fill the map and set the buffer
+                * address in the NIC ring, considering the offset
+                * between the netmap and NIC rings (see comment in
+                * ixgbe_setup_transmit_ring() ). No need to allocate
+                * an mbuf, so end the block with a continue;
+                */
+               if (slot) {
+                       int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
+                       uint64_t paddr;
+                       void *addr;
+
+                       addr = PNMB(slot + sj, &paddr);
+                       netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
+                       /* Update descriptor */
+                       rxr->rx_base[j].read.pkt_addr = htole64(paddr);
+                       continue;
+               }
+#endif /* DEV_NETMAP */
+               /*
+               ** Don't allocate mbufs if not
+               ** doing header split, it's wasteful
+               */ 
+               if (rxr->hdr_split == FALSE)
+                       goto skip_head;
+
+               /* First the header */
+               rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
+               if (rxbuf->m_head == NULL) {
+                       error = ENOBUFS;
+                       goto fail;
+               }
+               m_adj(rxbuf->m_head, ETHER_ALIGN);
+               mh = rxbuf->m_head;
+               mh->m_len = mh->m_pkthdr.len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_segment(rxr->htag,
+                   rxbuf->hmap, rxbuf->m_head, hseg, 1,
+                   &nsegs, BUS_DMA_NOWAIT);
+
+               if (error != 0) /* Nothing elegant to do here */
+                       goto fail;
+               bus_dmamap_sync(rxr->htag,
+                   rxbuf->hmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+               /* Now the payload cluster */
+               rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
+                   M_PKTHDR, adapter->rx_mbuf_sz);
+               if (rxbuf->m_pack == NULL) {
+                       error = ENOBUFS;
+                        goto fail;
+               }
+               mp = rxbuf->m_pack;
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_segment(rxr->ptag,
+                   rxbuf->pmap, mp, pseg, 1,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0)
+                        goto fail;
+               bus_dmamap_sync(rxr->ptag,
+                   rxbuf->pmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+       }
+
+
+       /* Setup our descriptor indices */
+       rxr->next_to_check = 0;
+       rxr->next_to_refresh = 0;
+       rxr->lro_enabled = FALSE;
+       rxr->rx_split_packets = 0;
+       rxr->rx_bytes = 0;
+       rxr->discard = FALSE;
+       rxr->vtag_strip = FALSE;
+
+       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+       /*
+       ** Now set up the LRO interface:
+       ** 82598 uses software LRO, the
+       ** 82599 and X540 use a hardware assist.
+       */
+#if 0 /* NET_LRO */
+       if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
+           (ifp->if_capenable & IFCAP_RXCSUM) &&
+           (ifp->if_capenable & IFCAP_LRO))
+               ixgbe_setup_hw_rsc(rxr);
+       else if (ifp->if_capenable & IFCAP_LRO) {
+               int err = tcp_lro_init(lro);
+               if (err) {
+                       device_printf(dev, "LRO Initialization failed!\n");
+                       goto fail;
+               }
+               INIT_DEBUGOUT("RX Soft LRO Initialized\n");
+               rxr->lro_enabled = TRUE;
+               lro->ifp = adapter->ifp;
+       }
+#endif
+
+       IXGBE_RX_UNLOCK(rxr);
+       return (0);
+
+fail:
+       ixgbe_free_receive_ring(rxr);
+       IXGBE_RX_UNLOCK(rxr);
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Initialize all receive rings.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr = adapter->rx_rings;
+       int j;
+
+       for (j = 0; j < adapter->num_queues; j++, rxr++)
+               if (ixgbe_setup_receive_ring(rxr))
+                       goto fail;
+
+       return (0);
+fail:
+       /*
+        * Free the RX buffers allocated so far; we only handle
+        * the rings that completed, since the failing ring has
+        * already cleaned up after itself. 'j' failed, so it's
+        * the terminus.
+        */
+       for (int i = 0; i < j; ++i) {
+               rxr = &adapter->rx_rings[i];
+               ixgbe_free_receive_ring(rxr);
+       }
+
+       return (ENOBUFS);
+}
+
+/*********************************************************************
+ *
+ *  Setup receive registers and features.
+ *
+ **********************************************************************/
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
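+/*
+** The SRRCTL buffer size fields count 1KB units
+** (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10); round the mbuf
+** size up so the whole cluster is reflected after the shift.
+*/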
+       
+static void
+ixgbe_initialize_receive_units(struct adapter *adapter)
+{
+       struct  rx_ring *rxr = adapter->rx_rings;
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ifnet   *ifp = adapter->ifp;
+       u32             bufsz, rxctrl, fctrl, srrctl, rxcsum;
+       u32             reta, mrqc = 0, hlreg, random[10];
+
+
+       /*
+        * Make sure receives are disabled while
+        * setting up the descriptor ring
+        */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
+           rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+       /* Enable broadcasts */
+       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       fctrl |= IXGBE_FCTRL_BAM;
+       fctrl |= IXGBE_FCTRL_DPF;
+       fctrl |= IXGBE_FCTRL_PMCF;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+       /* Set for Jumbo Frames? */
+       hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+       if (ifp->if_mtu > ETHERMTU)
+               hlreg |= IXGBE_HLREG0_JUMBOEN;
+       else
+               hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+#ifdef DEV_NETMAP
+       /* crcstrip is conditional in netmap (in RDRXCTL too ?) */
+       if (ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
+               hlreg &= ~IXGBE_HLREG0_RXCRCSTRP;
+       else
+               hlreg |= IXGBE_HLREG0_RXCRCSTRP;
+#endif /* DEV_NETMAP */
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+
+       bufsz = (adapter->rx_mbuf_sz +
+           BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               u64 rdba = rxr->rxdma.dma_paddr;
+
+               /* Setup the Base and Length of the Rx Descriptor Ring */
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
+                              (rdba & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
+                   adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+
+               /* Set up the SRRCTL register */
+               srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+               srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+               srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+               srrctl |= bufsz;
+               if (rxr->hdr_split) {
+                       /* Use a standard mbuf for the header */
+                       srrctl |= ((IXGBE_RX_HDR <<
+                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
+                           & IXGBE_SRRCTL_BSIZEHDR_MASK);
+                       srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+               } else
+                       srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+               IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+
+               /* Setup the HW Rx Head and Tail Descriptor Pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+       }
+
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
+               u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+                             IXGBE_PSRTYPE_UDPHDR |
+                             IXGBE_PSRTYPE_IPV4HDR |
+                             IXGBE_PSRTYPE_IPV6HDR;
+               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+       }
+
+       rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+
+       /* Setup RSS */
+       if (adapter->num_queues > 1) {
+               int i, j;
+               reta = 0;
+
+               /* set up random bits */
+               karc4rand(&random, sizeof(random));
+
+               /* Set up the redirection table */
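+               /*
+               ** 128 one-byte entries; each byte selects the
+               ** queue for one hash bucket, and four entries
+               ** are packed into each 32-bit RETA register,
+               ** written on every fourth iteration.
+               */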
+               for (i = 0, j = 0; i < 128; i++, j++) {
+                       if (j == adapter->num_queues) j = 0;
+                       reta = (reta << 8) | (j * 0x11);
+                       if ((i & 3) == 3)
+                               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+               }
+
+               /* Now fill our hash function seeds */
+               for (int i = 0; i < 10; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
+
+               /* Perform hash on these packet types */
+               mrqc = IXGBE_MRQC_RSSEN
+                    | IXGBE_MRQC_RSS_FIELD_IPV4
+                    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+                    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+                    | IXGBE_MRQC_RSS_FIELD_IPV6
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+               /* RSS and RX IPP Checksum are mutually exclusive */
+               rxcsum |= IXGBE_RXCSUM_PCSD;
+       }
+
+       if (ifp->if_capenable & IFCAP_RXCSUM)
+               rxcsum |= IXGBE_RXCSUM_PCSD;
+
+       if (!(rxcsum & IXGBE_RXCSUM_PCSD))
+               rxcsum |= IXGBE_RXCSUM_IPPCSE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Free all receive rings.
+ *
+ **********************************************************************/
+static void
+ixgbe_free_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr = adapter->rx_rings;
+
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+#if 0  /* NET_LRO */
+               struct lro_ctrl         *lro = &rxr->lro;
+#endif
+               ixgbe_free_receive_buffers(rxr);
+               /* Free LRO memory */
+#if 0  /* NET_LRO */
+               tcp_lro_free(lro);
+#endif
+               /* Free the ring memory as well */
+               ixgbe_dma_free(adapter, &rxr->rxdma);
+       }
+
+       kfree(adapter->rx_rings, M_DEVBUF);
+}
+
+
+/*********************************************************************
+ *
+ *  Free receive ring data structures
+ *
+ **********************************************************************/
+static void
+ixgbe_free_receive_buffers(struct rx_ring *rxr)
+{
+       struct adapter          *adapter = rxr->adapter;
+       struct ixgbe_rx_buf     *rxbuf;
+
+       INIT_DEBUGOUT("free_receive_structures: begin");
+
+       /* Cleanup any existing buffers */
+       if (rxr->rx_buffers != NULL) {
+               for (int i = 0; i < adapter->num_rx_desc; i++) {
+                       rxbuf = &rxr->rx_buffers[i];
+                       if (rxbuf->m_head != NULL) {
+                               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                               rxbuf->m_head->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_head);
+                       }
+                       if (rxbuf->m_pack != NULL) {
+                               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                               rxbuf->m_pack->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_pack);
+                       }
+                       rxbuf->m_head = NULL;
+                       rxbuf->m_pack = NULL;
+                       if (rxbuf->hmap != NULL) {
+                               bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
+                               rxbuf->hmap = NULL;
+                       }
+                       if (rxbuf->pmap != NULL) {
+                               bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
+                               rxbuf->pmap = NULL;
+                       }
+               }
+               if (rxr->rx_buffers != NULL) {
+                       kfree(rxr->rx_buffers, M_DEVBUF);
+                       rxr->rx_buffers = NULL;
+               }
+       }
+
+       if (rxr->htag != NULL) {
+               bus_dma_tag_destroy(rxr->htag);
+               rxr->htag = NULL;
+       }
+       if (rxr->ptag != NULL) {
+               bus_dma_tag_destroy(rxr->ptag);
+               rxr->ptag = NULL;
+       }
+
+       return;
+}
+
+static __inline void
+ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+                 
+        /*
+         * At the moment, LRO is only for IP/TCP packets and the TCP
+         * checksum of the packet must have been computed by hardware.
+         * Also it should not have a VLAN tag in the ethernet header.
+         * In case of IPv6 we do not yet support ext. hdrs.
+         */
+#if 0  /* NET_LRO */
+        if (rxr->lro_enabled &&
+            (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+            (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+            ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
+            (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+            (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
+            (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+                /*
+                 * Send to the stack if:
+                 *  - LRO not enabled, or
+                 *  - no LRO resources, or
+                 *  - lro enqueue fails
+                 */
+                if (rxr->lro.lro_cnt != 0)
+                        if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+                                return;
+        }
+#endif
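+       /*
+       ** Drop the RX lock around if_input(); the stack may
+       ** re-enter the driver and must not find the lock held.
+       */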
+       IXGBE_RX_UNLOCK(rxr);
+        (*ifp->if_input)(ifp, m);
+       IXGBE_RX_LOCK(rxr);
+}
+
+static __inline void
+ixgbe_rx_discard(struct rx_ring *rxr, int i)
+{
+       struct ixgbe_rx_buf     *rbuf;
+
+       rbuf = &rxr->rx_buffers[i];
+
+        if (rbuf->fmp != NULL) {/* Partial chain ? */
+               rbuf->fmp->m_flags |= M_PKTHDR;
+                m_freem(rbuf->fmp);
+                rbuf->fmp = NULL;
+       }
+
+       /*
+       ** With advanced descriptors the writeback
+       ** clobbers the buffer addrs, so it's easier
+       ** to just free the existing mbufs and take
+       ** the normal refresh path to get new buffers
+       ** and mapping.
+       */
+       if (rbuf->m_head) {
+               m_free(rbuf->m_head);
+               rbuf->m_head = NULL;
+       }
+       if (rbuf->m_pack) {
+               m_free(rbuf->m_pack);
+               rbuf->m_pack = NULL;
+       }
+
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor ring and sends data which has
+ *  been DMA'd into host memory to the upper layer.
+ *
+ *  We loop at most count times if count is > 0, or until done if
+ *  count < 0.
+ *
+ *  Return TRUE for more work, FALSE for all clean.
+ *********************************************************************/
+static bool
+ixgbe_rxeof(struct ix_queue *que, int count)
+{
+       struct adapter          *adapter = que->adapter;
+       struct rx_ring          *rxr = que->rxr;
+       struct ifnet            *ifp = adapter->ifp;
+#if 0  /* NET_LRO */
+       struct lro_ctrl         *lro = &rxr->lro;
+       struct lro_entry        *queued;
+#endif
+       int                     i, nextp, processed = 0;
+       u32                     staterr = 0;
+       union ixgbe_adv_rx_desc *cur;
+       struct ixgbe_rx_buf     *rbuf, *nbuf;
+
+       IXGBE_RX_LOCK(rxr);
+
+#ifdef DEV_NETMAP
+       if (ifp->if_capenable & IFCAP_NETMAP) {
+               /*
+                * Same as the txeof routine: only wakeup clients on intr.
+                * NKR_PENDINTR in nr_kflags is used to implement interrupt
+                * mitigation (ixgbe_rxsync() will not look for new packets
+                * unless NKR_PENDINTR is set).
+                */
+               struct netmap_adapter *na = NA(ifp);
+
+               na->rx_rings[rxr->me].nr_kflags |= NKR_PENDINTR;
+               selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET);
+               IXGBE_RX_UNLOCK(rxr);
+               IXGBE_CORE_LOCK(adapter);
+               selwakeuppri(&na->rx_si, PI_NET);
+               IXGBE_CORE_UNLOCK(adapter);
+               return (FALSE);
+       }
+#endif /* DEV_NETMAP */
+       for (i = rxr->next_to_check; count != 0;) {
+               struct mbuf     *sendmp, *mh, *mp;
+               u32             rsc, ptype;
+               u16             hlen, plen, hdr;
+#ifdef NET_VLAN
+               u16             vtag = 0;
+#endif
+               bool            eop;
+               /* Sync the ring. */
+               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+               cur = &rxr->rx_base[i];
+               staterr = le32toh(cur->wb.upper.status_error);
+
+               if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+                       break;
+               if ((ifp->if_flags & IFF_RUNNING) == 0)
+                       break;
+
+               count--;
+               sendmp = NULL;
+               nbuf = NULL;
+               rsc = 0;
+               cur->wb.upper.status_error = 0;
+               rbuf = &rxr->rx_buffers[i];
+               mh = rbuf->m_head;
+               mp = rbuf->m_pack;
+
+               plen = le16toh(cur->wb.upper.length);
+               ptype = le32toh(cur->wb.lower.lo_dword.data) &
+                   IXGBE_RXDADV_PKTTYPE_MASK;
+               hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
+               eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+
+               /* Process vlan info */
+#ifdef NET_VLAN
+               if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
+                       vtag = le16toh(cur->wb.upper.vlan);
+#endif
+
+               /* Make sure bad packets are discarded */
+               if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
+                   (rxr->discard)) {
+                       ifp->if_ierrors++;
+                       rxr->rx_discarded++;
+                       if (eop)
+                               rxr->discard = FALSE;
+                       else
+                               rxr->discard = TRUE;
+                       ixgbe_rx_discard(rxr, i);
+                       goto next_desc;
+               }
+
+               /*
+               ** On 82599 which supports a hardware
+               ** LRO (called HW RSC), packets need
+               ** not be fragmented across sequential
+               ** descriptors, rather the next descriptor
+               ** is indicated in bits of the descriptor.
+               ** This also means that we might process
+               ** more than one packet at a time, something
+               ** that has never been true before; it
+               ** required eliminating global chain pointers
+               ** in favor of what we are doing here.  -jfv
+               */
+               if (!eop) {
+                       /*
+                       ** Figure out the next descriptor
+                       ** of this frame.
+                       */
+                       if (rxr->hw_rsc == TRUE) {
+                               rsc = ixgbe_rsc_count(cur);
+                               rxr->rsc_num += (rsc - 1);
+                       }
+                       if (rsc) { /* Get hardware index */
+                               nextp = ((staterr &
+                                   IXGBE_RXDADV_NEXTP_MASK) >>
+                                   IXGBE_RXDADV_NEXTP_SHIFT);
+                       } else { /* Just sequential */
+                               nextp = i + 1;
+                               if (nextp == adapter->num_rx_desc)
+                                       nextp = 0;
+                       }
+                       nbuf = &rxr->rx_buffers[nextp];
+                       prefetch(nbuf);
+               }
+               /*
+               ** The header mbuf is ONLY used when header 
+               ** split is enabled, otherwise we get normal 
+               ** behavior, i.e., both header and payload
+               ** are DMA'd into the payload buffer.
+               **
+               ** Rather than using the fmp/lmp global pointers
+               ** we now keep the head of a packet chain in the
+               ** buffer struct and pass this along from one
+               ** descriptor to the next, until we get EOP.
+               */
+               if (rxr->hdr_split && (rbuf->fmp == NULL)) {
+                       /* This must be an initial descriptor */
+                       hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+                           IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+                       if (hlen > IXGBE_RX_HDR)
+                               hlen = IXGBE_RX_HDR;
+                       mh->m_len = hlen;
+                       mh->m_flags |= M_PKTHDR;
+                       mh->m_next = NULL;
+                       mh->m_pkthdr.len = mh->m_len;
+                       /* Null buf pointer so it is refreshed */
+                       rbuf->m_head = NULL;
+                       /*
+                       ** Check the payload length, this
+               ** could be zero if it's a small
+                       ** packet.
+                       */
+                       if (plen > 0) {
+                               mp->m_len = plen;
+                               mp->m_next = NULL;
+                               mp->m_flags &= ~M_PKTHDR;
+                               mh->m_next = mp;
+                               mh->m_pkthdr.len += mp->m_len;
+                               /* Null buf pointer so it is refreshed */
+                               rbuf->m_pack = NULL;
+                               rxr->rx_split_packets++;
+                       }
+                       /*
+                       ** Now create the forward
+                       ** chain so when complete 
+               ** we won't have to.
+                       */
+                        if (eop == 0) {
+                               /* stash the chain head */
+                                nbuf->fmp = mh;
+                               /* Make forward chain */
+                                if (plen)
+                                        mp->m_next = nbuf->m_pack;
+                                else
+                                        mh->m_next = nbuf->m_pack;
+                        } else {
+                               /* Singlet, prepare to send */
+                                sendmp = mh;
+                               /* If hardware handled vtag */
+#ifdef NET_VLAN
+                                if (vtag) {
+                                        sendmp->m_pkthdr.ether_vtag = vtag;
+                                        sendmp->m_flags |= M_VLANTAG;
+                                }
+#endif
+                        }
+               } else {
+                       /*
+                       ** Either no header split, or a
+                       ** secondary piece of a fragmented
+                       ** split packet.
+                       */
+                       mp->m_len = plen;
+                       /*
+                       ** See if there is a stored head
+                       ** that determines what we are
+                       */
+                       sendmp = rbuf->fmp;
+                       rbuf->m_pack = rbuf->fmp = NULL;
+
+                       if (sendmp != NULL) {  /* secondary frag */
+                               mp->m_flags &= ~M_PKTHDR;
+                               sendmp->m_pkthdr.len += mp->m_len;
+                       } else {
+                               /* first desc of a non-ps chain */
+                               sendmp = mp;
+                               sendmp->m_flags |= M_PKTHDR;
+                               sendmp->m_pkthdr.len = mp->m_len;
+#ifdef NET_VLAN
+                               if (staterr & IXGBE_RXD_STAT_VP) {
+                                       sendmp->m_pkthdr.ether_vtag = vtag;
+                                       sendmp->m_flags |= M_VLANTAG;
+                               }
+#endif
+                        }
+                       /* Pass the head pointer on */
+                       if (eop == 0) {
+                               nbuf->fmp = sendmp;
+                               sendmp = NULL;
+                               mp->m_next = nbuf->m_pack;
+                       }
+               }
+               ++processed;
+               /* Sending this frame? */
+               if (eop) {
+                       sendmp->m_pkthdr.rcvif = ifp;
+                       ifp->if_ipackets++;
+                       rxr->rx_packets++;
+                       /* capture data for AIM */
+                       rxr->bytes += sendmp->m_pkthdr.len;
+                       rxr->rx_bytes += sendmp->m_pkthdr.len;
+                       if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+                               ixgbe_rx_checksum(staterr, sendmp, ptype);
+#if 0 /* __FreeBSD_version >= 800000 */
+                       sendmp->m_pkthdr.flowid = que->msix;
+                       sendmp->m_flags |= M_FLOWID;
+#endif
+               }
+next_desc:
+               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+               /* Advance our pointers to the next descriptor. */
+               if (++i == adapter->num_rx_desc)
+                       i = 0;
+
+               /* Now send to the stack or do LRO */
+               if (sendmp != NULL) {
+                       rxr->next_to_check = i;
+                       ixgbe_rx_input(rxr, ifp, sendmp, ptype);
+                       i = rxr->next_to_check;
+               }
+
+               /* Every 8 descriptors we go to refresh mbufs */
+               if (processed == 8) {
+                       ixgbe_refresh_mbufs(rxr, i);
+                       processed = 0;
+               }
+       }
+
+       /* Refresh any remaining buf structs */
+       if (ixgbe_rx_unrefreshed(rxr))
+               ixgbe_refresh_mbufs(rxr, i);
+
+       rxr->next_to_check = i;
+
+       /*
+        * Flush any outstanding LRO work
+        */
+#if 0  /* NET_LRO */
+       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+               SLIST_REMOVE_HEAD(&lro->lro_active, next);
+               tcp_lro_flush(lro, queued);
+       }
+#endif
+
+       IXGBE_RX_UNLOCK(rxr);
+
+       /*
+       ** We still have cleaning to do?
+       ** Schedule another interrupt if so.
+       */
+       if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
+               ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
+               return (TRUE);
+       }
+
+       return (FALSE);
+}
+
+
+/*********************************************************************
+ *
+ *  Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of the checksum so that
+ *  the stack doesn't spend time verifying it.
+ *
+ *********************************************************************/
+static void
+ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
+{
+       u16     status = (u16) staterr;
+       u8      errors = (u8) (staterr >> 24);
+       bool    sctp = FALSE;
+
+       if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+           (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
+               sctp = TRUE;
+
+       if (status & IXGBE_RXD_STAT_IPCS) {
+               if (!(errors & IXGBE_RXD_ERR_IPE)) {
+                       /* IP Checksum Good */
+                       mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+                       mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+
+               } else
+                       mp->m_pkthdr.csum_flags = 0;
+       }
+       if (status & IXGBE_RXD_STAT_L4CS) {
+               u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if 0
+               if (sctp)
+                       type = CSUM_SCTP_VALID;
+#endif
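+               /*
+               ** csum_data of 0xffff together with CSUM_PSEUDO_HDR
+               ** tells the stack the L4 checksum already verified.
+               */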
+               if (!(errors & IXGBE_RXD_ERR_TCPE)) {
+                       mp->m_pkthdr.csum_flags |= type;
+                       if (!sctp)
+                               mp->m_pkthdr.csum_data = htons(0xffff);
+               } 
+       }
+       return;
+}
+
+
+/*
+** This routine is run via a vlan config EVENT,
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA; init will
+** repopulate the real table.
+*/
+static void
+ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u16             index, bit;
+
+       if (ifp->if_softc != arg)   /* Not our event */
+               return;
+
+       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
+               return;
+
+       IXGBE_CORE_LOCK(adapter);
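+       /*
+       ** The VFTA is 4096 bits in 128 32-bit words: bits
+       ** 11:5 of the vlan id pick the word, bits 4:0 the bit.
+       */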
+       index = (vtag >> 5) & 0x7F;
+       bit = vtag & 0x1F;
+       adapter->shadow_vfta[index] |= (1 << bit);
+       ++adapter->num_vlans;
+       ixgbe_init_locked(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+}
+
+/*
+** This routine is run via a vlan
+** unconfig EVENT; it removes our
+** entry from the soft vfta.
+*/
+static void
+ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u16             index, bit;
+
+       if (ifp->if_softc != arg)
+               return;
+
+       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
+               return;
+
+       IXGBE_CORE_LOCK(adapter);
+       index = (vtag >> 5) & 0x7F;
+       bit = vtag & 0x1F;
+       adapter->shadow_vfta[index] &= ~(1 << bit);
+       --adapter->num_vlans;
+       /* Re-init to load the changes */
+       ixgbe_init_locked(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+}
+
+static void
+ixgbe_setup_vlan_hw_support(struct adapter *adapter)
+{
+#ifdef NET_VLAN
+       struct ifnet    *ifp = adapter->ifp;
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct rx_ring  *rxr;
+       u32             ctrl;
+
+       /*
+       ** We get here thru init_locked, meaning
+       ** a soft reset; this has already cleared
+       ** the VFTA and other state, so if there
+       ** have been no vlans registered, do nothing.
+       */
+       if (adapter->num_vlans == 0)
+               return;
+
+       /*
+       ** A soft reset zeroes out the VFTA, so
+       ** we need to repopulate it now.
+       */
+       for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
+               if (adapter->shadow_vfta[i] != 0)
+                       IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
+                           adapter->shadow_vfta[i]);
+
+       ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+       /* Enable the Filter Table if enabled */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+               ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+               ctrl |= IXGBE_VLNCTRL_VFE;
+       }
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               ctrl |= IXGBE_VLNCTRL_VME;
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+
+       /* Setup the queues for vlans */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               rxr = &adapter->rx_rings[i];
+               /* On 82599 the VLAN enable is per/queue in RXDCTL */
+               if (hw->mac.type != ixgbe_mac_82598EB) {
+                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+                       ctrl |= IXGBE_RXDCTL_VME;
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+               }
+               rxr->vtag_strip = TRUE;
+       }
+#endif
+}
+
+static void
+ixgbe_enable_intr(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ix_queue *que = adapter->queues;
+       u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+
+       /* Enable Fan Failure detection */
+       if (hw->device_id == IXGBE_DEV_ID_82598AT)
+                   mask |= IXGBE_EIMS_GPI_SDP1;
+       else {
+                   mask |= IXGBE_EIMS_ECC;
+                   mask |= IXGBE_EIMS_GPI_SDP0;
+                   mask |= IXGBE_EIMS_GPI_SDP1;
+                   mask |= IXGBE_EIMS_GPI_SDP2;
+#ifdef IXGBE_FDIR
+                   mask |= IXGBE_EIMS_FLOW_DIR;
+#endif
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+
+       /* With RSS we use auto clear */
+       if (adapter->msix_mem) {
+               mask = IXGBE_EIMS_ENABLE_MASK;
+               /* Don't autoclear Link */
+               mask &= ~IXGBE_EIMS_OTHER;
+               mask &= ~IXGBE_EIMS_LSC;
+               IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+       }
+
+       /*
+       ** Now enable all queues, this is done separately to
+       ** allow for handling the extended (beyond 32) MSIX
+       ** vectors that can be used by 82599
+       */
+        for (int i = 0; i < adapter->num_queues; i++, que++)
+                ixgbe_enable_queue(adapter, que->msix);
+
+       IXGBE_WRITE_FLUSH(hw);
+
+       return;
+}
+
+static void
+ixgbe_disable_intr(struct adapter *adapter)
+{
+       if (adapter->msix_mem)
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+       } else {
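+               /*
+               ** 82599/X540 carry the additional MSIX vectors
+               ** in extended registers, so mask those as well.
+               */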
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+       }
+       IXGBE_WRITE_FLUSH(&adapter->hw);
+       return;
+}
+
+u16
+ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
+{
+       u16 value;
+
+       value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
+           reg, 2);
+
+       return (value);
+}
+
+void
+ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+       pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
+           reg, value, 2);
+
+       return;
+}
+
+/*
+** Setup the correct IVAR register for a particular MSIX interrupt
+**   (yes this is all very magic and confusing :)
+**  - entry is the register array entry
+**  - vector is the MSIX vector for this queue
+**  - type is RX/TX/MISC
+*/
+static void
+ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 ivar, index;
+
+       vector |= IXGBE_IVAR_ALLOC_VAL;
+
+       switch (hw->mac.type) {
+
+       case ixgbe_mac_82598EB:
+               if (type == -1)
+                       entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
+               else
+                       entry += (type * 64);
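+               /* each IVAR register holds four 8-bit entries */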
+               index = (entry >> 2) & 0x1F;
+               ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+               ivar &= ~(0xFF << (8 * (entry & 0x3)));
+               ivar |= (vector << (8 * (entry & 0x3)));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
+               break;
+
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (type == -1) { /* MISC IVAR */
+                       index = (entry & 1) * 8;
+                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+                       ivar &= ~(0xFF << index);
+                       ivar |= (vector << index);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+               } else {        /* RX/TX IVARS */
+                       index = (16 * (entry & 1)) + (8 * type);
+                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
+                       ivar &= ~(0xFF << index);
+                       ivar |= (vector << index);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
+               }
+               break;
+
+       default:
+               break;
+       }
+}
+
+static void
+ixgbe_configure_ivars(struct adapter *adapter)
+{
+       struct  ix_queue *que = adapter->queues;
+       u32 newitr;
+
+       if (ixgbe_max_interrupt_rate > 0)
+               newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
+       else
+               newitr = 0;
+
+        for (int i = 0; i < adapter->num_queues; i++, que++) {
+               /* First the RX queue entry */
+                ixgbe_set_ivar(adapter, i, que->msix, 0);
+               /* ... and the TX */
+               ixgbe_set_ivar(adapter, i, que->msix, 1);
+               /* Set an Initial EITR value */
+                IXGBE_WRITE_REG(&adapter->hw,
+                    IXGBE_EITR(que->msix), newitr);
+       }
+
+       /* For the Link interrupt */
+        ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
+}
+
+/*
+** ixgbe_sfp_probe - called from the local timer to
+** determine if a port had optics inserted.
+*/  
+static bool
+ixgbe_sfp_probe(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       device_t        dev = adapter->dev;
+       bool            result = FALSE;
+
+       if ((hw->phy.type == ixgbe_phy_nl) &&
+           (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
+               s32 ret = hw->phy.ops.identify_sfp(hw);
+               if (ret)
+                        goto out;
+               ret = hw->phy.ops.reset(hw);
+               if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+                       device_printf(dev,"Unsupported SFP+ module detected!");
+                       kprintf(" Reload driver with supported module.\n");
+                       adapter->sfp_probe = FALSE;
+                        goto out;
+               } else
+                       device_printf(dev,"SFP+ module detected!\n");
+               /* We now have supported optics */
+               adapter->sfp_probe = FALSE;
+               /* Set the optics type so system reports correctly */
+               ixgbe_setup_optics(adapter);
+               result = TRUE;
+       }
+out:
+       return (result);
+}
+
+/*
+** Tasklet handler for MSIX Link interrupts
+**  - done outside of interrupt context since it might sleep
+*/
+static void
+ixgbe_handle_link(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+
+       ixgbe_check_link(&adapter->hw,
+           &adapter->link_speed, &adapter->link_up, 0);
+       ixgbe_update_link_status(adapter);
+}
+
+/*
+** Tasklet for handling SFP module interrupts
+*/
+static void
+ixgbe_handle_mod(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+       struct ixgbe_hw *hw = &adapter->hw;
+       device_t        dev = adapter->dev;
+       u32 err;
+
+       err = hw->phy.ops.identify_sfp(hw);
+       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               device_printf(dev,
+                   "Unsupported SFP+ module type was detected.\n");
+               return;
+       }
+       err = hw->mac.ops.setup_sfp(hw);
+       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               device_printf(dev,
+                   "Setup failure - unsupported SFP+ module type.\n");
+               return;
+       }
+       taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+       return;
+}
+
+
+/*
+** Tasklet for handling MSF (multispeed fiber) interrupts
+*/
+static void
+ixgbe_handle_msf(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 autoneg;
+       bool negotiate;
+
+       autoneg = hw->phy.autoneg_advertised;
+       if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+               hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
+       if (hw->mac.ops.setup_link)
+               hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
+       return;
+}
+
+#ifdef IXGBE_FDIR
+/*
+** Tasklet for reinitializing the Flow Director filter table
+*/
+static void
+ixgbe_reinit_fdir(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+       struct ifnet   *ifp = adapter->ifp;
+
+       if (adapter->fdir_reinit != 1) /* Shouldn't happen */
+               return;
+       ixgbe_reinit_fdir_tables_82599(&adapter->hw);
+       adapter->fdir_reinit = 0;
+       /* Restart the interface */
+       ifp->if_drv_flags |= IFF_DRV_RUNNING;
+       return;
+}
+#endif
+
+/**********************************************************************
+ *
+ *  Update the board statistics counters.
+ *
+ **********************************************************************/
+static void
+ixgbe_update_stats_counters(struct adapter *adapter)
+{
+       struct ifnet   *ifp = adapter->ifp;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32  missed_rx = 0, bprc, lxon, lxoff, total;
+       u64  total_missed_rx = 0;
+
+       adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+       adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+       adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
+       adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+
+       for (int i = 0; i < 8; i++) {
+               u32 mp;
+               mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+               /* missed_rx tallies misses for the gprc workaround */
+               missed_rx += mp;
+               /* global total per queue */
+               adapter->stats.mpc[i] += mp;
+               /* Running comprehensive total for stats display */
+               total_missed_rx += adapter->stats.mpc[i];
+               if (hw->mac.type == ixgbe_mac_82598EB)
+                       adapter->stats.rnbc[i] +=
+                           IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+               adapter->stats.pxontxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+               adapter->stats.pxonrxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+               adapter->stats.pxofftxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+               adapter->stats.pxoffrxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               adapter->stats.pxon2offc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+       }
+       for (int i = 0; i < 16; i++) {
+               adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+               adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+               adapter->stats.qbrc[i] += 
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
+               adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               adapter->stats.qbtc[i] +=
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
+               adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+       }
+       adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
+       adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
+       adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+
+       /* Hardware workaround, gprc counts missed packets */
+       adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+       adapter->stats.gprc -= missed_rx;
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+               adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+               adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+               adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+               adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       } else {
+               adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+               /* 82598 only has a counter in the high register */
+               adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+               adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+               adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+       }
+
+       /*
+        * Workaround: mprc hardware is incorrectly counting
+        * broadcasts, so for now we subtract those.
+        */
+       bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+       adapter->stats.bprc += bprc;
+       adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               adapter->stats.mprc -= bprc;
+
+       adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+       adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+       adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+       adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+       adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+       adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+
+       lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+       adapter->stats.lxontxc += lxon;
+       lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       adapter->stats.lxofftxc += lxoff;
+       total = lxon + lxoff;
+
+       adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+       adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+       adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+       adapter->stats.gptc -= total;
+       adapter->stats.mptc -= total;
+       adapter->stats.ptc64 -= total;
+       adapter->stats.gotc -= total * ETHER_MIN_LEN;
+
+       adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+       adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+       adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+       adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+       adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+       adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+       adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+       adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+       adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+       adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+       adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+       adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+       adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+       adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+       adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+       adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
+       adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+       adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+       /* Only read FCOE counters on 82599/X540 */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+               adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+               adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+               adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+               adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+       }
+
+       /* Fill out the OS statistics structure */
+       ifp->if_ipackets = adapter->stats.gprc;
+       ifp->if_opackets = adapter->stats.gptc;
+       ifp->if_ibytes = adapter->stats.gorc;
+       ifp->if_obytes = adapter->stats.gotc;
+       ifp->if_imcasts = adapter->stats.mprc;
+       ifp->if_collisions = 0;
+
+       /* Rx Errors */
+       ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
+               adapter->stats.rlec;
+}
+
+/** ixgbe_sysctl_tdh_handler - Handler function
+ *  Retrieves the TDH value from the hardware
+ */
+static int 
+ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
+       if (!txr) return 0;
+
+       unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+/** ixgbe_sysctl_tdt_handler - Handler function
+ *  Retrieves the TDT value from the hardware
+ */
+static int 
+ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
+       if (!txr) return 0;
+
+       unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+/** ixgbe_sysctl_rdh_handler - Handler function
+ *  Retrieves the RDH value from the hardware
+ */
+static int 
+ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
+       if (!rxr) return 0;
+
+       unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+/** ixgbe_sysctl_rdt_handler - Handler function
+ *  Retrieves the RDT value from the hardware
+ */
+static int 
+ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
+       if (!rxr) return 0;
+
+       unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+static int
+ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+       struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
+       unsigned int reg, usec, rate;
+
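+       /*
+       ** The EITR interval field (bits 11:3) counts 2us
+       ** ticks, so an interval of N yields 500000/N
+       ** interrupts per second; 4000000/rate below already
+       ** includes the <<3 field offset.
+       */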
+       reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
+       usec = ((reg & 0x0FF8) >> 3);
+       if (usec > 0)
+               rate = 500000 / usec;
+       else
+               rate = 0;
+       error = sysctl_handle_int(oidp, &rate, 0, req);
+       if (error || !req->newptr)
+               return error;
+       reg &= ~0xfff; /* default, no limitation */
+       ixgbe_max_interrupt_rate = 0;
+       if (rate > 0 && rate < 500000) {
+               if (rate < 1000)
+                       rate = 1000;
+               ixgbe_max_interrupt_rate = rate;
+               reg |= ((4000000/rate) & 0xff8 );
+       }
+       IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg);
+       return 0;
+}
+
+/*
+ * Add sysctl variables, one per statistic, to the system.
+ */
+static void
+ixgbe_add_hw_stats(struct adapter *adapter)
+{
+       struct tx_ring *txr = adapter->tx_rings;
+       struct rx_ring *rxr = adapter->rx_rings;
+
+       struct sysctl_ctx_list *ctx = &adapter->sysctl_ctx;
+       struct sysctl_oid *tree = adapter->sysctl_tree;
+       struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+       struct ixgbe_hw_stats *stats = &adapter->stats;
+
+       struct sysctl_oid *stat_node, *queue_node;
+       struct sysctl_oid_list *stat_list, *queue_list;
+
+#define QUEUE_NAME_LEN 32
+       char namebuf[QUEUE_NAME_LEN];
+
+       /* Driver Statistics */
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
+                       CTLFLAG_RD, &adapter->dropped_pkts,
+                       "Driver dropped packets");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
+                       CTLFLAG_RD, &adapter->mbuf_defrag_failed,
+                       "m_defrag() failed");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
+                       CTLFLAG_RD, &adapter->no_tx_dma_setup,
+                       "Driver tx dma failure in xmit");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+                       CTLFLAG_RD, &adapter->watchdog_events,
+                       "Watchdog timeouts");
+#if 0  /* NET_TSO */
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
+                       CTLFLAG_RD, &adapter->tso_tx,
+                       "TSO");
+#endif
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
+                       CTLFLAG_RD, &adapter->link_irq,
+                       "Link MSIX IRQ Handled");
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               ksnprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
+               queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+                                           CTLFLAG_RD, NULL, "Queue Name");
+               queue_list = SYSCTL_CHILDREN(queue_node);
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
+                               CTLTYPE_UINT | CTLFLAG_RW, &adapter->queues[i],
+                               sizeof(&adapter->queues[i]),
+                               ixgbe_sysctl_interrupt_rate_handler, "IU",
+                               "Interrupt Rate");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+                               CTLFLAG_RD, &(adapter->queues[i].irqs), 0,
+                               "irqs on this queue");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
+                               CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
+                               ixgbe_sysctl_tdh_handler, "IU",
+                               "Transmit Descriptor Head");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
+                               CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
+                               ixgbe_sysctl_tdt_handler, "IU",
+                               "Transmit Descriptor Tail");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+                               CTLFLAG_RD, &txr->no_desc_avail, 0,
+                               "Queue No Descriptor Available");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+                               CTLFLAG_RD, &txr->total_packets, 0,
+                               "Queue Packets Transmitted");
+       }
+
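+       /* Per-queue RX ring statistics */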
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               ksnprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
+               queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+                                           CTLFLAG_RD, NULL, "Queue Name");
+               queue_list = SYSCTL_CHILDREN(queue_node);
+
+#if 0  /* NET_LRO */
+               struct lro_ctrl *lro = &rxr->lro;
+#endif
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
+                               CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
+                               ixgbe_sysctl_rdh_handler, "IU",
+                               "Receive Descriptor Head");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
+                               CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
+                               ixgbe_sysctl_rdt_handler, "IU",
+                               "Receive Descriptor Tail");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+                               CTLFLAG_RD, &rxr->rx_packets, 0,
+                               "Queue Packets Received");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+                               CTLFLAG_RD, &rxr->rx_bytes, 0,
+                               "Queue Bytes Received");
+#if 0  /* NET_LRO */
+               SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
+                               CTLFLAG_RD, &lro->lro_queued, 0,
+                               "LRO Queued");
+               SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
+                               CTLFLAG_RD, &lro->lro_flushed, 0,
+                               "LRO Flushed");
+#endif
+       }
+
+       /* MAC stats get their own sub node */
+
+       stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
+                                   CTLFLAG_RD, NULL, "MAC Statistics");
+       stat_list = SYSCTL_CHILDREN(stat_node);
+
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
+                       CTLFLAG_RD, &stats->crcerrs, 0,
+                       "CRC Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
+                       CTLFLAG_RD, &stats->illerrc, 0,
+                       "Illegal Byte Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
+                       CTLFLAG_RD, &stats->errbc, 0,
+                       "Byte Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
+                       CTLFLAG_RD, &stats->mspdc, 0,
+                       "MAC Short Packets Discarded");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
+                       CTLFLAG_RD, &stats->mlfc, 0,
+                       "MAC Local Faults");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
+                       CTLFLAG_RD, &stats->mrfc, 0,
+                       "MAC Remote Faults");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
+                       CTLFLAG_RD, &stats->rlec, 0,
+                       "Receive Length Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
+                       CTLFLAG_RD, &stats->lxontxc, 0,
+                       "Link XON Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
+                       CTLFLAG_RD, &stats->lxonrxc, 0,
+                       "Link XON Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
+                       CTLFLAG_RD, &stats->lxofftxc, 0,
+                       "Link XOFF Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
+                       CTLFLAG_RD, &stats->lxoffrxc, 0,
+                       "Link XOFF Received");
+
+       /* Packet Reception Stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
+                       CTLFLAG_RD, &stats->tor, 0,
+                       "Total Octets Received"); 
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
+                       CTLFLAG_RD, &stats->gorc, 0,
+                       "Good Octets Received"); 
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
+                       CTLFLAG_RD, &stats->tpr, 0,
+                       "Total Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
+                       CTLFLAG_RD, &stats->gprc, 0,
+                       "Good Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
+                       CTLFLAG_RD, &stats->mprc, 0,
+                       "Multicast Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
+                       CTLFLAG_RD, &stats->bprc, 0,
+                       "Broadcast Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
+                       CTLFLAG_RD, &stats->prc64, 0,
+                       "64 byte frames received ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
+                       CTLFLAG_RD, &stats->prc127, 0,
+                       "65-127 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
+                       CTLFLAG_RD, &stats->prc255, 0,
+                       "128-255 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
+                       CTLFLAG_RD, &stats->prc511, 0,
+                       "256-511 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
+                       CTLFLAG_RD, &stats->prc1023, 0,
+                       "512-1023 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->prc1522, 0,
+                       "1023-1522 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
+                       CTLFLAG_RD, &stats->ruc, 0,
+                       "Receive Undersized");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
+                       CTLFLAG_RD, &stats->rfc, 0,
+                       "Fragmented Packets Received ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
+                       CTLFLAG_RD, &stats->roc, 0,
+                       "Oversized Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
+                       CTLFLAG_RD, &stats->rjc, 0,
+                       "Received Jabber");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
+                       CTLFLAG_RD, &stats->mngprc, 0,
+                       "Management Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
+                       CTLFLAG_RD, &stats->mngptc, 0,
+                       "Management Packets Dropped");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
+                       CTLFLAG_RD, &stats->xec, 0,
+                       "Checksum Errors");
+
+       /* Packet Transmission Stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+                       CTLFLAG_RD, &stats->gotc, 0,
+                       "Good Octets Transmitted"); 
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
+                       CTLFLAG_RD, &stats->tpt, 0,
+                       "Total Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+                       CTLFLAG_RD, &stats->gptc, 0,
+                       "Good Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
+                       CTLFLAG_RD, &stats->bptc, 0,
+                       "Broadcast Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
+                       CTLFLAG_RD, &stats->mptc, 0,
+                       "Multicast Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
+                       CTLFLAG_RD, &stats->mngptc, 0,
+                       "Management Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
+                       CTLFLAG_RD, &stats->ptc64, 0,
+                       "64 byte frames transmitted ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
+                       CTLFLAG_RD, &stats->ptc127, 0,
+                       "65-127 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
+                       CTLFLAG_RD, &stats->ptc255, 0,
+                       "128-255 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
+                       CTLFLAG_RD, &stats->ptc511, 0,
+                       "256-511 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
+                       CTLFLAG_RD, &stats->ptc1023, 0,
+                       "512-1023 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->ptc1522, 0,
+                       "1024-1522 byte frames transmitted");
+
+       /* FC Stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc",
+               CTLFLAG_RD, &stats->fccrc, 0,
+               "FC CRC Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last",
+               CTLFLAG_RD, &stats->fclast, 0,
+               "FC Last Error");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
+               CTLFLAG_RD, &stats->fcoerpdc, 0,
+               "FCoE Packets Dropped");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
+               CTLFLAG_RD, &stats->fcoeprc, 0,
+               "FCoE Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
+               CTLFLAG_RD, &stats->fcoeptc, 0,
+               "FCoE Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
+               CTLFLAG_RD, &stats->fcoedwrc, 0,
+               "FCoE DWords Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
+               CTLFLAG_RD, &stats->fcoedwtc, 0,
+               "FCoE DWords Transmitted");
+}
+
+/*
+** Set flow control using sysctl:
+** Flow control values:
+**     0 - off
+**     1 - rx pause
+**     2 - tx pause
+**     3 - full
+*/
+static int
+ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+       int error, last;
+       struct adapter *adapter = (struct adapter *) arg1;
+
+       last = adapter->fc;
+       error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
+       if ((error) || (req->newptr == NULL))
+               return (error);
+
+       /* Don't bother if it's not changed */
+       if (adapter->fc == last)
+               return (0);
+
+       switch (adapter->fc) {
+               case ixgbe_fc_rx_pause:
+               case ixgbe_fc_tx_pause:
+               case ixgbe_fc_full:
+                       adapter->hw.fc.requested_mode = adapter->fc;
+                       break;
+               case ixgbe_fc_none:
+               default:
+                       adapter->hw.fc.requested_mode = ixgbe_fc_none;
+       }
+
+       ixgbe_fc_enable(&adapter->hw, 0);
+       return error;
+}
+
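+/*
+** Register a writable sysctl for the RX processing limit, i.e. the
+** maximum number of packets handled per receive-loop pass.
+*/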
+static void
+ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
+        const char *description, int *limit, int value)
+{
+        *limit = value;
+        SYSCTL_ADD_INT(&adapter->sysctl_ctx,
+            SYSCTL_CHILDREN(adapter->sysctl_tree),
+            OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
+}
+
+/*
+** Control link advertise speed:
+**     0 - normal (advertise both 1G and 10G)
+**     1 - advertise only 1G
+**     2 - advertise only 100Mb (X540 only)
+*/
+static int
+ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
+{
+       int                     error = 0;
+       struct adapter          *adapter;
+       device_t                dev;
+       struct ixgbe_hw         *hw;
+       ixgbe_link_speed        speed, last;
+
+       adapter = (struct adapter *) arg1;
+       dev = adapter->dev;
+       hw = &adapter->hw;
+       last = hw->phy.autoneg_advertised;
+
+       error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
+
+       if ((error) || (adapter->advertise == -1))
+               return (error);
+
+       if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
+            (hw->phy.multispeed_fiber)))
+               return (error);
+
+       if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
+               device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
+               return (error);
+       }
+
+       if (adapter->advertise == 1)
+                speed = IXGBE_LINK_SPEED_1GB_FULL;
+       else if (adapter->advertise == 2)
+                speed = IXGBE_LINK_SPEED_100_FULL;
+       else
+                speed = IXGBE_LINK_SPEED_1GB_FULL |
+                       IXGBE_LINK_SPEED_10GB_FULL;
+
+       if (speed == last) /* no change */
+               return (error);
+
+       hw->mac.autotry_restart = TRUE;
+       hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
+
+       return (error);
+}
+
+/*
+** Thermal Shutdown Trigger
+**   - cause a Thermal Overtemp IRQ
+*/
+static int
+ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS)
+{
+       int             error, fire = 0;
+       struct adapter  *adapter = (struct adapter *) arg1;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (hw->mac.type != ixgbe_mac_X540)
+               return (0);
+
+       error = sysctl_handle_int(oidp, &fire, 0, req);
+       if ((error) || (req->newptr == NULL))
+               return (error);
+
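+       /* Setting the TS bit in EICS raises the overtemp interrupt in software */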
+       if (fire) {
+               u32 reg = IXGBE_READ_REG(hw, IXGBE_EICS);
+               reg |= IXGBE_EICR_TS;
+               IXGBE_WRITE_REG(hw, IXGBE_EICS, reg);
+       }
+
+       return (0);
+}
diff --git a/sys/dev/netif/ixgbe/ixgbe.h b/sys/dev/netif/ixgbe/ixgbe.h
new file mode 100644 (file)
index 0000000..25d1f6a
--- /dev/null
@@ -0,0 +1,537 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2012, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: src/sys/dev/ixgbe/ixgbe.h,v 1.26 2012/04/23 22:05:09 bz Exp $*/
+
+
+#ifndef _IXGBE_H_
+#define _IXGBE_H_
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#if __FreeBSD_version >= 800000
+#include <sys/buf_ring.h>
+#endif
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/ifq_var.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/vlan/if_vlan_var.h>
+#include <net/vlan/if_vlan_ether.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#ifdef NET_LRO
+#include <netinet/tcp_lro.h>   /* XXX: IPv4 only */
+#endif
+#include <netinet/udp.h>
+
+#include <sys/in_cksum.h>
+
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <bus/pci/pcivar.h>
+#include <bus/pci/pcireg.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <machine/smp.h>
+
+#include <sys/spinlock.h>
+#include <sys/spinlock2.h>
+
+#ifdef IXGBE_IEEE1588
+#include <sys/ieee1588.h>
+#endif
+
+#include "ixgbe_api.h"
+#include "ixgbe_defines.h"
+
+/* Tunables */
+
+/*
+ * TxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the
+ * number of transmit descriptors allocated by the driver. Increasing this
+ * value allows the driver to queue more transmits. Each descriptor is 16
+ * bytes. Performance tests have shown the 2K value to be optimal for top
+ * performance.
+ */
+#define DEFAULT_TXD    1024
+#define PERFORM_TXD    2048
+#define MAX_TXD                4096
+#define MIN_TXD                64
+
+/*
+ * RxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the
+ * number of receive descriptors allocated for each RX queue. Increasing this
+ * value allows the driver to buffer more incoming packets. Each descriptor
+ * is 16 bytes.  A receive buffer is also allocated for each descriptor. 
+ * 
+ * Note: with 8 rings and a dual port card, it is possible to bump up 
+ *     against the system mbuf pool limit, you can tune nmbclusters
+ *     to adjust for this.
+ */
+#define DEFAULT_RXD    1024
+#define PERFORM_RXD    2048
+#define MAX_RXD                4096
+#define MIN_RXD                64
+
+/* Alignment for rings */
+#define DBA_ALIGN      128
+
+/*
+ * This parameter controls the maximum number of times the driver will loop
+ * in the ISR. Minimum Value = 1
+ */
+#define MAX_LOOP       10
+
+/*
+ * This is the max watchdog interval, i.e. the time that can
+ * pass between any two TX clean operations; such cleaning only
+ * happens when the TX hardware is functioning.
+ */
+#define IXGBE_WATCHDOG                   (10 * hz)
+
+/*
+ * These parameters control when the driver calls the routine to reclaim
+ * transmit descriptors.
+ */
+#define IXGBE_TX_CLEANUP_THRESHOLD     (adapter->num_tx_desc / 8)
+#define IXGBE_TX_OP_THRESHOLD          (adapter->num_tx_desc / 32)
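+/* With the default of 1024 TX descriptors these evaluate to 128 and 32. */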
+
+#define IXGBE_MAX_FRAME_SIZE   0x3F00
+
+/* Flow control constants */
+#define IXGBE_FC_PAUSE         0xFFFF
+#define IXGBE_FC_HI            0x20000
+#define IXGBE_FC_LO            0x10000
+
+/* Keep older OS drivers building... */
+#if !defined(SYSCTL_ADD_UQUAD)
+#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
+#endif
+
+/* Defines for printing debug information */
+#define DEBUG_INIT  0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW    0
+
+#define INIT_DEBUGOUT(S)            if (DEBUG_INIT)  kprintf(S "\n")
+#define INIT_DEBUGOUT1(S, A)        if (DEBUG_INIT)  kprintf(S "\n", A)
+#define INIT_DEBUGOUT2(S, A, B)     if (DEBUG_INIT)  kprintf(S "\n", A, B)
+#define IOCTL_DEBUGOUT(S)           if (DEBUG_IOCTL) kprintf(S "\n")
+#define IOCTL_DEBUGOUT1(S, A)       if (DEBUG_IOCTL) kprintf(S "\n", A)
+#define IOCTL_DEBUGOUT2(S, A, B)    if (DEBUG_IOCTL) kprintf(S "\n", A, B)
+#define HW_DEBUGOUT(S)              if (DEBUG_HW) kprintf(S "\n")
+#define HW_DEBUGOUT1(S, A)          if (DEBUG_HW) kprintf(S "\n", A)
+#define HW_DEBUGOUT2(S, A, B)       if (DEBUG_HW) kprintf(S "\n", A, B)
+
+#define MAX_NUM_MULTICAST_ADDRESSES     128
+#define IXGBE_82598_SCATTER            100
+#define IXGBE_82599_SCATTER            32
+#define MSIX_82598_BAR                 3
+#define MSIX_82599_BAR                 4
+#define IXGBE_TSO_SIZE                 262140
+#define IXGBE_TX_BUFFER_SIZE           ((u32) 1514)
+#define IXGBE_RX_HDR                   128
+#define IXGBE_VFTA_SIZE                        128
+#define IXGBE_BR_SIZE                  4096
+#define IXGBE_QUEUE_MIN_FREE           32
+#define IXGBE_QUEUE_IDLE               1
+#define IXGBE_QUEUE_WORKING            2
+#define IXGBE_QUEUE_HUNG               4
+#define IXGBE_QUEUE_DEPLETED           8
+
+/* Offload bits in mbuf flag */
+#if __FreeBSD_version >= 800000
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+#else
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP)
+#endif
+
+/* For 6.X code compatibility */
+#if !defined(ETHER_BPF_MTAP)
+#define ETHER_BPF_MTAP         BPF_MTAP
+#endif
+
+#if __FreeBSD_version < 700000
+#define CSUM_TSO               0
+#define IFCAP_TSO4             0
+#endif
+
+/*
+ * Interrupt Moderation parameters 
+ */
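+/* These feed the adaptive interrupt moderation (AIM) logic, which picks
+ * a per-queue EITR setting based on the observed traffic profile. */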
+#define IXGBE_LOW_LATENCY      128
+#define IXGBE_AVE_LATENCY      400
+#define IXGBE_BULK_LATENCY     1200
+#define IXGBE_LINK_ITR         2000
+
+/*
+ *****************************************************************************
+ * vendor_info_array
+ * 
+ * This array contains the list of Subvendor/Subdevice IDs on which the driver
+ * should load.
+ * 
+ *****************************************************************************
+ */
+typedef struct _ixgbe_vendor_info_t {
+       unsigned int    vendor_id;
+       unsigned int    device_id;
+       unsigned int    subvendor_id;
+       unsigned int    subdevice_id;
+       unsigned int    index;
+} ixgbe_vendor_info_t;
+
+
+struct ixgbe_tx_buf {
+       u32             eop_index;
+       struct mbuf     *m_head;
+       bus_dmamap_t    map;
+};
+
+struct ixgbe_rx_buf {
+       struct mbuf     *m_head;
+       struct mbuf     *m_pack;
+       struct mbuf     *fmp;
+       bus_dmamap_t    hmap;
+       bus_dmamap_t    pmap;
+};
+
+/*
+ * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
+ */
+struct ixgbe_dma_alloc {
+       bus_addr_t              dma_paddr;
+       caddr_t                 dma_vaddr;
+       bus_dma_tag_t           dma_tag;
+       bus_dmamap_t            dma_map;
+       bus_dma_segment_t       dma_seg;
+       bus_size_t              dma_size;
+       int                     dma_nseg;
+};
+
+/*
+** Driver queue struct: this is the interrupt container
+**  for the associated tx and rx ring.
+*/
+struct ix_queue {
+       struct adapter          *adapter;
+       u32                     msix;           /* This queue's MSIX vector */
+       u32                     eims;           /* This queue's EIMS bit */
+       u32                     eitr_setting;
+       struct resource         *res;
+       void                    *tag;
+       struct tx_ring          *txr;
+       struct rx_ring          *rxr;
+       struct task             que_task;
+       struct taskqueue        *tq;
+       u64                     irqs;
+       struct lwkt_serialize   serializer;
+};
+
+/*
+ * The transmit ring, one per queue
+ */
+struct tx_ring {
+        struct adapter         *adapter;
+       struct lock             tx_lock;
+       u32                     me;
+       int                     queue_status;
+       int                     watchdog_time;
+       union ixgbe_adv_tx_desc *tx_base;
+       struct ixgbe_dma_alloc  txdma;
+       u32                     next_avail_desc;
+       u32                     next_to_clean;
+       struct ixgbe_tx_buf     *tx_buffers;
+       volatile u16            tx_avail;
+       u32                     txd_cmd;
+       bus_dma_tag_t           txtag;
+       char                    lock_name[16];
+#if __FreeBSD_version >= 800000
+       struct buf_ring         *br;
+#endif
+#ifdef IXGBE_FDIR
+       u16                     atr_sample;
+       u16                     atr_count;
+#endif
+       u32                     bytes;  /* used for AIM */
+       u32                     packets;
+       /* Soft Stats */
+       u64                     no_desc_avail;
+       u64                     total_packets;
+};
+
+
+/*
+ * The Receive ring, one per rx queue
+ */
+struct rx_ring {
+        struct adapter         *adapter;
+       struct lock             rx_lock;
+       u32                     me;
+       union ixgbe_adv_rx_desc *rx_base;
+       struct ixgbe_dma_alloc  rxdma;
+#ifdef NET_LRO
+       struct lro_ctrl         lro;
+#endif
+       bool                    lro_enabled;
+       bool                    hdr_split;
+       bool                    hw_rsc;
+       bool                    discard;
+       bool                    vtag_strip;
+        u32                    next_to_refresh;
+        u32                    next_to_check;
+       char                    lock_name[16];
+       struct ixgbe_rx_buf     *rx_buffers;
+       bus_dma_tag_t           htag;
+       bus_dma_tag_t           ptag;
+
+       u32                     bytes; /* Used for AIM calc */
+       u32                     packets;
+
+       /* Soft stats */
+       u64                     rx_irq;
+       u64                     rx_split_packets;
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     rx_discarded;
+       u64                     rsc_num;
+#ifdef IXGBE_FDIR
+       u64                     flm;
+#endif
+};
+
+/* Our adapter structure */
+struct adapter {
+       struct ifnet            *ifp;
+       struct ixgbe_hw         hw;
+
+       struct ixgbe_osdep      osdep;
+       struct device           *dev;
+
+       struct resource         *pci_mem;
+       struct resource         *msix_mem;
+
+       /*
+        * Interrupt resources: this set is
+        * either used for legacy, or for Link
+        * when doing MSIX
+        */
+       void                    *tag;
+       struct resource         *res;
+
+       struct ifmedia          media;
+       struct callout          timer;
+       int                     msix;
+       int                     if_flags;
+
+       struct lock             core_lock;
+       struct spinlock         mcast_spin;
+
+       eventhandler_tag        vlan_attach;
+       eventhandler_tag        vlan_detach;
+
+       u16                     num_vlans;
+       u16                     num_queues;
+
+       /*
+       ** Shadow VFTA table, this is needed because
+       ** the real vlan filter table gets cleared during
+       ** a soft reset and the driver needs to be able
+       ** to repopulate it.
+       */
+       u32                     shadow_vfta[IXGBE_VFTA_SIZE];
+
+       /* Info about the interface */
+       u32                     optics;
+       u32                     fc; /* local flow ctrl setting */
+       int                     advertise;  /* link speeds */
+       bool                    link_active;
+       u16                     max_frame_size;
+       u16                     num_segs;
+       u32                     link_speed;
+       bool                    link_up;
+       u32                     linkvec;
+
+       /* Mbuf cluster size */
+       u32                     rx_mbuf_sz;
+
+       /* Support for pluggable optics */
+       bool                    sfp_probe;
+       struct task             link_task;  /* Link tasklet */
+       struct task             mod_task;   /* SFP tasklet */
+       struct task             msf_task;   /* Multispeed Fiber */
+#ifdef IXGBE_FDIR
+       int                     fdir_reinit;
+       struct task             fdir_task;
+#endif
+       struct taskqueue        *tq;
+
+       /*
+       ** Queues:
+       **   This is the irq holder, it has
+       **   an RX/TX pair of rings associated
+       **   with it.
+       */
+       struct ix_queue         *queues;
+
+       /*
+        * Transmit rings:
+        *      Allocated at run time, an array of rings.
+        */
+       struct tx_ring          *tx_rings;
+       int                     num_tx_desc;
+
+       /*
+        * Receive rings:
+        *      Allocated at run time, an array of rings.
+        */
+       struct rx_ring          *rx_rings;
+       int                     num_rx_desc;
+       u64                     que_mask;
+       u32                     rx_process_limit;
+
+       /* Multicast array memory */
+       u8                      *mta;
+
+       /* Misc stats maintained by the driver */
+       unsigned long           dropped_pkts;
+       unsigned long           mbuf_defrag_failed;
+       unsigned long           mbuf_header_failed;
+       unsigned long           mbuf_packet_failed;
+       unsigned long           no_tx_map_avail;
+       unsigned long           no_tx_dma_setup;
+       unsigned long           watchdog_events;
+       unsigned long           tso_tx;
+       unsigned long           link_irq;
+
+       struct ixgbe_hw_stats   stats;
+
+       struct lwkt_serialize   serializer;
+       struct sysctl_ctx_list  sysctl_ctx;
+       struct sysctl_oid       *sysctl_tree;
+};
+
+/* Precision Time Sync (IEEE 1588) defines */
+#define ETHERTYPE_IEEE1588      0x88F7
+#define PICOSECS_PER_TICK       20833
+#define TSYNC_UDP_PORT          319 /* UDP port for the protocol */
+#define IXGBE_ADVTXD_TSTAMP    0x00080000
+
+
+#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
+        lockinit(&(_sc)->core_lock, _name, 0, LK_CANRECURSE)
+#define IXGBE_CORE_LOCK_DESTROY(_sc)      lockuninit(&(_sc)->core_lock)
+#define IXGBE_TX_LOCK_DESTROY(_sc)        lockuninit(&(_sc)->tx_lock)
+#define IXGBE_RX_LOCK_DESTROY(_sc)        lockuninit(&(_sc)->rx_lock)
+#define IXGBE_CORE_LOCK(_sc)              lockmgr(&(_sc)->core_lock, LK_EXCLUSIVE)
+#define IXGBE_TX_LOCK(_sc)                lockmgr(&(_sc)->tx_lock, LK_EXCLUSIVE)
+#define IXGBE_TX_TRYLOCK(_sc)             lockmgr(&(_sc)->tx_lock, LK_EXCLUSIVE|LK_NOWAIT)
+#define IXGBE_RX_LOCK(_sc)                lockmgr(&(_sc)->rx_lock, LK_EXCLUSIVE)
+#define IXGBE_CORE_UNLOCK(_sc)            lockmgr(&(_sc)->core_lock, LK_RELEASE)
+#define IXGBE_TX_UNLOCK(_sc)              lockmgr(&(_sc)->tx_lock, LK_RELEASE)
+#define IXGBE_RX_UNLOCK(_sc)              lockmgr(&(_sc)->rx_lock, LK_RELEASE)
+#define IXGBE_CORE_LOCK_ASSERT(_sc)       KKASSERT(lockstatus(&(_sc)->core_lock, curthread) !=0)
+#define IXGBE_TX_LOCK_ASSERT(_sc)         KKASSERT(lockstatus(&(_sc)->tx_lock, curthread) != 0)
+
+
+static inline bool
+ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+       switch (hw->phy.type) {
+       case ixgbe_phy_sfp_avago:
+       case ixgbe_phy_sfp_ftl:
+       case ixgbe_phy_sfp_intel:
+       case ixgbe_phy_sfp_unknown:
+       case ixgbe_phy_sfp_passive_tyco:
+       case ixgbe_phy_sfp_passive_unknown:
+               return TRUE;
+       default:
+               return FALSE;
+       }
+}
+
+/* Workaround to make 8.0 buildable */
+#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
+static __inline int
+drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+        if (ALTQ_IS_ENABLED(&ifp->if_snd))
+                return (1);
+#endif
+        return (!buf_ring_empty(br));
+}
+#endif
+
+/*
+** Find the number of unrefreshed RX descriptors
+*/
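+/*
+** The ring is circular, so account for wraparound: with 1024 descriptors,
+** next_to_check = 10 and next_to_refresh = 1000 give
+** (1024 + 10) - 1000 - 1 = 33 descriptors awaiting refresh.
+*/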
+static inline u16
+ixgbe_rx_unrefreshed(struct rx_ring *rxr)
+{       
+       struct adapter  *adapter = rxr->adapter;
+        
+       if (rxr->next_to_check > rxr->next_to_refresh)
+               return (rxr->next_to_check - rxr->next_to_refresh - 1);
+       else
+               return ((adapter->num_rx_desc + rxr->next_to_check) -
+                   rxr->next_to_refresh - 1);
+}       
+
+#endif /* _IXGBE_H_ */
diff --git a/sys/dev/netif/ixgbe/ixgbe_82598.c b/sys/dev/netif/ixgbe/ixgbe_82598.c
new file mode 100644 (file)
index 0000000..212127e
--- /dev/null
@@ -0,0 +1,1382 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2012, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.12 2012/01/30 16:42:02 jfv Exp $*/
+
+#include "ixgbe_type.h"
+#include "ixgbe_82598.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+                                            ixgbe_link_speed *speed,
+                                            bool *autoneg);
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+                                     bool autoneg_wait_to_complete);
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed *speed, bool *link_up,
+                                     bool link_up_wait_to_complete);
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed,
+                                     bool autoneg,
+                                     bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+                                        ixgbe_link_speed speed,
+                                        bool autoneg,
+                                        bool autoneg_wait_to_complete);
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+                                 u32 headroom, int strategy);
+
+/**
+ *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82598 should be in the range of 50us to 50ms,
+ *  however the hardware default for these parts is 500us to 1ms which is less
+ *  than the 10ms recommended by the pci-e spec.  To address this we need to
+ *  increase the value to either 10ms to 250ms for capability version 1 config,
+ *  or 16ms to 55ms for version 2.
+ **/
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+       u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+       u16 pcie_devctl2;
+
+       /* only take action if timeout value is defaulted to 0 */
+       if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+               goto out;
+
+       /*
+        * if capabilities version is type 1 we can write the
+        * timeout of 10ms to 250ms through the GCR register
+        */
+       if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+               gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+               goto out;
+       }
+
+       /*
+        * for version 2 capabilities we need to write the config space
+        * directly in order to set the completion timeout value for
+        * 16ms to 55ms
+        */
+       pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+       pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+       IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+       /* disable completion timeout resend */
+       gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+       IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
+ *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
+ *  @hw: pointer to hardware structure
+ *
+ *  Read PCIe configuration space, and get the MSI-X vector count from
+ *  the capabilities table.
+ **/
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
+{
+       u32 msix_count = 18;
+
+       DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
+
+       if (hw->mac.msix_vectors_from_pcie) {
+               msix_count = IXGBE_READ_PCIE_WORD(hw,
+                                                 IXGBE_PCIE_MSIX_82598_CAPS);
+               msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+               /* MSI-X count is zero-based in HW, so increment to give
+                * proper value */
+               msix_count++;
+       }
+       return msix_count;
+}
+
+/**
+ *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for 82598.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       s32 ret_val;
+
+       DEBUGFUNC("ixgbe_init_ops_82598");
+
+       ret_val = ixgbe_init_phy_ops_generic(hw);
+       ret_val = ixgbe_init_ops_generic(hw);
+
+       /* PHY */
+       phy->ops.init = &ixgbe_init_phy_ops_82598;
+
+       /* MAC */
+       mac->ops.start_hw = &ixgbe_start_hw_82598;
+       mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
+       mac->ops.reset_hw = &ixgbe_reset_hw_82598;
+       mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+       mac->ops.get_supported_physical_layer =
+                               &ixgbe_get_supported_physical_layer_82598;
+       mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
+       mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
+       mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
+       mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
+       mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+       mac->ops.set_vlvf = NULL;
+       mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
+
+       /* Flow Control */
+       mac->ops.fc_enable = &ixgbe_fc_enable_82598;
+
+       mac->mcft_size          = 128;
+       mac->vft_size           = 128;
+       mac->num_rar_entries    = 16;
+       mac->rx_pb_size         = 512;
+       mac->max_tx_queues      = 32;
+       mac->max_rx_queues      = 64;
+       mac->max_msix_vectors   = ixgbe_get_pcie_msix_count_82598(hw);
+
+       /* SFP+ Module */
+       phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+
+       /* Link */
+       mac->ops.check_link = &ixgbe_check_mac_link_82598;
+       mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+       mac->ops.flap_tx_laser = NULL;
+       mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
+       mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+
+       /* Manageability interface */
+       mac->ops.set_fw_drv_ver = NULL;
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       s32 ret_val = IXGBE_SUCCESS;
+       u16 list_offset, data_offset;
+
+       DEBUGFUNC("ixgbe_init_phy_ops_82598");
+
+       /* Identify the PHY */
+       phy->ops.identify(hw);
+
+       /* Overwrite the link function pointers if copper PHY */
+       if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+               mac->ops.get_link_capabilities =
+                               &ixgbe_get_copper_link_capabilities_generic;
+       }
+
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+               phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+               phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+               phy->ops.get_firmware_version =
+                                       &ixgbe_get_phy_firmware_version_tnx;
+               break;
+       case ixgbe_phy_nl:
+               phy->ops.reset = &ixgbe_reset_phy_nl;
+
+               /* Call SFP+ identify routine to get the SFP+ module type */
+               ret_val = phy->ops.identify_sfp(hw);
+               if (ret_val != IXGBE_SUCCESS)
+                       goto out;
+               else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
+                       ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+                       goto out;
+               }
+
+               /* Check to see if SFP+ module is supported */
+               ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+                                                             &list_offset,
+                                                             &data_offset);
+               if (ret_val != IXGBE_SUCCESS) {
+                       ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+                       goto out;
+               }
+               break;
+       default:
+               break;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function.
+ *  Disables relaxed ordering, then sets the PCIe completion timeout.
+ *
+ **/
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+       u32 regval;
+       u32 i;
+       s32 ret_val = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_start_hw_82598");
+
+       ret_val = ixgbe_start_hw_generic(hw);
+
+       /* Disable relaxed ordering */
+       for (i = 0; ((i < hw->mac.max_tx_queues) &&
+            (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+               regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+               regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+       }
+
+       for (i = 0; ((i < hw->mac.max_rx_queues) &&
+            (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+               regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+               regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+                           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+       }
+
+       /* set the completion timeout for interface */
+       if (ret_val == IXGBE_SUCCESS)
+               ixgbe_set_pcie_completion_timeout(hw);
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+                                            ixgbe_link_speed *speed,
+                                            bool *autoneg)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 autoc = 0;
+
+       DEBUGFUNC("ixgbe_get_link_capabilities_82598");
+
+       /*
+        * Determine link capabilities based on the stored value of AUTOC,
+        * which represents EEPROM defaults.  If AUTOC value has not been
+        * stored, use the current register value.
+        */
+       if (hw->mac.orig_link_settings_stored)
+               autoc = hw->mac.orig_autoc;
+       else
+               autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               *autoneg = FALSE;
+               break;
+
+       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+               *autoneg = FALSE;
+               break;
+
+       case IXGBE_AUTOC_LMS_1G_AN:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               *autoneg = TRUE;
+               break;
+
+       case IXGBE_AUTOC_LMS_KX4_AN:
+       case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+               if (autoc & IXGBE_AUTOC_KX4_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (autoc & IXGBE_AUTOC_KX_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+               *autoneg = TRUE;
+               break;
+
+       default:
+               status = IXGBE_ERR_LINK_SETUP;
+               break;
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_media_type_82598 - Determines media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+       enum ixgbe_media_type media_type;
+
+       DEBUGFUNC("ixgbe_get_media_type_82598");
+
+       /* Detect if there is a copper PHY attached. */
+       switch (hw->phy.type) {
+       case ixgbe_phy_cu_unknown:
+       case ixgbe_phy_tn:
+               media_type = ixgbe_media_type_copper;
+               goto out;
+       default:
+               break;
+       }
+
+       /* Media type for I82598 is based on device ID */
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82598:
+       case IXGBE_DEV_ID_82598_BX:
+               /* Default device ID is mezzanine card KX/KX4 */
+               media_type = ixgbe_media_type_backplane;
+               break;
+       case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+       case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+       case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+       case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+       case IXGBE_DEV_ID_82598EB_XF_LR:
+       case IXGBE_DEV_ID_82598EB_SFP_LOM:
+               media_type = ixgbe_media_type_fiber;
+               break;
+       case IXGBE_DEV_ID_82598EB_CX4:
+       case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+               media_type = ixgbe_media_type_cx4;
+               break;
+       case IXGBE_DEV_ID_82598AT:
+       case IXGBE_DEV_ID_82598AT2:
+               media_type = ixgbe_media_type_copper;
+               break;
+       default:
+               media_type = ixgbe_media_type_unknown;
+               break;
+       }
+out:
+       return media_type;
+}
+
+/**
+ *  ixgbe_fc_enable_82598 - Enable flow control
+ *  @hw: pointer to hardware structure
+ *  @packetbuf_num: packet buffer number (0-7)
+ *
+ *  Enable flow control according to the current settings.
+ **/
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+       s32 ret_val = IXGBE_SUCCESS;
+       u32 fctrl_reg;
+       u32 rmcs_reg;
+       u32 reg;
+       u32 link_speed = 0;
+       bool link_up;
+
+       DEBUGFUNC("ixgbe_fc_enable_82598");
+
+       /*
+        * On 82598 having Rx FC on causes resets while doing 1G
+        * so if it's on turn it off once we know link_speed. For
+        * more details see 82598 Specification update.
+        */
+       hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
+       if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+               switch (hw->fc.requested_mode) {
+               case ixgbe_fc_full:
+                       hw->fc.requested_mode = ixgbe_fc_tx_pause;
+                       break;
+               case ixgbe_fc_rx_pause:
+                       hw->fc.requested_mode = ixgbe_fc_none;
+                       break;
+               default:
+                       /* no change */
+                       break;
+               }
+       }
+
+       /* Negotiate the fc mode to use */
+       ret_val = ixgbe_fc_autoneg(hw);
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
+               goto out;
+
+       /* Disable any previous flow control settings */
+       fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+       rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+       rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+       /*
+        * The possible values of fc.current_mode are:
+        * 0: Flow control is completely disabled
+        * 1: Rx flow control is enabled (we can receive pause frames,
+        *    but not send pause frames).
+        * 2: Tx flow control is enabled (we can send pause frames but
+        *     we do not support receiving pause frames).
+        * 3: Both Rx and Tx flow control (symmetric) are enabled.
+        * other: Invalid.
+        */
+       switch (hw->fc.current_mode) {
+       case ixgbe_fc_none:
+               /*
+                * Flow control is disabled by software override or autoneg.
+                * The code below will actually disable it in the HW.
+                */
+               break;
+       case ixgbe_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is
+                * disabled by software override. Since there really
+                * isn't a way to advertise that we are capable of RX
+                * Pause ONLY, we will advertise that we support both
+                * symmetric and asymmetric Rx PAUSE.  Later, we will
+                * disable the adapter's ability to send PAUSE frames.
+                */
+               fctrl_reg |= IXGBE_FCTRL_RFCE;
+               break;
+       case ixgbe_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled by software override.
+                */
+               rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+               break;
+       case ixgbe_fc_full:
+               /* Flow control (both Rx and Tx) is enabled by SW override. */
+               fctrl_reg |= IXGBE_FCTRL_RFCE;
+               rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = IXGBE_ERR_CONFIG;
+               goto out;
+               break;
+       }
+
+       /* Set 802.3x based flow control settings. */
+       fctrl_reg |= IXGBE_FCTRL_DPF;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
+       IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+       /* Set up and enable Rx high/low water mark thresholds, enable XON. */
+       if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+               reg = hw->fc.low_water << 6;
+               if (hw->fc.send_xon)
+                       reg |= IXGBE_FCRTL_XONE;
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+               reg = hw->fc.high_water[packetbuf_num] << 6;
+               reg |= IXGBE_FCRTH_FCEN;
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
+       }
+
+       /* Configure pause time (2 TCs per register) */
+       reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
+       if ((packetbuf_num & 1) == 0)
+               reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
+       else
+               reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_start_mac_link_82598 - Configures MAC link settings
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures link settings based on values in the ixgbe_hw struct.
+ *  Restarts the link.  Performs autonegotiation if needed.
+ **/
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+                                     bool autoneg_wait_to_complete)
+{
+       u32 autoc_reg;
+       u32 links_reg;
+       u32 i;
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_start_mac_link_82598");
+
+       /* Restart link */
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+       /* Only poll for autoneg to complete if specified to do so */
+       if (autoneg_wait_to_complete) {
+               if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+                    IXGBE_AUTOC_LMS_KX4_AN ||
+                   (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+                    IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+                       links_reg = 0; /* Just in case Autoneg time = 0 */
+                       for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+                               links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+                               if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+                                       break;
+                               msec_delay(100);
+                       }
+                       if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+                               status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+                               DEBUGOUT("Autonegotiation did not complete.\n");
+                       }
+               }
+       }
+
+       /* Add delay to filter out noise during initial link setup */
+       msec_delay(50);
+
+       return status;
+}
+
+/**
+ *  ixgbe_validate_link_ready - Function looks for phy link
+ *  @hw: pointer to hardware structure
+ *
+ *  Function indicates success when phy link is available. If phy is not ready
+ *  within 5 seconds of MAC indicating link, the function returns error.
+ **/
+static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+       u32 timeout;
+       u16 an_reg;
+
+       if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+               return IXGBE_SUCCESS;
+
+       for (timeout = 0;
+            timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+
+               if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+                   (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
+                       break;
+
+               msec_delay(100);
+       }
+
+       if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+               DEBUGOUT("Link was indicated but link is down\n");
+               return IXGBE_ERR_LINK_SETUP;
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_check_mac_link_82598 - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: TRUE if link is up, FALSE otherwise
+ *  @link_up_wait_to_complete: TRUE when waiting for link up is needed
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed *speed, bool *link_up,
+                                     bool link_up_wait_to_complete)
+{
+       u32 links_reg;
+       u32 i;
+       u16 link_reg, adapt_comp_reg;
+
+       DEBUGFUNC("ixgbe_check_mac_link_82598");
+
+       /*
+        * SERDES PHY requires us to read link status from undocumented
+        * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
+        * indicates link down.  0xC00C is read to check that the XAUI lanes
+        * are active.  Bit 0 clear indicates active; set indicates inactive.
+        */
+       if (hw->phy.type == ixgbe_phy_nl) {
+               hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+               hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
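+               /* Double read: the link bit appears to be a latched status */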
+               hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+                                    &adapt_comp_reg);
+               if (link_up_wait_to_complete) {
+                       for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+                               if ((link_reg & 1) &&
+                                   ((adapt_comp_reg & 1) == 0)) {
+                                       *link_up = TRUE;
+                                       break;
+                               } else {
+                                       *link_up = FALSE;
+                               }
+                               msec_delay(100);
+                               hw->phy.ops.read_reg(hw, 0xC79F,
+                                                    IXGBE_TWINAX_DEV,
+                                                    &link_reg);
+                               hw->phy.ops.read_reg(hw, 0xC00C,
+                                                    IXGBE_TWINAX_DEV,
+                                                    &adapt_comp_reg);
+                       }
+               } else {
+                       if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
+                               *link_up = TRUE;
+                       else
+                               *link_up = FALSE;
+               }
+
+               if (*link_up == FALSE)
+                       goto out;
+       }
+
+       links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+       if (link_up_wait_to_complete) {
+               for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+                       if (links_reg & IXGBE_LINKS_UP) {
+                               *link_up = TRUE;
+                               break;
+                       } else {
+                               *link_up = FALSE;
+                       }
+                       msec_delay(100);
+                       links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+               }
+       } else {
+               if (links_reg & IXGBE_LINKS_UP)
+                       *link_up = TRUE;
+               else
+                       *link_up = FALSE;
+       }
+
+       if (links_reg & IXGBE_LINKS_SPEED)
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+       else
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+       if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
+           (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
+               *link_up = FALSE;
+
+out:
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_setup_mac_link_82598 - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ *  Sets the link speed in the AUTOC register and restarts the link.
+ **/
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed, bool autoneg,
+                                     bool autoneg_wait_to_complete)
+{
+       s32 status = IXGBE_SUCCESS;
+       ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+       u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 autoc = curr_autoc;
+       u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+
+       DEBUGFUNC("ixgbe_setup_mac_link_82598");
+
+       /* Check to see if speed passed in is supported. */
+       ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+       speed &= link_capabilities;
+
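+       /*
+        * An empty intersection leaves IXGBE_LINK_SPEED_UNKNOWN (assumed
+        * to be 0), which is rejected below.
+        */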
+       if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+               status = IXGBE_ERR_LINK_SETUP;
+
+       /* Set KX4/KX support according to speed requested */
+       else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+                link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+               autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
+               if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+                       autoc |= IXGBE_AUTOC_KX4_SUPP;
+               if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+                       autoc |= IXGBE_AUTOC_KX_SUPP;
+               if (autoc != curr_autoc)
+                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+       }
+
+       if (status == IXGBE_SUCCESS) {
+               /*
+                * Set up and restart the link based on the new values in
+                * ixgbe_hw.  This will write the AUTOC register based on the
+                * new stored values.
+                */
+               status = ixgbe_start_mac_link_82598(hw,
+                                                   autoneg_wait_to_complete);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
+ *
+ *  Sets the link speed in the AUTOC register in the MAC and restarts link.
+ **/
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+                                        ixgbe_link_speed speed,
+                                        bool autoneg,
+                                        bool autoneg_wait_to_complete)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_setup_copper_link_82598");
+
+       /* Setup the PHY according to input speed */
+       status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+                                             autoneg_wait_to_complete);
+       /* Set up MAC; the PHY setup status above is what gets returned */
+       ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+
+       return status;
+}
+
+/**
+ *  ixgbe_reset_hw_82598 - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masking
+ *  and clearing all interrupts, performing a PHY reset, and performing a
+ *  link (MAC) reset.
+ **/
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       s32 phy_status = IXGBE_SUCCESS;
+       u32 ctrl;
+       u32 gheccr;
+       u32 i;
+       u32 autoc;
+       u8  analog_val;
+
+       DEBUGFUNC("ixgbe_reset_hw_82598");
+
+       /* Call adapter stop to disable tx/rx and clear interrupts */
+       status = hw->mac.ops.stop_adapter(hw);
+       if (status != IXGBE_SUCCESS)
+               goto reset_hw_out;
+
+       /*
+        * Power up the Atlas Tx lanes if they are currently powered down.
+        * Atlas Tx lanes are powered down for MAC loopback tests, but
+        * they are not automatically restored on reset.
+        */
+       hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+       if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+               /* Enable Tx Atlas so packets can be transmitted again */
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+                                            &analog_val);
+               analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+                                             analog_val);
+
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+                                            &analog_val);
+               analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+                                             analog_val);
+
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+                                            &analog_val);
+               analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+                                             analog_val);
+
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+                                            &analog_val);
+               analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+                                             analog_val);
+       }
+
+       /* Reset PHY */
+       if (hw->phy.reset_disable == FALSE) {
+               /* PHY ops must be identified and initialized prior to reset */
+
+               /* Init PHY and function pointers, perform SFP setup */
+               phy_status = hw->phy.ops.init(hw);
+               if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+                       goto reset_hw_out;
+               if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto mac_reset_top;
+
+               hw->phy.ops.reset(hw);
+       }
+
+mac_reset_top:
+       /*
+        * Issue global reset to the MAC.  This needs to be a SW reset.
+        * If a link reset is used, it might reset the MAC when the
+        * manageability engine (mng) is using it.
+        */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+       IXGBE_WRITE_FLUSH(hw);
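+       /* The flush posts the reset write before polling begins */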
+
+       /* Poll for reset bit to self-clear indicating reset is complete */
+       for (i = 0; i < 10; i++) {
+               usec_delay(1);
+               ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+               if (!(ctrl & IXGBE_CTRL_RST))
+                       break;
+       }
+       if (ctrl & IXGBE_CTRL_RST) {
+               status = IXGBE_ERR_RESET_FAILED;
+               DEBUGOUT("Reset polling failed to complete.\n");
+       }
+
+       msec_delay(50);
+
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               goto mac_reset_top;
+       }
+
+       gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+       gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+       IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+       /*
+        * Store the original AUTOC value if it has not been
+        * stored off yet.  Otherwise restore the stored original
+        * AUTOC value since the reset operation sets it back to defaults.
+        */
+       autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       if (hw->mac.orig_link_settings_stored == FALSE) {
+               hw->mac.orig_autoc = autoc;
+               hw->mac.orig_link_settings_stored = TRUE;
+       } else if (autoc != hw->mac.orig_autoc) {
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+       }
+
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+       /*
+        * Store MAC address from RAR0, clear receive address registers, and
+        * clear the multicast table
+        */
+       hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+       if (phy_status != IXGBE_SUCCESS)
+               status = phy_status;
+
+       return status;
+}
+
+/**
+ *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq set index
+ **/
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       u32 rar_high;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       DEBUGFUNC("ixgbe_set_vmdq_82598");
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               DEBUGOUT1("RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
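+       /* The pool index lives in the VIND field of the RAH register */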
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+       rar_high &= ~IXGBE_RAH_VIND_MASK;
+       rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to disassociate from a VMDq index
+ *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ **/
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       u32 rar_high;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       UNREFERENCED_1PARAMETER(vmdq);
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               DEBUGOUT1("RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+       if (rar_high & IXGBE_RAH_VIND_MASK) {
+               rar_high &= ~IXGBE_RAH_VIND_MASK;
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_vfta_82598 - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turns the specified VLAN on or off in the VLAN filter table.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                        bool vlan_on)
+{
+       u32 regindex;
+       u32 bitindex;
+       u32 bits;
+       u32 vftabyte;
+
+       DEBUGFUNC("ixgbe_set_vfta_82598");
+
+       if (vlan > 4095)
+               return IXGBE_ERR_PARAM;
+
+       /* Determine 32-bit word position in array */
+       regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+
+       /* Determine the location of the (VMD) queue index */
+       vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+       bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
+
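+       /*
+        * Worked example: vlan 100 -> regindex 3, vftabyte 0; its VIND
+        * nibble sits at bits 19:16 of VFTAVIND(0, 3) and its enable bit
+        * is bit 4 of VFTA(3).
+        */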
+       /* Set the nibble for VMD queue index */
+       bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+       bits &= (~(0x0F << bitindex));
+       bits |= (vind << bitindex);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+       /* Determine the location of the bit for this VLAN id */
+       bitindex = vlan & 0x1F;   /* lower five bits */
+
+       bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+       if (vlan_on)
+               /* Turn on this VLAN id */
+               bits |= (1 << bitindex);
+       else
+               /* Turn off this VLAN id */
+               bits &= ~(1 << bitindex);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+       u32 offset;
+       u32 vlanbyte;
+
+       DEBUGFUNC("ixgbe_clear_vfta_82598");
+
+       for (offset = 0; offset < hw->mac.vft_size; offset++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
+
+       for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
+               for (offset = 0; offset < hw->mac.vft_size; offset++)
+                       IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
+                                       0);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+       u32  atlas_ctl;
+
+       DEBUGFUNC("ixgbe_read_analog_reg8_82598");
+
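+       /*
+        * Atlas access is indirect: write the command and register address
+        * to ATLASCTL, let the access settle, then read the data back from
+        * the low byte of the same register.
+        */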
+       IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+                       IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+       IXGBE_WRITE_FLUSH(hw);
+       usec_delay(10);
+       atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+       *val = (u8)atlas_ctl;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: atlas register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+       u32  atlas_ctl;
+
+       DEBUGFUNC("ixgbe_write_analog_reg8_82598");
+
+       atlas_ctl = (reg << 8) | val;
+       IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+       IXGBE_WRITE_FLUSH(hw);
+       usec_delay(10);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                               u8 *eeprom_data)
+{
+       s32 status = IXGBE_SUCCESS;
+       u16 sfp_addr = 0;
+       u16 sfp_data = 0;
+       u16 sfp_stat = 0;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
+
+       if (hw->phy.type == ixgbe_phy_nl) {
+               /*
+                * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
+                * 0xC30D. These registers are used to talk to the SFP+
+                * module's EEPROM through the SDA/SCL (I2C) interface.
+                */
+               sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+               sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
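+               /*
+                * sfp_addr: I2C device address in the high byte, EEPROM
+                * byte offset in the low byte, plus the read-command bit.
+                */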
+               hw->phy.ops.write_reg(hw,
+                                     IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     sfp_addr);
+
+               /* Poll status (up to 100 tries, 10 ms apart) */
+               for (i = 0; i < 100; i++) {
+                       hw->phy.ops.read_reg(hw,
+                                            IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+                                            IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                            &sfp_stat);
+                       sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
+                       if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
+                               break;
+                       msec_delay(10);
+               }
+
+               if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+                       DEBUGOUT("EEPROM read did not pass.\n");
+                       status = IXGBE_ERR_SFP_NOT_PRESENT;
+                       goto out;
+               }
+
+               /* Read data */
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+                                    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+
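+               /* The EEPROM byte comes back in the high byte of the word */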
+               *eeprom_data = (u8)(sfp_data >> 8);
+       } else {
+               status = IXGBE_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+       u16 ext_ability = 0;
+
+       DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
+
+       hw->phy.ops.identify(hw);
+
+       /*
+        * Copper PHY must be checked before AUTOC LMS to determine correct
+        * physical layer because 10GBase-T PHYs use LMS = KX4/KX.
+        */
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+       case ixgbe_phy_cu_unknown:
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+                                    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+               if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+               if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+               if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+               goto out;
+       default:
+               break;
+       }
+
+       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+       case IXGBE_AUTOC_LMS_1G_AN:
+       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+               if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+               else
+                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+               break;
+       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+               if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+               else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+               else /* XAUI */
+                       physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+               break;
+       case IXGBE_AUTOC_LMS_KX4_AN:
+       case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+               if (autoc & IXGBE_AUTOC_KX_SUPP)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+               if (autoc & IXGBE_AUTOC_KX4_SUPP)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+               break;
+       default:
+               break;
+       }
+
+       if (hw->phy.type == ixgbe_phy_nl) {
+               hw->phy.ops.identify_sfp(hw);
+
+               switch (hw->phy.sfp_type) {
+               case ixgbe_sfp_type_da_cu:
+                       physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+                       break;
+               case ixgbe_sfp_type_sr:
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;