ix: Import Intel ixgbe-2.5.15
author Sepherosa Ziehau <sephe@dragonflybsd.org>
Sun, 24 Nov 2013 13:41:57 +0000 (21:41 +0800)
committer Sepherosa Ziehau <sephe@dragonflybsd.org>
Thu, 2 Jan 2014 12:43:00 +0000 (20:43 +0800)
MSI and legacy interrupt (line based interrupt) are working as of this
commit.  polling(4) will be supported and MSI-X will be brought back in
the later commits.

ixgbe is renamed to ix, which is consistent w/ the interface name, and
this makes 'ifconfig ix0' work as expected.

Local changes:
- Reorganize RX and TX ring data structure; nuke useless 'queue'.
- Rework RX and TX ring creation, destroy, initialization and cleanup.
- Reorganize interrupt related data structure.  This also prepares for
  bringing MSI-X back.
- Rework interrupt allocation.
- Rework MSI and legacy interrupt handler.
- Enable RSS (2 RX rings) even if MSI or legacy interrupt is used.
- Apply DragonFly specific RSS configuration.
- Partially rework RX code path.
- Enable TX header write-back.
- Rework TX code path and TX watchdog mechanism.
- Rework busdma(9) related bits.
- Rework ifnet.if_ioctl method.
- Reorganize initialize and stop code.
- Reorganize attach and detach code.
- Code clean up.

42 files changed:
share/man/man4/Makefile
share/man/man4/ix.4 [moved from share/man/man4/ixgbe.4 with 79% similarity]
sys/conf/files
sys/conf/options
sys/config/GENERIC
sys/config/LINT
sys/config/LINT64
sys/config/X86_64_GENERIC
sys/dev/netif/Makefile
sys/dev/netif/ix/Makefile [new file with mode: 0644]
sys/dev/netif/ix/if_ix.c [new file with mode: 0644]
sys/dev/netif/ix/if_ix.h [new file with mode: 0644]
sys/dev/netif/ix/ixgbe_82598.c [moved from sys/dev/netif/ixgbe/ixgbe_82598.c with 94% similarity]
sys/dev/netif/ix/ixgbe_82598.h [moved from sys/dev/netif/ixgbe/ixgbe_82598.h with 95% similarity]
sys/dev/netif/ix/ixgbe_82599.c [moved from sys/dev/netif/ixgbe/ixgbe_82599.c with 85% similarity]
sys/dev/netif/ix/ixgbe_82599.h [moved from sys/dev/netif/ixgbe/ixgbe_82599.h with 90% similarity]
sys/dev/netif/ix/ixgbe_api.c [moved from sys/dev/netif/ixgbe/ixgbe_api.c with 94% similarity]
sys/dev/netif/ix/ixgbe_api.h [moved from sys/dev/netif/ixgbe/ixgbe_api.h with 96% similarity]
sys/dev/netif/ix/ixgbe_common.c [moved from sys/dev/netif/ixgbe/ixgbe_common.c with 88% similarity]
sys/dev/netif/ix/ixgbe_common.h [moved from sys/dev/netif/ixgbe/ixgbe_common.h with 86% similarity]
sys/dev/netif/ix/ixgbe_dcb.c [new file with mode: 0644]
sys/dev/netif/ix/ixgbe_dcb.h [new file with mode: 0644]
sys/dev/netif/ix/ixgbe_dcb_82598.c [new file with mode: 0644]
sys/dev/netif/ix/ixgbe_dcb_82598.h [new file with mode: 0644]
sys/dev/netif/ix/ixgbe_dcb_82599.c [new file with mode: 0644]
sys/dev/netif/ix/ixgbe_dcb_82599.h [new file with mode: 0644]
sys/dev/netif/ix/ixgbe_mbx.c [moved from sys/dev/netif/ixgbe/ixgbe_mbx.c with 97% similarity]
sys/dev/netif/ix/ixgbe_mbx.h [moved from sys/dev/netif/ixgbe/ixgbe_mbx.h with 90% similarity]
sys/dev/netif/ix/ixgbe_osdep.h [moved from sys/dev/netif/ixgbe/ixgbe_osdep.h with 84% similarity]
sys/dev/netif/ix/ixgbe_phy.c [moved from sys/dev/netif/ixgbe/ixgbe_phy.c with 85% similarity]
sys/dev/netif/ix/ixgbe_phy.h [moved from sys/dev/netif/ixgbe/ixgbe_phy.h with 85% similarity]
sys/dev/netif/ix/ixgbe_type.h [moved from sys/dev/netif/ixgbe/ixgbe_type.h with 95% similarity]
sys/dev/netif/ix/ixgbe_vf.c [moved from sys/dev/netif/ixgbe/ixgbe_vf.c with 79% similarity]
sys/dev/netif/ix/ixgbe_vf.h [moved from sys/dev/netif/ixgbe/ixgbe_vf.h with 92% similarity]
sys/dev/netif/ix/ixgbe_x540.c [moved from sys/dev/netif/ixgbe/ixgbe_x540.c with 92% similarity]
sys/dev/netif/ix/ixgbe_x540.h [moved from sys/dev/netif/ixgbe/ixgbe_x540.h with 94% similarity]
sys/dev/netif/ixgbe/LICENSE [deleted file]
sys/dev/netif/ixgbe/Makefile [deleted file]
sys/dev/netif/ixgbe/README [deleted file]
sys/dev/netif/ixgbe/ixgbe.c [deleted file]
sys/dev/netif/ixgbe/ixgbe.h [deleted file]
sys/dev/netif/ixgbe/ixgbe_defines.h [deleted file]

index 9c8654a..f9a0c47 100644 (file)
@@ -140,7 +140,7 @@ MAN=        aac.4 \
        iwifw.4 \
        iwn.4 \
        iwnfw.4 \
-       ixgbe.4 \
+       ix.4 \
        jme.4 \
        joy.4 \
        kate.4 \
@@ -447,7 +447,9 @@ MLINKS+=igb.4 if_igb.4
 MLINKS+=iwi.4 if_iwi.4
 #MLINKS+=iwl.4 if_iwl.4
 MLINKS+=iwn.4 if_iwn.4
-MLINKS+=ixgbe.4 if_ixgbe.4
+MLINKS+=ix.4 if_ix.4 \
+       ix.4 ixgbe.4 \
+       ix.4 if_ixgbe.4
 MLINKS+=jme.4 if_jme.4
 MLINKS+=kue.4 if_kue.4
 MLINKS+=lge.4 if_lge.4
similarity index 79%
rename from share/man/man4/ixgbe.4
rename to share/man/man4/ix.4
index bc8ca0a..c13fa02 100644 (file)
 .\"
 .\" $FreeBSD: src/share/man/man4/ixgbe.4,v 1.2 2008/06/17 21:14:02 brueffer Exp $
 .\"
-.Dd June 30, 2012
-.Dt IXGBE 4
+.Dd December 30, 2013
+.Dt IX 4
 .Os
 .Sh NAME
-.Nm ixgbe
-.Nd "Intel(R) 10Gb Ethernet driver for the DragonFly BSD operating system"
+.Nm ix
+.Nd "Intel(R) 10Gb Ethernet driver"
 .Sh SYNOPSIS
 To compile this driver into the kernel,
 place the following line in your
 kernel configuration file:
 .Bd -ragged -offset indent
-.Cd "device ixgbe"
+.Cd "device ix"
 .Ed
 .Pp
 Alternatively, to load the driver as a
 module at boot time, place the following line in
 .Xr loader.conf 5 :
 .Bd -literal -offset indent
-if_ixgbe_load="YES"
+if_ix_load="YES"
 .Ed
 .Sh DESCRIPTION
 The
 .Nm
 driver provides support for PCI 10Gb Ethernet adapters based on
-the Intel 82598EB Intel(R) Network Connections.
+the Intel
+82598EB,
+82599 and
+X540
+Intel(R) Network Connections.
 The driver supports Jumbo Frames, MSIX, TSO, and RSS.
 .Pp
 For questions related to hardware requirements,
@@ -79,22 +83,44 @@ driver supports the following cards:
 .Pp
 .Bl -bullet -compact
 .It
-Intel(R) 10 Gigabit XF SR/AF Dual Port Server Adapter
+Intel 10 Gigabit AF DA Dual Port Server Adapter
 .It
-Intel(R) 10 Gigabit XF SR/LR Server Adapter
+Intel 10 Gigabit AT Server Adapter
 .It
-Intel(R) 82598EB 10 Gigabit AF Network Connection
+Intel 10 Gigabit AT2 Server Adapter
 .It
-Intel(R) 82598EB 10 Gigabit AT CX4 Network Connection
+Intel 10 Gigabit CX4 Dual Port Server Adapter
+.It
+Intel 10 Gigabit XF LR Server Adapter
+.It
+Intel 10 Gigabit XF SR Dual Port Server Adapter
+.It
+Intel 10 Gigabit XF SR Server Adapter
+.It
+Intel 82598 10 Gigabit Ethernet Controller
+.It
+Intel 82599 10 Gigabit Ethernet Controller
+.It
+Intel Ethernet Controller X540-AT2
+.It
+Intel Ethernet Converged Network Adapter X520 Series
+.It
+Intel Ethernet Converged Network Adapter X540-T1
+.It
+Intel Ethernet Converged Network Adapter X540-T2
+.It
+Intel Ethernet Server Adapter X520 Series
+.It
+Intel Ethernet Server Adapter X520-DA2
+.It
+Intel Ethernet Server Adapter X520-LR1
+.It
+Intel Ethernet Server Adapter X520-SR1
+.It
+Intel Ethernet Server Adapter X520-SR2
+.It
+Intel Ethernet Server Adapter X520-T2
 .El
-.Sh PERFORMANCE
-The default
-.Dx
-configuration may not give the best possible
-performance with single streams TCP connections.
-Increasing the value of the
-.Va net.inet.tcp.sosend_agglim
-sysctl to 12 or more will increase sending speeds.
 .Sh DIAGNOSTICS
 .Bl -diag
 .It "ix%d: Unable to allocate bus resource: memory"
index d9e0950..9bbffa3 100644 (file)
@@ -298,15 +298,18 @@ dev/netif/ep/if_ep_pccard.c       optional ep pccard
 dev/netif/em/if_em.c           optional em
 dev/netif/emx/if_emx.c         optional emx
 dev/netif/igb/if_igb.c         optional igb
-dev/netif/ixgbe/ixgbe.c                optional ixgbe
-dev/netif/ixgbe/ixgbe_82598.c  optional ixgbe
-dev/netif/ixgbe/ixgbe_82599.c  optional ixgbe
-dev/netif/ixgbe/ixgbe_api.c    optional ixgbe
-dev/netif/ixgbe/ixgbe_common.c optional ixgbe
-dev/netif/ixgbe/ixgbe_mbx.c    optional ixgbe
-dev/netif/ixgbe/ixgbe_phy.c    optional ixgbe
-dev/netif/ixgbe/ixgbe_vf.c     optional ixgbe
-dev/netif/ixgbe/ixgbe_x540.c   optional ixgbe
+dev/netif/ix/if_ix.c           optional ix
+dev/netif/ix/ixgbe_82598.c     optional ix
+dev/netif/ix/ixgbe_82599.c     optional ix
+dev/netif/ix/ixgbe_api.c       optional ix
+dev/netif/ix/ixgbe_common.c    optional ix
+dev/netif/ix/ixgbe_dcb.c       optional ix
+dev/netif/ix/ixgbe_dcb_82598.c optional ix
+dev/netif/ix/ixgbe_dcb_82599.c optional ix
+dev/netif/ix/ixgbe_mbx.c       optional ix
+dev/netif/ix/ixgbe_phy.c       optional ix
+dev/netif/ix/ixgbe_vf.c                optional ix
+dev/netif/ix/ixgbe_x540.c      optional ix
 dev/netif/ig_hal/e1000_80003es2lan.c   optional ig_hal
 dev/netif/ig_hal/e1000_82540.c optional ig_hal
 dev/netif/ig_hal/e1000_82541.c optional ig_hal
index 11675a5..3d72312 100644 (file)
@@ -636,6 +636,9 @@ IGB_RSS_DEBUG               opt_igb.h
 IGB_TSS_DEBUG          opt_igb.h
 IGB_MSIX_DEBUG         opt_igb.h
 
+# ix driver
+IX_RSS_DEBUG           opt_ix.h
+
 # Options for the Intel 802.11n wireless driver
 IWN_DEBUG              opt_iwn.h
 
index 097973d..cc168b9 100644 (file)
@@ -232,7 +232,7 @@ device              em              # Intel PRO/1000 adapter Gigabit Ethernet Card (``Wiseman'')
 device         igb             # Intel Pro/1000 (82575, 82576, 82580, i350)
                                # Requires ig_hal
 device         ig_hal          # Intel PRO/1000 hardware abstraction layer
-device         ixgbe           # Intel PRO/10GbE PCIE Ethernet Family
+device         ix              # Intel PRO/10GbE PCIE Ethernet Family
 
 device         oce             # Emulex OneConnect 10Gb
 
index 431ebc4..dd145f1 100644 (file)
@@ -1778,7 +1778,7 @@ device            emx             # Intel Pro/1000 (8257{1,2,3,4})
 device         igb             # Intel Pro/1000 (82575, 82576, 82580, i350)
                                # Requires ig_hal
 device         ig_hal          # Intel Pro/1000 hardware abstraction layer
-device         ixgbe           # Intel PRO/10GbE PCIE Ethernet Family
+device         ix              # Intel PRO/10GbE PCIE Ethernet Family
 device         et              # Agere ET1310 10/100/1000 Ethernet
 device         lge             # Level 1 LXT1001 (``Mercury'')
 device         mxge            # Myricom Myri-10G 10GbE NIC
index 650f3f1..5a8125e 100644 (file)
@@ -1624,7 +1624,7 @@ device            emx             # Intel Pro/1000 (8257{1,2,3,4})
 device         igb             # Intel Pro/1000 (82575, 82576, 82580, i350)
                                # Requires ig_hal
 device         ig_hal          # Intel Pro/1000 hardware abstraction layer
-device         ixgbe           # Intel PRO/10GbE PCIE Ethernet Family
+device         ix              # Intel PRO/10GbE PCIE Ethernet Family
 device         et              # Agere ET1310 10/100/1000 Ethernet
 device         lge             # Level 1 LXT1001 (``Mercury'')
 device         mxge            # Myricom Myri-10G 10GbE NIC
index 3fc5b6c..db99db9 100644 (file)
@@ -211,7 +211,7 @@ device              em              # Intel PRO/1000 adapter Gigabit Ethernet Card (``Wiseman'')
 device         igb             # Intel Pro/1000 (82575, 82576, 82580, i350)
                                # Requires ig_hal
 device         ig_hal          # Intel PRO/1000 hardware abstraction layer
-device         ixgbe           # Intel PRO/10GbE PCIE Ethernet Family
+device         ix              # Intel PRO/10GbE PCIE Ethernet Family
 
 device         oce             # Emulex OneConnect 10Gb
 
index 25485a7..843420e 100644 (file)
@@ -3,7 +3,7 @@ SUBDIR= an age alc ale ath bce bfe bge \
        fwe fxp ic iwi iwn jme lge lnc \
        mii_layer my msk mxge ndis nfe nge oce pcn \
        ral re rl sbsh sf sis sk sln sr ste stge ti tl tx txp \
-       vge vr vx wb wi wpi xe xl ig_hal emx ae igb ixgbe bnx
+       vge vr vx wb wi wpi xe xl ig_hal emx ae igb ix bnx
 
 .if ${MACHINE_ARCH} == "i386"
 SUBDIR+=ar sbni
diff --git a/sys/dev/netif/ix/Makefile b/sys/dev/netif/ix/Makefile
new file mode 100644 (file)
index 0000000..a7638b4
--- /dev/null
@@ -0,0 +1,17 @@
+KMOD   = if_ix
+SRCS   = if_ix.c
+SRCS   += device_if.h bus_if.h pci_if.h
+SRCS   += opt_ix.h
+
+SRCS   += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_vf.c ixgbe_mbx.c
+SRCS   += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
+SRCS   += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c
+
+.ifndef BUILDING_WITH_KERNEL
+
+opt_ix.h:
+       touch ${.OBJDIR}/${.TARGET}
+
+.endif
+
+.include <bsd.kmod.mk>
diff --git a/sys/dev/netif/ix/if_ix.c b/sys/dev/netif/ix/if_ix.c
new file mode 100644 (file)
index 0000000..e0878fd
--- /dev/null
@@ -0,0 +1,4511 @@
+/*
+ * Copyright (c) 2001-2013, Intel Corporation 
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice, 
+ *     this list of conditions and the following disclaimer.
+ *
+ *  2. Redistributions in binary form must reproduce the above copyright 
+ *     notice, this list of conditions and the following disclaimer in the 
+ *     documentation and/or other materials provided with the distribution.
+ *
+ *  3. Neither the name of the Intel Corporation nor the names of its 
+ *     contributors may be used to endorse or promote products derived from 
+ *     this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_ix.h"
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/interrupt.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/proc.h>
+#include <sys/rman.h>
+#include <sys/serialize.h>
+#include <sys/serialize2.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/ifq_var.h>
+#include <net/toeplitz.h>
+#include <net/toeplitz2.h>
+#include <net/vlan/if_vlan_var.h>
+#include <net/vlan/if_vlan_ether.h>
+#include <net/if_poll.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+
+#include <bus/pci/pcivar.h>
+#include <bus/pci/pcireg.h>
+
+#include <dev/netif/ix/ixgbe_api.h>
+#include <dev/netif/ix/if_ix.h>
+
+#ifdef IX_RSS_DEBUG
+#define IX_RSS_DPRINTF(sc, lvl, fmt, ...) \
+do { \
+       if (sc->rss_debug >= lvl) \
+               if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
+} while (0)
+#else  /* !IX_RSS_DEBUG */
+#define IX_RSS_DPRINTF(sc, lvl, fmt, ...)      ((void)0)
+#endif /* IX_RSS_DEBUG */
+
+#define IX_NAME                        "Intel(R) PRO/10GbE "
+#define IX_DEVICE(id) \
+       { IXGBE_VENDOR_ID, IXGBE_DEV_ID_##id, IX_NAME #id }
+#define IX_DEVICE_NULL         { 0, 0, NULL }
+
+static struct ix_device {
+       uint16_t        vid;
+       uint16_t        did;
+       const char      *desc;
+} ix_devices[] = {
+       IX_DEVICE(82598AF_DUAL_PORT),
+       IX_DEVICE(82598AF_SINGLE_PORT),
+       IX_DEVICE(82598EB_CX4),
+       IX_DEVICE(82598AT),
+       IX_DEVICE(82598AT2),
+       IX_DEVICE(82598),
+       IX_DEVICE(82598_DA_DUAL_PORT),
+       IX_DEVICE(82598_CX4_DUAL_PORT),
+       IX_DEVICE(82598EB_XF_LR),
+       IX_DEVICE(82598_SR_DUAL_PORT_EM),
+       IX_DEVICE(82598EB_SFP_LOM),
+       IX_DEVICE(82599_KX4),
+       IX_DEVICE(82599_KX4_MEZZ),
+       IX_DEVICE(82599_SFP),
+       IX_DEVICE(82599_XAUI_LOM),
+       IX_DEVICE(82599_CX4),
+       IX_DEVICE(82599_T3_LOM),
+       IX_DEVICE(82599_COMBO_BACKPLANE),
+       IX_DEVICE(82599_BACKPLANE_FCOE),
+       IX_DEVICE(82599_SFP_SF2),
+       IX_DEVICE(82599_SFP_FCOE),
+       IX_DEVICE(82599EN_SFP),
+       IX_DEVICE(82599_SFP_SF_QP),
+       IX_DEVICE(X540T),
+
+       /* required last entry */
+       IX_DEVICE_NULL
+};
+
+static int     ix_probe(device_t);
+static int     ix_attach(device_t);
+static int     ix_detach(device_t);
+static int     ix_shutdown(device_t);
+
+static void    ix_serialize(struct ifnet *, enum ifnet_serialize);
+static void    ix_deserialize(struct ifnet *, enum ifnet_serialize);
+static int     ix_tryserialize(struct ifnet *, enum ifnet_serialize);
+#ifdef INVARIANTS
+static void    ix_serialize_assert(struct ifnet *, enum ifnet_serialize,
+                   boolean_t);
+#endif
+static void    ix_start(struct ifnet *, struct ifaltq_subque *);
+static void    ix_watchdog(struct ifaltq_subque *);
+static int     ix_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
+static void    ix_init(void *);
+static void    ix_stop(struct ix_softc *);
+static void    ix_media_status(struct ifnet *, struct ifmediareq *);
+static int     ix_media_change(struct ifnet *);
+static void    ix_timer(void *);
+
+static void    ix_add_sysctl(struct ix_softc *);
+static int     ix_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
+static int     ix_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
+static int     ix_sysctl_txd(SYSCTL_HANDLER_ARGS);
+static int     ix_sysctl_rxd(SYSCTL_HANDLER_ARGS);
+static int     ix_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
+static int     ix_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
+static int     ix_sysctl_flowctrl(SYSCTL_HANDLER_ARGS);
+#ifdef foo
+static int     ix_sysctl_advspeed(SYSCTL_HANDLER_ARGS);
+#endif
+#if 0
+static void     ix_add_hw_stats(struct ix_softc *);
+#endif
+
+static void    ix_slot_info(struct ix_softc *);
+static int     ix_alloc_rings(struct ix_softc *);
+static void    ix_free_rings(struct ix_softc *);
+static void    ix_setup_ifp(struct ix_softc *);
+static void    ix_setup_serialize(struct ix_softc *);
+static void    ix_set_ring_inuse(struct ix_softc *, boolean_t);
+static void    ix_set_timer_cpuid(struct ix_softc *, boolean_t);
+static void    ix_update_stats(struct ix_softc *);
+
+static void    ix_set_promisc(struct ix_softc *);
+static void    ix_set_multi(struct ix_softc *);
+static void    ix_set_vlan(struct ix_softc *);
+static uint8_t *ix_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
+
+static int     ix_get_txring_inuse(const struct ix_softc *, boolean_t);
+static void    ix_init_tx_ring(struct ix_tx_ring *);
+static void    ix_free_tx_ring(struct ix_tx_ring *);
+static int     ix_create_tx_ring(struct ix_tx_ring *);
+static void    ix_destroy_tx_ring(struct ix_tx_ring *, int);
+static void    ix_init_tx_unit(struct ix_softc *);
+static int     ix_encap(struct ix_tx_ring *, struct mbuf **,
+                   uint16_t *, int *);
+static int     ix_tx_ctx_setup(struct ix_tx_ring *,
+                   const struct mbuf *, uint32_t *, uint32_t *);
+static int     ix_tso_ctx_setup(struct ix_tx_ring *,
+                   const struct mbuf *, uint32_t *, uint32_t *);
+static void    ix_txeof(struct ix_tx_ring *);
+
+static int     ix_get_rxring_inuse(const struct ix_softc *, boolean_t);
+static int     ix_init_rx_ring(struct ix_rx_ring *);
+static void    ix_free_rx_ring(struct ix_rx_ring *);
+static int     ix_create_rx_ring(struct ix_rx_ring *);
+static void    ix_destroy_rx_ring(struct ix_rx_ring *, int);
+static void    ix_init_rx_unit(struct ix_softc *);
+#if 0
+static void    ix_setup_hw_rsc(struct ix_rx_ring *);
+#endif
+static int     ix_newbuf(struct ix_rx_ring *, int, boolean_t);
+static void    ix_rxeof(struct ix_rx_ring *);
+static void    ix_rx_discard(struct ix_rx_ring *, int, boolean_t);
+static void    ix_enable_rx_drop(struct ix_softc *);
+static void    ix_disable_rx_drop(struct ix_softc *);
+
+static int     ix_alloc_intr(struct ix_softc *);
+static void    ix_free_intr(struct ix_softc *);
+static int     ix_setup_intr(struct ix_softc *);
+static void    ix_teardown_intr(struct ix_softc *, int);
+static void    ix_enable_intr(struct ix_softc *);
+static void    ix_disable_intr(struct ix_softc *);
+static void    ix_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
+#if 0
+static void    ix_configure_ivars(struct ix_softc *);
+#endif
+static void    ix_set_eitr(struct ix_softc *, int, int);
+static void    ix_intr(void *);
+
+static void    ix_config_link(struct ix_softc *);
+static boolean_t ix_sfp_probe(struct ix_softc *);
+static boolean_t ix_is_sfp(const struct ixgbe_hw *);
+static void    ix_setup_optics(struct ix_softc *);
+static void    ix_update_link_status(struct ix_softc *);
+static void    ix_handle_link(struct ix_softc *);
+static void    ix_handle_mod(struct ix_softc *);
+static void    ix_handle_msf(struct ix_softc *);
+
+#if 0
+static void    ix_msix_que(void *);
+static void    ix_msix_link(void *);
+static int     ix_allocate_msix(struct ix_softc *);
+static int     ix_setup_msix(struct ix_softc *);
+static void    ix_handle_que(void *, int);
+#endif
+
+/* XXX Shared code structure requires this for the moment */
+extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *);
+
+static device_method_t ix_methods[] = {
+       /* Device interface */
+       DEVMETHOD(device_probe,         ix_probe),
+       DEVMETHOD(device_attach,        ix_attach),
+       DEVMETHOD(device_detach,        ix_detach),
+       DEVMETHOD(device_shutdown,      ix_shutdown),
+       DEVMETHOD_END
+};
+
+static driver_t ix_driver = {
+       "ix",
+       ix_methods,
+       sizeof(struct ix_softc)
+};
+
+static devclass_t ix_devclass;
+
+DECLARE_DUMMY_MODULE(if_ix);
+DRIVER_MODULE(if_ix, pci, ix_driver, ix_devclass, NULL, NULL);
+
+static int     ix_msi_enable = 1;
+static int     ix_rxr = 0;
+static int     ix_txd = IX_PERF_TXD;
+static int     ix_rxd = IX_PERF_RXD;
+static int     ix_unsupported_sfp = 0;
+
+TUNABLE_INT("hw.ix.msi.enable", &ix_msi_enable);
+TUNABLE_INT("hw.ix.rxr", &ix_rxr);
+TUNABLE_INT("hw.ix.txd", &ix_txd);
+TUNABLE_INT("hw.ix.rxd", &ix_rxd);
+TUNABLE_INT("hw.ix.unsupported_sfp", &ix_unsupported_sfp);
+
+/*
+ * Smart speed setting, default to on.  This only works
+ * as a compile option right now as its during attach,
+ * set this to 'ixgbe_smart_speed_off' to disable.
+ */
+static const enum ixgbe_smart_speed ix_smart_speed =
+    ixgbe_smart_speed_on;
+
+static int
+ix_probe(device_t dev)
+{
+       const struct ix_device *d;
+       uint16_t vid, did;
+
+       vid = pci_get_vendor(dev);
+       did = pci_get_device(dev);
+
+       for (d = ix_devices; d->desc != NULL; ++d) {
+               if (vid == d->vid && did == d->did) {
+                       device_set_desc(dev, d->desc);
+                       return 0;
+               }
+       }
+       return ENXIO;
+}
+
+static int
+ix_attach(device_t dev)
+{
+       struct ix_softc *sc = device_get_softc(dev);
+       struct ixgbe_hw *hw;
+       int error;
+       uint16_t csum;
+       uint32_t ctrl_ext;
+
+       sc->dev = sc->osdep.dev = dev;
+       hw = &sc->hw;
+
+       if_initname(&sc->arpcom.ac_if, device_get_name(dev),
+           device_get_unit(dev));
+       ifmedia_init(&sc->media, IFM_IMASK,
+           ix_media_change, ix_media_status);
+
+       /* Save frame size */
+       sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       callout_init_mp(&sc->timer);
+       lwkt_serialize_init(&sc->main_serialize);
+
+       /*
+        * Save off the information about this board
+        */
+       hw->vendor_id = pci_get_vendor(dev);
+       hw->device_id = pci_get_device(dev);
+       hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+       hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
+       hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+       ixgbe_set_mac_type(hw);
+
+       /* Pick up the 82599 and VF settings */
+       if (hw->mac.type != ixgbe_mac_82598EB)
+               hw->phy.smart_speed = ix_smart_speed;
+
+       /* Enable bus mastering */
+       pci_enable_busmaster(dev);
+
+       /*
+        * Allocate IO memory
+        */
+       sc->mem_rid = PCIR_BAR(0);
+       sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+           &sc->mem_rid, RF_ACTIVE);
+       if (sc->mem_res == NULL) {
+               device_printf(dev, "Unable to allocate bus resource: memory\n");
+               error = ENXIO;
+               goto failed;
+       }
+
+       sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
+       sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);
+
+       sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;
+       sc->hw.back = &sc->osdep;
+
+       /*
+        * Configure total supported RX/TX ring count
+        */
+       sc->rx_ring_cnt = device_getenv_int(dev, "rxr", ix_rxr);
+       sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, IX_MAX_RXRING);
+       sc->rx_ring_inuse = sc->rx_ring_cnt;
+
+       sc->tx_ring_cnt = 1;
+       sc->tx_ring_inuse = sc->tx_ring_cnt;
+
+       /* Allocate TX/RX rings */
+       error = ix_alloc_rings(sc);
+       if (error)
+               goto failed;
+
+       /* Allocate interrupt */
+       error = ix_alloc_intr(sc);
+       if (error)
+               goto failed;
+
+       /* Setup serializes */
+       ix_setup_serialize(sc);
+
+       /* Allocate multicast array memory. */
+       sc->mta = kmalloc(IXGBE_ETH_LENGTH_OF_ADDRESS * IX_MAX_MCASTADDR,
+           M_DEVBUF, M_WAITOK);
+
+       /* Initialize the shared code */
+       hw->allow_unsupported_sfp = ix_unsupported_sfp;
+       error = ixgbe_init_shared_code(hw);
+       if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
+               /*
+                * No optics in this port; ask timer routine
+                * to probe for later insertion.
+                */
+               sc->sfp_probe = TRUE;
+               error = 0;
+       } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               device_printf(dev, "Unsupported SFP+ module detected!\n");
+               error = EIO;
+               goto failed;
+       } else if (error) {
+               device_printf(dev, "Unable to initialize the shared code\n");
+               error = EIO;
+               goto failed;
+       }
+
+       /* Make sure we have a good EEPROM before we read from it */
+       if (ixgbe_validate_eeprom_checksum(&sc->hw, &csum) < 0) {
+               device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
+               error = EIO;
+               goto failed;
+       }
+
+       error = ixgbe_init_hw(hw);
+       if (error == IXGBE_ERR_EEPROM_VERSION) {
+               device_printf(dev, "Pre-production device detected\n");
+       } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               device_printf(dev, "Unsupported SFP+ Module\n");
+               error = EIO;
+               goto failed;
+       } else if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
+               device_printf(dev, "No SFP+ Module found\n");
+       }
+
+       /* Detect and set physical type */
+       ix_setup_optics(sc);
+
+       /* Setup OS specific network interface */
+       ix_setup_ifp(sc);
+
+       /* Add sysctl tree */
+       ix_add_sysctl(sc);
+
+       error = ix_setup_intr(sc);
+       if (error) {
+               ether_ifdetach(&sc->arpcom.ac_if);
+               goto failed;
+       }
+
+       /* Initialize statistics */
+       ix_update_stats(sc);
+
+       /*
+        * Check PCIE slot type/speed/width
+        */
+       ix_slot_info(sc);
+
+       /* Set an initial default flow control value */
+       sc->fc = ixgbe_fc_full;
+
+       /* Let hardware know driver is loaded */
+       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+       ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+       return 0;
+failed:
+       ix_detach(dev);
+       return error;
+}
+
+static int
+ix_detach(device_t dev)
+{
+       struct ix_softc *sc = device_get_softc(dev);
+
+       if (device_is_attached(dev)) {
+               struct ifnet *ifp = &sc->arpcom.ac_if;
+               uint32_t ctrl_ext;
+
+               ifnet_serialize_all(ifp);
+
+               ix_stop(sc);
+               ix_teardown_intr(sc, sc->intr_cnt);
+
+               ifnet_deserialize_all(ifp);
+
+               callout_terminate(&sc->timer);
+               ether_ifdetach(ifp);
+
+               /* Let hardware know driver is unloading */
+               ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
+               ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
+       }
+
+       ifmedia_removeall(&sc->media);
+       bus_generic_detach(dev);
+
+       if (sc->sysctl_tree != NULL)
+               sysctl_ctx_free(&sc->sysctl_ctx);
+
+       ix_free_intr(sc);
+
+       if (sc->mem_res != NULL) {
+               bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
+                   sc->mem_res);
+       }
+
+       ix_free_rings(sc);
+
+       if (sc->mta != NULL)
+               kfree(sc->mta, M_DEVBUF);
+       if (sc->serializes != NULL)
+               kfree(sc->serializes, M_DEVBUF);
+
+       return 0;
+}
+
+static int
+ix_shutdown(device_t dev)
+{
+       struct ix_softc *sc = device_get_softc(dev);
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+
+       ifnet_serialize_all(ifp);
+       ix_stop(sc);
+       ifnet_deserialize_all(ifp);
+
+       return 0;
+}
+
+static void
+ix_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
+{
+       struct ix_softc *sc = ifp->if_softc;
+       struct ix_tx_ring *txr = ifsq_get_priv(ifsq);
+       int idx = -1;
+       uint16_t nsegs;
+
+       KKASSERT(txr->tx_ifsq == ifsq);
+       ASSERT_SERIALIZED(&txr->tx_serialize);
+
+       if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
+               return;
+
+       if (!sc->link_active) {
+               ifsq_purge(ifsq);
+               return;
+       }
+
+       while (!ifsq_is_empty(ifsq)) {
+               struct mbuf *m_head;
+
+               if (txr->tx_avail <= IX_MAX_SCATTER + IX_TX_RESERVED) {
+                       ifsq_set_oactive(ifsq);
+                       txr->tx_watchdog.wd_timer = 5;
+                       break;
+               }
+
+               m_head = ifsq_dequeue(ifsq);
+               if (m_head == NULL)
+                       break;
+
+               if (ix_encap(txr, &m_head, &nsegs, &idx)) {
+                       IFNET_STAT_INC(ifp, oerrors, 1);
+                       continue;
+               }
+
+               if (nsegs >= txr->tx_wreg_nsegs) {
+                       IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->tx_idx), idx);
+                       nsegs = 0;
+                       idx = -1;
+               }
+
+               ETHER_BPF_MTAP(ifp, m_head);
+       }
+       if (idx >= 0)
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->tx_idx), idx);
+}
+
+static int
+ix_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
+{
+       struct ix_softc *sc = ifp->if_softc;
+       struct ifreq *ifr = (struct ifreq *) data;
+       int error = 0, mask, reinit;
+
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
+
+       switch (command) {
+       case SIOCSIFMTU:
+               if (ifr->ifr_mtu > IX_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+                       error = EINVAL;
+               } else {
+                       ifp->if_mtu = ifr->ifr_mtu;
+                       sc->max_frame_size =
+                           ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+                       ix_init(sc);
+               }
+               break;
+
+       case SIOCSIFFLAGS:
+               if (ifp->if_flags & IFF_UP) {
+                       if (ifp->if_flags & IFF_RUNNING) {
+                               if ((ifp->if_flags ^ sc->if_flags) &
+                                   (IFF_PROMISC | IFF_ALLMULTI))
+                                       ix_set_promisc(sc);
+                       } else {
+                               ix_init(sc);
+                       }
+               } else if (ifp->if_flags & IFF_RUNNING) {
+                       ix_stop(sc);
+               }
+               sc->if_flags = ifp->if_flags;
+               break;
+
+       case SIOCADDMULTI:
+       case SIOCDELMULTI:
+               if (ifp->if_flags & IFF_RUNNING) {
+                       ix_disable_intr(sc);
+                       ix_set_multi(sc);
+                       ix_enable_intr(sc);
+               }
+               break;
+
+       case SIOCSIFMEDIA:
+       case SIOCGIFMEDIA:
+               error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+               break;
+
+       case SIOCSIFCAP:
+               reinit = 0;
+               mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+               if (mask & IFCAP_RXCSUM) {
+                       ifp->if_capenable ^= IFCAP_RXCSUM;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_VLAN_HWTAGGING) {
+                       ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_TXCSUM) {
+                       ifp->if_capenable ^= IFCAP_TXCSUM;
+                       if (ifp->if_capenable & IFCAP_TXCSUM)
+                               ifp->if_hwassist |= CSUM_OFFLOAD;
+                       else
+                               ifp->if_hwassist &= ~CSUM_OFFLOAD;
+               }
+               if (mask & IFCAP_TSO) {
+                       ifp->if_capenable ^= IFCAP_TSO;
+                       if (ifp->if_capenable & IFCAP_TSO)
+                               ifp->if_hwassist |= CSUM_TSO;
+                       else
+                               ifp->if_hwassist &= ~CSUM_TSO;
+               }
+               if (mask & IFCAP_RSS)
+                       ifp->if_capenable ^= IFCAP_RSS;
+               if (reinit && (ifp->if_flags & IFF_RUNNING))
+                       ix_init(sc);
+               break;
+
+#if 0
+       case SIOCGI2C:
+       {
+               struct ixgbe_i2c_req    i2c;
+               error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
+               if (error)
+                       break;
+               if ((i2c.dev_addr != 0xA0) || (i2c.dev_addr != 0xA2)){
+                       error = EINVAL;
+                       break;
+               }
+               hw->phy.ops.read_i2c_byte(hw, i2c.offset,
+                   i2c.dev_addr, i2c.data);
+               error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
+               break;
+       }
+#endif
+
+       default:
+               error = ether_ioctl(ifp, command, data);
+               break;
+       }
+       return error;
+}
+
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+/*
+ * Stop, (re)configure and restart the interface.  Used as the
+ * if_init method and from the ioctl paths; called with the ifnet
+ * fully serialized.  On RX ring init failure the device is stopped
+ * again and the function returns without setting IFF_RUNNING.
+ */
+static void
+ix_init(void *xsc)
+{
+       struct ix_softc *sc = xsc;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t rxpb, frame, size, tmp;
+       uint32_t gpie, rxctrl;
+       int i, error;
+
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
+
+       ix_stop(sc);
+
+       /* Configure # of used RX/TX rings */
+       ix_set_ring_inuse(sc, FALSE);
+       ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);
+
+       /* Get the latest mac address, User can use a LAA */
+       bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
+       hw->addr_ctrl.rar_used_count = 1;
+
+       /* Prepare transmit descriptors and buffers */
+       for (i = 0; i < sc->tx_ring_inuse; ++i)
+               ix_init_tx_ring(&sc->tx_rings[i]);
+
+       ixgbe_init_hw(hw);
+       ix_init_tx_unit(sc);
+
+       /* Setup Multicast table */
+       ix_set_multi(sc);
+
+       /* Prepare receive descriptors and buffers */
+       for (i = 0; i < sc->rx_ring_inuse; ++i) {
+               error = ix_init_rx_ring(&sc->rx_rings[i]);
+               if (error) {
+                       if_printf(ifp, "Could not initialize RX ring%d\n", i);
+                       ix_stop(sc);
+                       return;
+               }
+       }
+
+       /* Configure RX settings */
+       ix_init_rx_unit(sc);
+
+       gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+
+       /* Enable Fan Failure Interrupt */
+       gpie |= IXGBE_SDP1_GPIEN;
+
+       /* Add for Module detection */
+       if (hw->mac.type == ixgbe_mac_82599EB)
+               gpie |= IXGBE_SDP2_GPIEN;
+
+       /* Thermal Failure Detection */
+       if (hw->mac.type == ixgbe_mac_X540)
+               gpie |= IXGBE_SDP0_GPIEN;
+
+       if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
+               /* Enable Enhanced MSIX mode */
+               gpie |= IXGBE_GPIE_MSIX_MODE;
+               gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
+                   IXGBE_GPIE_OCD;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /* Set MTU size */
+       if (ifp->if_mtu > ETHERMTU) {
+               uint32_t mhadd;
+
+               mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+               mhadd &= ~IXGBE_MHADD_MFS_MASK;
+               mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
+               IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+       }
+
+       /*
+        * Enable TX rings
+        */
+       for (i = 0; i < sc->tx_ring_inuse; ++i) {
+               uint32_t txdctl;
+
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+
+               /*
+                * Set WTHRESH to 0, since TX head write-back is used
+                */
+               txdctl &= ~(0x7f << 16);
+
+               /*
+                * When the internal queue falls below PTHRESH (32),
+                * start prefetching as long as there are at least
+                * HTHRESH (1) buffers ready. The values are taken
+                * from the Intel linux driver 3.8.21.
+                * Prefetching enables tx line rate even with 1 queue.
+                */
+               txdctl |= (32 << 0) | (1 << 8);
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
+       }
+
+       /*
+        * Enable RX rings
+        */
+       for (i = 0; i < sc->rx_ring_inuse; ++i) {
+               uint32_t rxdctl;
+               int k;
+
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+               if (hw->mac.type == ixgbe_mac_82598EB) {
+                       /*
+                        * PTHRESH = 21
+                        * HTHRESH = 4
+                        * WTHRESH = 8
+                        */
+                       rxdctl &= ~0x3FFFFF;
+                       rxdctl |= 0x080420;
+               }
+               rxdctl |= IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
+               /* Wait (bounded, ~10ms) for the ring enable to latch */
+               for (k = 0; k < 10; ++k) {
+                       if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
+                           IXGBE_RXDCTL_ENABLE)
+                               break;
+                       else
+                               msec_delay(1);
+               }
+               wmb();
+               /*
+                * NOTE(review): ring0's descriptor count is used for
+                * every ring's RDT; fine while all rings share one
+                * size -- confirm if per-ring sizes are introduced.
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i),
+                   sc->rx_rings[0].rx_ndesc - 1);
+       }
+
+       /* Set up VLAN support and filter */
+       ix_set_vlan(sc);
+
+       /* Enable Receive engine */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               rxctrl |= IXGBE_RXCTRL_DMBYPS;
+       rxctrl |= IXGBE_RXCTRL_RXEN;
+       ixgbe_enable_rx_dma(hw, rxctrl);
+
+       if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
+#if 0
+               ix_configure_ivars(sc);
+#endif
+               /* Set up auto-mask */
+               if (hw->mac.type == ixgbe_mac_82598EB)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+               else {
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+               }
+       } else {
+               /* MSI/legacy: map all rings onto the single vector */
+               for (i = 0; i < sc->tx_ring_inuse; ++i)
+                       ix_set_ivar(sc, i, sc->tx_rings[i].tx_intr_vec, 1);
+               for (i = 0; i < sc->rx_ring_inuse; ++i)
+                       ix_set_ivar(sc, i, sc->rx_rings[i].rx_intr_vec, 0);
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+               ix_set_eitr(sc, 0, sc->intr_data[0].intr_rate);
+       }
+
+       /*
+        * Check on any SFP devices that need to be kick-started
+        */
+       if (hw->phy.type == ixgbe_phy_none) {
+               error = hw->phy.ops.identify(hw);
+               if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+                       if_printf(ifp,
+                           "Unsupported SFP+ module type was detected.\n");
+                       /* XXX stop */
+                       return;
+               }
+       }
+
+#if 0
+       /* Set moderation on the Link interrupt */
+       IXGBE_WRITE_REG(hw, IXGBE_EITR(sc->linkvec), IXGBE_LINK_ITR);
+#endif
+
+       /* Config/Enable Link */
+       ix_config_link(sc);
+
+       /*
+        * Hardware Packet Buffer & Flow Control setup
+        */
+       frame = sc->max_frame_size;
+
+       /* Calculate High Water */
+       if (hw->mac.type == ixgbe_mac_X540)
+               tmp = IXGBE_DV_X540(frame, frame);
+       else
+               tmp = IXGBE_DV(frame, frame);
+       size = IXGBE_BT2KB(tmp);
+       rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
+       hw->fc.high_water[0] = rxpb - size;
+
+       /* Now calculate Low Water */
+       if (hw->mac.type == ixgbe_mac_X540)
+               tmp = IXGBE_LOW_DV_X540(frame);
+       else
+               tmp = IXGBE_LOW_DV(frame);
+       hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
+
+       hw->fc.requested_mode = sc->fc;
+       hw->fc.pause_time = IX_FC_PAUSE;
+       hw->fc.send_xon = TRUE;
+
+       /* Initialize the FC settings */
+       ixgbe_start_hw(hw);
+
+       /* And now turn on interrupts */
+       ix_enable_intr(sc);
+
+       /* Mark running and restart the per-ring watchdogs */
+       ifp->if_flags |= IFF_RUNNING;
+       for (i = 0; i < sc->tx_ring_inuse; ++i) {
+               ifsq_clr_oactive(sc->tx_rings[i].tx_ifsq);
+               ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
+       }
+
+       ix_set_timer_cpuid(sc, FALSE);
+       callout_reset_bycpu(&sc->timer, hz, ix_timer, sc, sc->timer_cpuid);
+}
+
+#if 0
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+
+/*
+ * Unmask the interrupt for a single MSI-X vector.  82598 has one EIMS
+ * register; later MACs split the mask across EIMS_EX(0)/EIMS_EX(1).
+ */
+static __inline void
+ix_enable_queue(struct ix_softc *sc, uint32_t vector)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       /* Shift in 64-bit; (1 << vector) in int is UB for vector >= 31 */
+       uint64_t        queue = (uint64_t)1 << vector;
+       uint32_t        mask;
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+       } else {
+               mask = (queue & 0xFFFFFFFF);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+               mask = (queue >> 32);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+       }
+}
+
+/*
+ * Mask the interrupt for a single MSI-X vector; mirror image of
+ * ix_enable_queue() using EIMC/EIMC_EX.
+ */
+static __inline void
+ix_disable_queue(struct ix_softc *sc, uint32_t vector)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       /* Shift in 64-bit; (1 << vector) in int is UB for vector >= 31 */
+       uint64_t        queue = (uint64_t)1 << vector;
+       uint32_t        mask;
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+       } else {
+               mask = (queue & 0xFFFFFFFF);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+               mask = (queue >> 32);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+       }
+}
+
+/*
+ * Software-trigger interrupt causes for the queues in 'queues' by
+ * writing EICS (EICS_EX on post-82598 MACs); used to rearm queues.
+ * Currently disabled along with the rest of MSI-X support.
+ */
+static __inline void
+ix_rearm_queues(struct ix_softc *sc, uint64_t queues)
+{
+       uint32_t mask;
+
+       if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & queues);
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask);
+       } else {
+               /* Unlike enable/disable, zero halves are still written */
+               mask = (queues & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask);
+               mask = (queues >> 32);
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask);
+       }
+}
+
+/*
+ * Deferred per-queue handler from the FreeBSD taskqueue design.
+ * NOTE(review): bit-rotted while disabled -- 'more' is undeclared and
+ * ixgbe_start_locked()/ixgbe_enable_queue() do not exist in this
+ * driver; must be reworked before re-enabling.
+ */
+static void
+ix_handle_que(void *context, int pending)
+{
+       struct ix_queue *que = context;
+       struct ix_softc  *sc = que->sc;
+       struct ix_tx_ring  *txr = que->txr;
+       struct ifnet    *ifp = &sc->arpcom.ac_if;
+
+       if (ifp->if_flags & IFF_RUNNING) {
+               more = ix_rxeof(que);
+               ix_txeof(txr);
+               if (!ifq_is_empty(&ifp->if_snd))
+                       ixgbe_start_locked(txr, ifp);
+       }
+
+       /* Reenable this interrupt */
+       if (que->res != NULL)
+               ixgbe_enable_queue(sc, que->msix);
+       else
+               ix_enable_intr(sc);
+       return;
+}
+#endif
+
+/*
+ * Legacy and MSI interrupt handler.
+ *
+ * RX ring0, RX ring1 (only when RSS is enabled) and TX ring0 map to
+ * distinct EICR bits; each ring is serviced under its own serializer.
+ * Fan failure and link-state causes are handled inline.  The interrupt
+ * is re-enabled by writing sc->intr_mask to EIMS on the way out.
+ */
+static void
+ix_intr(void *xsc)
+{
+       struct ix_softc *sc = xsc;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t eicr;
+
+       ASSERT_SERIALIZED(&sc->main_serialize);
+
+       eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+       if (eicr == 0) {
+               /* Shared/spurious interrupt: nothing pending, re-enable */
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, sc->intr_mask);
+               return;
+       }
+
+       if (eicr & IX_RX0_INTR_MASK) {
+               struct ix_rx_ring *rxr = &sc->rx_rings[0];
+
+               lwkt_serialize_enter(&rxr->rx_serialize);
+               ix_rxeof(rxr);
+               lwkt_serialize_exit(&rxr->rx_serialize);
+       }
+       if (eicr & IX_RX1_INTR_MASK) {
+               struct ix_rx_ring *rxr;
+
+               /* Ring1 bits can only fire with 2-ring RSS configured */
+               KKASSERT(sc->rx_ring_inuse == IX_MIN_RXRING_RSS);
+               rxr = &sc->rx_rings[1];
+
+               lwkt_serialize_enter(&rxr->rx_serialize);
+               ix_rxeof(rxr);
+               lwkt_serialize_exit(&rxr->rx_serialize);
+       }
+
+       if (eicr & IX_TX_INTR_MASK) {
+               struct ix_tx_ring *txr = &sc->tx_rings[0];
+
+               lwkt_serialize_enter(&txr->tx_serialize);
+               ix_txeof(txr);
+               /* Kick the subqueue if completions freed descriptors */
+               if (!ifsq_is_empty(txr->tx_ifsq))
+                       ifsq_devstart(txr->tx_ifsq);
+               lwkt_serialize_exit(&txr->tx_serialize);
+       }
+
+       /* Check for fan failure */
+       if (__predict_false((eicr & IXGBE_EICR_GPI_SDP1) &&
+           hw->phy.media_type == ixgbe_media_type_copper)) {
+               if_printf(&sc->arpcom.ac_if, "CRITICAL: FAN FAILURE!!  "
+                   "REPLACE IMMEDIATELY!!\n");
+       }
+
+       /* Link status change */
+       if (__predict_false(eicr & IXGBE_EICR_LSC))
+               ix_handle_link(sc);
+
+       IXGBE_WRITE_REG(hw, IXGBE_EIMS, sc->intr_mask);
+}
+
+#if 0
+/*********************************************************************
+ *
+ *  MSIX Queue Interrupt Service routine
+ *
+ **********************************************************************/
+/*
+ * MSI-X per-queue interrupt handler with adaptive interrupt moderation
+ * (AIM), disabled together with the rest of MSI-X support.
+ * NOTE(review): bit-rotted -- 'more' and 'ixgbe_enable_aim' are
+ * undeclared here, and ixgbe_disable_queue()/ixgbe_start_locked()/
+ * ixgbe_enable_queue() do not exist in this driver; rework before
+ * re-enabling.
+ */
+void
+ix_msix_que(void *arg)
+{
+       struct ix_queue *que = arg;
+       struct ix_softc  *sc = que->sc;
+       struct ifnet    *ifp = &sc->arpcom.ac_if;
+       struct ix_tx_ring       *txr = que->txr;
+       struct ix_rx_ring       *rxr = que->rxr;
+       uint32_t                newitr = 0;
+
+       ixgbe_disable_queue(sc, que->msix);
+       ++que->irqs;
+
+       more = ix_rxeof(que);
+
+       ix_txeof(txr);
+       if (!ifq_is_empty(&ifp->if_snd))
+               ixgbe_start_locked(txr, ifp);
+
+       /* Do AIM now? */
+
+       if (ixgbe_enable_aim == FALSE)
+               goto no_calc;
+       /*
+       ** Do Adaptive Interrupt Moderation:
+       **  - Write out last calculated setting
+       **  - Calculate based on average size over
+       **    the last interval.
+       */
+       if (que->eitr_setting)
+               IXGBE_WRITE_REG(&sc->hw,
+                   IXGBE_EITR(que->msix), que->eitr_setting);
+
+       que->eitr_setting = 0;
+
+       /* Idle, do nothing */
+       if ((txr->bytes == 0) && (rxr->bytes == 0))
+               goto no_calc;
+
+       /* EITR estimate: average bytes per packet over the interval */
+       if ((txr->bytes) && (txr->packets))
+               newitr = txr->bytes/txr->packets;
+       if ((rxr->bytes) && (rxr->packets))
+               newitr = max(newitr,
+                   (rxr->bytes / rxr->packets));
+       newitr += 24; /* account for hardware frame, crc */
+
+       /* set an upper boundary */
+       newitr = min(newitr, 3000);
+
+       /* Be nice to the mid range */
+       if ((newitr > 300) && (newitr < 1200))
+               newitr = (newitr / 3);
+       else
+               newitr = (newitr / 2);
+
+       if (sc->hw.mac.type == ixgbe_mac_82598EB)
+               newitr |= newitr << 16;
+       else
+               newitr |= IXGBE_EITR_CNT_WDIS;
+
+       /* save for next interrupt */
+       que->eitr_setting = newitr;
+
+       /* Reset state */
+       txr->bytes = 0;
+       txr->packets = 0;
+       rxr->bytes = 0;
+       rxr->packets = 0;
+
+no_calc:
+#if 0
+       if (more)
+               taskqueue_enqueue(que->tq, &que->que_task);
+       else
+#endif
+               ixgbe_enable_queue(sc, que->msix);
+       return;
+}
+
+
+/*
+ * MSI-X link/"other cause" interrupt handler, disabled together with
+ * the rest of MSI-X support.  Handles ECC errors, SFP module/MSF
+ * events, fan failure and over-temp causes, then re-enables
+ * EIMS_OTHER.
+ * NOTE(review): causes are fetched from IXGBE_EICS rather than
+ * IXGBE_EICR, as inherited from the FreeBSD driver -- confirm before
+ * re-enabling.
+ */
+static void
+ix_msix_link(void *arg)
+{
+       struct ix_softc *sc = arg;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t                reg_eicr;
+
+       ++sc->link_irq;
+
+       /* First get the cause */
+       reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+       /* Clear interrupt with write */
+       IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
+
+#if 0
+       /* Link status change */
+       if (reg_eicr & IXGBE_EICR_LSC)
+               taskqueue_enqueue(sc->tq, &sc->link_task);
+#endif
+
+       if (sc->hw.mac.type != ixgbe_mac_82598EB) {
+               if (reg_eicr & IXGBE_EICR_ECC) {
+                       device_printf(sc->dev, "\nCRITICAL: ECC ERROR!! "
+                           "Please Reboot!!\n");
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+               } else
+
+               /* NOTE: the 'else' above binds to this if (SDP1 = MSF event) */
+               if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
+                       /* Clear the interrupt */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+#if 0
+                       taskqueue_enqueue(sc->tq, &sc->msf_task);
+#endif
+               } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
+                       /* Clear the interrupt */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+#if 0
+                       taskqueue_enqueue(sc->tq, &sc->mod_task);
+#endif
+               }
+       }
+
+       /* Check for fan failure */
+       if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
+           (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
+               device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! "
+                   "REPLACE IMMEDIATELY!!\n");
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+       }
+
+       /* Check for over temp condition */
+       if ((hw->mac.type == ixgbe_mac_X540) &&
+           (reg_eicr & IXGBE_EICR_TS)) {
+               device_printf(sc->dev, "\nCRITICAL: OVER TEMP!! "
+                   "PHY IS SHUT DOWN!!\n");
+               device_printf(sc->dev, "System shutdown required\n");
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
+       }
+
+       IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+       return;
+}
+
+#endif
+
+/*
+ * ifmedia status callback: refresh the link state, then report the
+ * active media and duplex for the current link speed.
+ */
+static void
+ix_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+       struct ix_softc *sc = ifp->if_softc;
+
+       ix_update_link_status(sc);
+
+       ifmr->ifm_status = IFM_AVALID;
+       ifmr->ifm_active = IFM_ETHER;
+
+       if (sc->link_active) {
+               ifmr->ifm_status |= IFM_ACTIVE;
+               switch (sc->link_speed) {
+               case IXGBE_LINK_SPEED_100_FULL:
+                       ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
+                       break;
+               case IXGBE_LINK_SPEED_1GB_FULL:
+                       ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
+                       break;
+               case IXGBE_LINK_SPEED_10GB_FULL:
+                       /* 10G media type depends on the detected optics */
+                       ifmr->ifm_active |= sc->optics | IFM_FDX;
+                       break;
+               }
+       }
+}
+
+/*
+ * ifmedia change callback: only IFM_ETHER/IFM_AUTO is accepted;
+ * autoneg is advertised at 100M/1G/10G full duplex.
+ */
+static int
+ix_media_change(struct ifnet *ifp)
+{
+       struct ix_softc *sc = ifp->if_softc;
+       const struct ifmedia *ifm = &sc->media;
+
+       if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+               return EINVAL;
+
+       if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
+               if_printf(ifp, "Only auto media type\n");
+               return EINVAL;
+       }
+
+       sc->hw.phy.autoneg_advertised = IXGBE_LINK_SPEED_100_FULL |
+           IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
+       return 0;
+}
+
+/*
+ * Make sure the ethernet/IP/TCP headers of a TSO packet reside in the
+ * first mbuf, pulling them up if necessary.  On pullup failure the
+ * chain is gone and *mp is cleared; returns ENOBUFS in that case.
+ */
+static __inline int
+ix_tso_pullup(struct mbuf **mp)
+{
+       struct mbuf *m = *mp;
+       int hdrlen;
+
+       KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
+
+       KASSERT(m->m_pkthdr.csum_iphlen > 0, ("invalid ip hlen"));
+       KASSERT(m->m_pkthdr.csum_thlen > 0, ("invalid tcp hlen"));
+       KASSERT(m->m_pkthdr.csum_lhlen > 0, ("invalid ether hlen"));
+
+       /* Total bytes that must be contiguous: ether + IP + TCP headers */
+       hdrlen = m->m_pkthdr.csum_lhlen + m->m_pkthdr.csum_iphlen +
+           m->m_pkthdr.csum_thlen;
+
+       if (__predict_false(m->m_len < hdrlen)) {
+               m = m_pullup(m, hdrlen);
+               *mp = m;
+               if (m == NULL)
+                       return ENOBUFS;
+       }
+       return 0;
+}
+
+/*
+ * Map and queue one mbuf chain onto the TX ring.
+ *
+ * On success the chain is owned by the ring (stored in the last
+ * descriptor's tx_buf and freed at completion), *segs_used is advanced
+ * by the number of descriptors consumed (including an optional offload
+ * context descriptor) and *idx is set to the next-free index so the
+ * caller can batch the TDT doorbell write.  On failure the chain is
+ * freed, *m_headp is cleared and an errno is returned.
+ */
+static int
+ix_encap(struct ix_tx_ring *txr, struct mbuf **m_headp,
+    uint16_t *segs_used, int *idx)
+{
+       uint32_t olinfo_status = 0, cmd_type_len, cmd_rs = 0;
+       int i, j, error, nsegs, first, maxsegs;
+       struct mbuf *m_head = *m_headp;
+       bus_dma_segment_t segs[IX_MAX_SCATTER];
+       bus_dmamap_t map;
+       struct ix_tx_buf *txbuf;
+       union ixgbe_adv_tx_desc *txd = NULL;
+
+       if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+               error = ix_tso_pullup(m_headp);
+               if (__predict_false(error))
+                       return error;
+               /* Pullup may have replaced the head mbuf */
+               m_head = *m_headp;
+       }
+
+       /* Basic descriptor defines */
+       cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
+           IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
+
+       if (m_head->m_flags & M_VLANTAG)
+               cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+       /*
+        * Important to capture the first descriptor
+        * used because it will contain the index of
+        * the one we tell the hardware to report back
+        */
+       first = txr->tx_next_avail;
+       txbuf = &txr->tx_buf[first];
+       map = txbuf->map;
+
+       /*
+        * Map the packet for DMA.
+        */
+       maxsegs = txr->tx_avail - IX_TX_RESERVED;
+       if (maxsegs > IX_MAX_SCATTER)
+               maxsegs = IX_MAX_SCATTER;
+
+       error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
+           segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
+       if (__predict_false(error)) {
+               m_freem(*m_headp);
+               *m_headp = NULL;
+               return error;
+       }
+       bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);
+
+       /* Defrag may have replaced the chain */
+       m_head = *m_headp;
+
+       /*
+        * Set up the appropriate offload context if requested,
+        * this may consume one TX descriptor.
+        */
+       if (ix_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status)) {
+               (*segs_used)++;
+               txr->tx_nsegs++;
+       }
+
+       *segs_used += nsegs;
+       txr->tx_nsegs += nsegs;
+       if (txr->tx_nsegs >= txr->tx_intr_nsegs) {
+               /*
+                * Report Status (RS) is turned on every intr_nsegs
+                * descriptors (roughly).
+                */
+               txr->tx_nsegs = 0;
+               cmd_rs = IXGBE_TXD_CMD_RS;
+       }
+
+       /* Fill one data descriptor per DMA segment, wrapping the ring */
+       i = txr->tx_next_avail;
+       for (j = 0; j < nsegs; j++) {
+               bus_size_t seglen;
+               bus_addr_t segaddr;
+
+               txbuf = &txr->tx_buf[i];
+               txd = &txr->tx_base[i];
+               seglen = segs[j].ds_len;
+               segaddr = htole64(segs[j].ds_addr);
+
+               txd->read.buffer_addr = segaddr;
+               txd->read.cmd_type_len = htole32(IXGBE_TXD_CMD_IFCS |
+                   cmd_type_len |seglen);
+               txd->read.olinfo_status = htole32(olinfo_status);
+
+               if (++i == txr->tx_ndesc)
+                       i = 0;
+       }
+       /* Mark the last descriptor of the packet */
+       txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | cmd_rs);
+
+       txr->tx_avail -= nsegs;
+       txr->tx_next_avail = i;
+
+       /*
+        * Swap dmamaps: the chain was loaded on 'first''s map, which now
+        * travels with the last descriptor's buffer (where m_head lives).
+        */
+       txbuf->m_head = m_head;
+       txr->tx_buf[first].map = txbuf->map;
+       txbuf->map = map;
+
+       /*
+        * Defer TDT updating, until enough descrptors are setup
+        */
+       *idx = i;
+
+       return 0;
+}
+
+/*
+ * Sync the unicast/multicast-promiscuous bits (UPE/MPE in FCTRL) with
+ * the current IFF_PROMISC/IFF_ALLMULTI interface flags; MPE is kept on
+ * when the multicast address count would overflow the filter table.
+ */
+static void
+ix_set_promisc(struct ix_softc *sc)
+{
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       uint32_t reg_rctl;
+       int mcnt = 0;
+
+       reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
+       reg_rctl &= ~IXGBE_FCTRL_UPE;
+       if (ifp->if_flags & IFF_ALLMULTI) {
+               mcnt = IX_MAX_MCASTADDR;
+       } else {
+               struct ifmultiaddr *ifma;
+
+               /* Count link-level multicast memberships, capped at the max */
+               TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+                       if (ifma->ifma_addr->sa_family != AF_LINK)
+                               continue;
+                       if (mcnt == IX_MAX_MCASTADDR)
+                               break;
+                       mcnt++;
+               }
+       }
+       /* MPE is only needed when the multicast table would overflow */
+       if (mcnt < IX_MAX_MCASTADDR)
+               reg_rctl &= ~IXGBE_FCTRL_MPE;
+       IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
+
+       if (ifp->if_flags & IFF_PROMISC) {
+               reg_rctl |= IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
+       } else if (ifp->if_flags & IFF_ALLMULTI) {
+               reg_rctl |= IXGBE_FCTRL_MPE;
+               reg_rctl &= ~IXGBE_FCTRL_UPE;
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
+       }
+}
+
+/*
+ * Program the multicast filter: collect up to IX_MAX_MCASTADDR
+ * link-level addresses into sc->mta and hand them to the shared code;
+ * fall back to multicast-promiscuous (MPE) when the table overflows or
+ * IFF_ALLMULTI is set.
+ */
+static void
+ix_set_multi(struct ix_softc *sc)
+{
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       struct ifmultiaddr *ifma;
+       uint32_t fctrl;
+       uint8_t *mta;
+       int mcnt = 0;
+
+       mta = sc->mta;
+       bzero(mta, IXGBE_ETH_LENGTH_OF_ADDRESS * IX_MAX_MCASTADDR);
+
+       TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+               if (ifma->ifma_addr->sa_family != AF_LINK)
+                       continue;
+               if (mcnt == IX_MAX_MCASTADDR)
+                       break;
+               bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+                   &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+                   IXGBE_ETH_LENGTH_OF_ADDRESS);
+               mcnt++;
+       }
+
+       /*
+        * Each branch below fully determines both UPE and MPE, so there
+        * is no need to preset them after reading FCTRL (the previous
+        * unconditional "fctrl |= UPE | MPE" was a dead store).
+        */
+       fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
+       if (ifp->if_flags & IFF_PROMISC) {
+               fctrl |= IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;
+       } else if (mcnt >= IX_MAX_MCASTADDR || (ifp->if_flags & IFF_ALLMULTI)) {
+               fctrl |= IXGBE_FCTRL_MPE;
+               fctrl &= ~IXGBE_FCTRL_UPE;
+       } else {
+               fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+       }
+       IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
+
+       if (mcnt < IX_MAX_MCASTADDR) {
+               ixgbe_update_mc_addr_list(&sc->hw,
+                   mta, mcnt, ix_mc_array_itr, TRUE);
+       }
+}
+
+/*
+ * Iterator callback required by ixgbe_update_mc_addr_list(): hands the
+ * shared code one multicast address at a time from the flat array
+ * built by ix_set_multi(), advancing the cursor each call.
+ */
+static uint8_t *
+ix_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
+{
+       uint8_t *cur = *update_ptr;
+
+       *vmdq = 0;
+       *update_ptr = cur + IXGBE_ETH_LENGTH_OF_ADDRESS;
+       return cur;
+}
+
+/*
+ * Once-a-second housekeeping callout: optionally probe for a pluggable
+ * SFP module, then refresh link state and statistics.  Self-rearms
+ * unless the interface has been brought down.
+ */
+static void
+ix_timer(void *arg)
+{
+       struct ix_softc *sc = arg;
+
+       lwkt_serialize_enter(&sc->main_serialize);
+
+       if ((sc->arpcom.ac_if.if_flags & IFF_RUNNING) == 0) {
+               /* Interface went down; do not rearm */
+               lwkt_serialize_exit(&sc->main_serialize);
+               return;
+       }
+
+       /* Skip the periodic work while an SFP module is still unresolved */
+       if (!sc->sfp_probe || ix_sfp_probe(sc)) {
+               ix_update_link_status(sc);
+               ix_update_stats(sc);
+       }
+
+       callout_reset_bycpu(&sc->timer, hz, ix_timer, sc, sc->timer_cpuid);
+       lwkt_serialize_exit(&sc->main_serialize);
+}
+
+/*
+ * Propagate sc->link_up into sc->link_active and the ifnet link state,
+ * logging transitions when bootverbose.  No-op when the state already
+ * matches.
+ */
+static void
+ix_update_link_status(struct ix_softc *sc)
+{
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+
+       if (sc->link_up) {
+               /* Link up: only act on a down->up transition */
+               if (sc->link_active)
+                       return;
+
+               if (bootverbose) {
+                       if_printf(ifp, "Link is up %d Gbps %s\n",
+                           sc->link_speed == 128 ? 10 : 1, "Full Duplex");
+               }
+               sc->link_active = TRUE;
+
+               /* Update any Flow Control changes */
+               ixgbe_fc_enable(&sc->hw);
+
+               ifp->if_link_state = LINK_STATE_UP;
+               if_link_state_change(ifp);
+       } else {
+               /* Link down: only act on an up->down transition */
+               if (!sc->link_active)
+                       return;
+
+               if (bootverbose)
+                       if_printf(ifp, "Link is Down\n");
+               ifp->if_link_state = LINK_STATE_DOWN;
+               if_link_state_change(ifp);
+
+               sc->link_active = FALSE;
+       }
+}
+
+/*
+ * Bring the interface down: mask interrupts, stop the timer, reset and
+ * stop the MAC, then release all RX/TX ring resources.  Called with
+ * the ifnet fully serialized.
+ */
+static void
+ix_stop(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       int i;
+
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
+
+       ix_disable_intr(sc);
+       callout_stop(&sc->timer);
+
+       ifp->if_flags &= ~IFF_RUNNING;
+       for (i = 0; i < sc->tx_ring_cnt; ++i) {
+               ifsq_clr_oactive(sc->tx_rings[i].tx_ifsq);
+               ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
+       }
+
+       ixgbe_reset_hw(hw);
+       /* Force the full stop sequence even though reset just ran */
+       hw->adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+       if (hw->mac.type == ixgbe_mac_82599EB)
+               ixgbe_stop_mac_link_on_d3_82599(hw);
+       /* Turn off the laser - noop with no optics */
+       ixgbe_disable_tx_laser(hw);
+
+       /* Update the stack */
+       sc->link_up = FALSE;
+       ix_update_link_status(sc);
+
+       /* Reprogram the RAR[0] in case user changed it. */
+       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+       /* Free the full ring sets, not just the in-use subset */
+       for (i = 0; i < sc->tx_ring_cnt; ++i)
+               ix_free_tx_ring(&sc->tx_rings[i]);
+
+       for (i = 0; i < sc->rx_ring_cnt; ++i)
+               ix_free_rx_ring(&sc->rx_rings[i]);
+}
+
+/*
+ * Derive sc->optics (the ifmedia subtype reported for 10G links) from
+ * the supported physical layer, checked in priority order; falls back
+ * to IFM_ETHER | IFM_AUTO when nothing matches.
+ */
+static void
+ix_setup_optics(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       int layer;
+
+       layer = ixgbe_get_supported_physical_layer(hw);
+
+       if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
+               sc->optics = IFM_10G_T;
+       else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
+               sc->optics = IFM_1000_T;
+       else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
+               sc->optics = IFM_1000_SX;
+       else if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
+           IXGBE_PHYSICAL_LAYER_10GBASE_LRM))
+               sc->optics = IFM_10G_LR;
+       else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
+               sc->optics = IFM_10G_SR;
+       else if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU)
+               sc->optics = IFM_10G_TWINAX;
+       else if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+           IXGBE_PHYSICAL_LAYER_10GBASE_CX4))
+               sc->optics = IFM_10G_CX4;
+       else
+               sc->optics = IFM_ETHER | IFM_AUTO;
+}
+
+#if 0
+/*********************************************************************
+ *
+ *  Setup MSIX Interrupt resources and handlers 
+ *
+ **********************************************************************/
+/*
+ * Allocate MSI-X vectors and handlers: one RX/TX vector per queue plus
+ * one vector for link.  Compiled out (#if 0); this is the original
+ * FreeBSD code (taskqueue/bus_bind_intr based) kept for reference until
+ * MSI-X support is reworked for DragonFly.
+ */
+static int
+ix_allocate_msix(struct ix_softc *sc)
+{
+       device_t        dev = sc->dev;
+       struct          ix_queue *que = sc->queues;
+       struct          ix_tx_ring *txr = sc->tx_rings;
+       int             error, rid, vector = 0;
+
+       for (int i = 0; i < sc->num_queues; i++, vector++, que++, txr++) {
+               rid = vector + 1;
+               que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+                   RF_SHAREABLE | RF_ACTIVE);
+               if (que->res == NULL) {
+                       device_printf(dev,"Unable to allocate"
+                           " bus resource: que interrupt [%d]\n", vector);
+                       return (ENXIO);
+               }
+               /* Set the handler function */
+               error = bus_setup_intr(dev, que->res,
+                   INTR_TYPE_NET | INTR_MPSAFE, NULL,
+                   ix_msix_que, que, &que->tag);
+               if (error) {
+                       que->res = NULL;
+                       device_printf(dev, "Failed to register QUE handler");
+                       return (error);
+               }
+#if __FreeBSD_version >= 800504
+               bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+#endif
+               que->msix = vector;
+               /*
+                * NOTE(review): `1 << que->msix` is an int shift; for
+                * vectors >= 31 this overflows before widening to
+                * uint64_t.  Should be `1ULL << que->msix` if this code
+                * is ever re-enabled.
+                */
+               sc->que_mask |= (uint64_t)(1 << que->msix);
+               /*
+               ** Bind the msix vector, and thus the
+               ** ring to the corresponding cpu.
+               */
+               if (sc->num_queues > 1)
+                       bus_bind_intr(dev, que->res, i);
+
+               TASK_INIT(&que->que_task, 0, ix_handle_que, que);
+               que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
+                   taskqueue_thread_enqueue, &que->tq);
+               taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+                   device_get_nameunit(sc->dev));
+       }
+
+       /* and Link */
+       rid = vector + 1;
+       sc->res = bus_alloc_resource_any(dev,
+           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (!sc->res) {
+               device_printf(dev,"Unable to allocate"
+           " bus resource: Link interrupt [%d]\n", rid);
+               return (ENXIO);
+       }
+       /* Set the link handler function */
+       error = bus_setup_intr(dev, sc->res,
+           INTR_TYPE_NET | INTR_MPSAFE, NULL,
+           ix_msix_link, sc, &sc->tag);
+       if (error) {
+               sc->res = NULL;
+               device_printf(dev, "Failed to register LINK handler");
+               return (error);
+       }
+#if __FreeBSD_version >= 800504
+       bus_describe_intr(dev, sc->res, sc->tag, "link");
+#endif
+       sc->linkvec = vector;
+       /* Tasklets for Link, SFP and Multispeed Fiber */
+       TASK_INIT(&sc->link_task, 0, ix_handle_link, sc);
+       TASK_INIT(&sc->mod_task, 0, ix_handle_mod, sc);
+       TASK_INIT(&sc->msf_task, 0, ix_handle_msf, sc);
+       sc->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
+           taskqueue_thread_enqueue, &sc->tq);
+       taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s linkq",
+           device_get_nameunit(sc->dev));
+
+       return (0);
+}
+
+/*
+ * Probe and configure MSI-X (falling back to MSI, then legacy INTx).
+ * Returns the number of messages allocated, or 0 to use legacy setup.
+ * Compiled out (#if 0); kept for reference until MSI-X is reworked.
+ */
+static int
+ix_setup_msix(struct ix_softc *sc)
+{
+       device_t dev = sc->dev;
+       int rid, want, queues, msgs;
+
+       /* Override by tuneable */
+       if (ixgbe_enable_msix == 0)
+               goto msi;
+
+       /* First try MSI/X */
+       rid = PCIR_BAR(MSIX_82598_BAR);
+       sc->msix_mem = bus_alloc_resource_any(dev,
+           SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       if (!sc->msix_mem) {
+               rid += 4;       /* 82599 maps in higher BAR */
+               sc->msix_mem = bus_alloc_resource_any(dev,
+                   SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       }
+       if (!sc->msix_mem) {
+               /* May not be enabled */
+               device_printf(sc->dev,
+                   "Unable to map MSIX table \n");
+               goto msi;
+       }
+
+       msgs = pci_msix_count(dev); 
+       if (msgs == 0) { /* system has msix disabled */
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   rid, sc->msix_mem);
+               sc->msix_mem = NULL;
+               goto msi;
+       }
+
+       /* Figure out a reasonable auto config value */
+       queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
+
+       if (ixgbe_num_queues != 0)
+               queues = ixgbe_num_queues;
+       /* Set max queues to 8 when autoconfiguring */
+       else if ((ixgbe_num_queues == 0) && (queues > 8))
+               queues = 8;
+
+       /*
+       ** Want one vector (RX/TX pair) per queue
+       ** plus an additional for Link.
+       */
+       want = queues + 1;
+       if (msgs >= want)
+               msgs = want;
+       else {
+               device_printf(sc->dev,
+                   "MSIX Configuration Problem, "
+                   "%d vectors but %d queues wanted!\n",
+                   msgs, want);
+               return (0); /* Will go to Legacy setup */
+       }
+       if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
+               device_printf(sc->dev,
+                   "Using MSIX interrupts with %d vectors\n", msgs);
+               sc->num_queues = queues;
+               return (msgs);
+       }
+msi:
+       msgs = pci_msi_count(dev);
+       if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
+               device_printf(sc->dev,"Using an MSI interrupt\n");
+       else
+               device_printf(sc->dev,"Using a Legacy interrupt\n");
+       return (msgs);
+}
+#endif
+
+/*
+ * Set up the ifnet: methods, queue depths, capabilities, TX subqueue
+ * wiring and supported media, then attach the interface to the stack.
+ */
+static void
+ix_setup_ifp(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       int i;
+
+       ifp->if_baudrate = IF_Gbps(10UL);
+
+       ifp->if_softc = sc;
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_init = ix_init;
+       ifp->if_ioctl = ix_ioctl;
+       ifp->if_start = ix_start;
+       ifp->if_serialize = ix_serialize;
+       ifp->if_deserialize = ix_deserialize;
+       ifp->if_tryserialize = ix_tryserialize;
+#ifdef INVARIANTS
+       ifp->if_serialize_assert = ix_serialize_assert;
+#endif
+
+       /* Queue depth tracks the first TX ring's descriptor count */
+       ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].tx_ndesc - 2);
+       ifq_set_ready(&ifp->if_snd);
+       ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
+
+       /*
+        * Map packets to TX subqueues by mask; mask 0 directs
+        * everything to subqueue 0 (presumably widened when multiple
+        * TX rings are put to use -- see tx_ring_inuse elsewhere).
+        */
+       ifp->if_mapsubq = ifq_mapsubq_mask;
+       ifq_set_subq_mask(&ifp->if_snd, 0);
+
+       ether_ifattach(ifp, hw->mac.addr, NULL);
+
+       ifp->if_capabilities =
+           IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+       if (IX_ENABLE_HWRSS(sc))
+               ifp->if_capabilities |= IFCAP_RSS;
+       ifp->if_capenable = ifp->if_capabilities;
+       ifp->if_hwassist = CSUM_OFFLOAD | CSUM_TSO;
+
+       /*
+        * Tell the upper layer(s) we support long frames.
+        */
+       ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+       /* Setup TX rings and subqueues */
+       for (i = 0; i < sc->tx_ring_cnt; ++i) {
+               struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
+               struct ix_tx_ring *txr = &sc->tx_rings[i];
+
+               ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid);
+               ifsq_set_priv(ifsq, txr);
+               ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
+               txr->tx_ifsq = ifsq;
+
+               ifsq_watchdog_init(&txr->tx_watchdog, ifsq, ix_watchdog);
+       }
+
+       /*
+        * Specify the media types supported by this adapter and register
+        * callbacks to update media and link information
+        */
+       ifmedia_add(&sc->media, IFM_ETHER | sc->optics, 0, NULL);
+       ifmedia_set(&sc->media, IFM_ETHER | sc->optics);
+       if (hw->device_id == IXGBE_DEV_ID_82598AT) {
+               ifmedia_add(&sc->media,
+                   IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+               ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+       }
+       /* Autoselect is added last and becomes the current selection */
+       ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+       ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+}
+
+/*
+ * Return TRUE when the PHY is one of the known SFP module types,
+ * FALSE otherwise.
+ */
+static boolean_t
+ix_is_sfp(const struct ixgbe_hw *hw)
+{
+       if (hw->phy.type == ixgbe_phy_sfp_avago ||
+           hw->phy.type == ixgbe_phy_sfp_ftl ||
+           hw->phy.type == ixgbe_phy_sfp_intel ||
+           hw->phy.type == ixgbe_phy_sfp_unknown ||
+           hw->phy.type == ixgbe_phy_sfp_passive_tyco ||
+           hw->phy.type == ixgbe_phy_sfp_passive_unknown)
+               return TRUE;
+       return FALSE;
+}
+
+/*
+ * Configure the link.  For SFP modules, kick the module/multispeed
+ * fiber handlers; for copper/backplane, negotiate and set up the link
+ * through the MAC ops.  Errors from the hardware layer abort silently.
+ */
+static void
+ix_config_link(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       boolean_t sfp;
+
+       sfp = ix_is_sfp(hw);
+       if (sfp) { 
+               if (hw->phy.multispeed_fiber) {
+                       hw->mac.ops.setup_sfp(hw);
+                       ixgbe_enable_tx_laser(hw);
+                       ix_handle_msf(sc);
+               } else {
+                       ix_handle_mod(sc);
+               }
+       } else {
+               /* err holds ixgbe status codes, hence uint32_t */
+               uint32_t autoneg, err = 0;
+
+               if (hw->mac.ops.check_link != NULL) {
+                       err = ixgbe_check_link(hw, &sc->link_speed,
+                           &sc->link_up, FALSE);
+                       if (err)
+                               return;
+               }
+
+               /* Fall back to hardware-reported capabilities when no
+                * advertisement has been configured */
+               autoneg = hw->phy.autoneg_advertised;
+               if (!autoneg && hw->mac.ops.get_link_capabilities != NULL) {
+                       bool negotiate;
+
+                       err = hw->mac.ops.get_link_capabilities(hw,
+                           &autoneg, &negotiate);
+                       if (err)
+                               return;
+               }
+
+               if (hw->mac.ops.setup_link != NULL) {
+                       err = hw->mac.ops.setup_link(hw,
+                           autoneg, sc->link_up);
+                       if (err)
+                               return;
+               }
+       }
+}
+
+/*
+ * Allocate the parent busdma tag and all TX/RX ring structures.
+ * On error, partially-created rings are left in place; the caller's
+ * detach path is expected to tear them down (TODO confirm against
+ * the attach/detach code).
+ */
+static int
+ix_alloc_rings(struct ix_softc *sc)
+{
+       int error, i;
+
+       /*
+        * Create top level busdma tag
+        */
+       error = bus_dma_tag_create(NULL, 1, 0,
+           BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+           BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
+           &sc->parent_tag);
+       if (error) {
+               device_printf(sc->dev, "could not create top level DMA tag\n");
+               return error;
+       }
+
+       /*
+        * Allocate TX descriptor rings and buffers
+        */
+       sc->tx_rings = kmalloc_cachealign(
+           sizeof(struct ix_tx_ring) * sc->tx_ring_cnt,
+           M_DEVBUF, M_WAITOK | M_ZERO);
+       for (i = 0; i < sc->tx_ring_cnt; ++i) {
+               struct ix_tx_ring *txr = &sc->tx_rings[i];
+
+               txr->tx_sc = sc;
+               txr->tx_idx = i;
+               lwkt_serialize_init(&txr->tx_serialize);
+
+               error = ix_create_tx_ring(txr);
+               if (error)
+                       return error;
+       }
+
+       /*
+        * Allocate RX descriptor rings and buffers
+        */ 
+       sc->rx_rings = kmalloc_cachealign(
+           sizeof(struct ix_rx_ring) * sc->rx_ring_cnt,
+           M_DEVBUF, M_WAITOK | M_ZERO);
+       for (i = 0; i < sc->rx_ring_cnt; ++i) {
+               struct ix_rx_ring *rxr = &sc->rx_rings[i];
+
+               rxr->rx_sc = sc;
+               rxr->rx_idx = i;
+               lwkt_serialize_init(&rxr->rx_serialize);
+
+               error = ix_create_rx_ring(rxr);
+               if (error)
+                       return error;
+       }
+
+       return 0;
+}
+
+/*
+ * Create one TX ring: validate the descriptor count, allocate the head
+ * write-back buffer, the descriptor ring, the software buffer array and
+ * per-buffer DMA maps.  Returns 0 or an errno; on failure after partial
+ * setup, already-created maps are destroyed via ix_destroy_tx_ring().
+ */
+static int
+ix_create_tx_ring(struct ix_tx_ring *txr)
+{
+       int error, i, tsize, ntxd;
+
+       /*
+        * Validate number of transmit descriptors.  It must not exceed
+        * hardware maximum, and must be multiple of IX_DBA_ALIGN.
+        */
+       ntxd = device_getenv_int(txr->tx_sc->dev, "txd", ix_txd);
+       if (((ntxd * sizeof(union ixgbe_adv_tx_desc)) % IX_DBA_ALIGN) != 0 ||
+           ntxd < IX_MIN_TXD || ntxd > IX_MAX_TXD) {
+               device_printf(txr->tx_sc->dev,
+                   "Using %d TX descriptors instead of %d!\n",
+                   IX_DEF_TXD, ntxd);
+               txr->tx_ndesc = IX_DEF_TXD;
+       } else {
+               txr->tx_ndesc = ntxd;
+       }
+
+       /*
+        * Allocate TX head write-back buffer
+        */
+       txr->tx_hdr = bus_dmamem_coherent_any(txr->tx_sc->parent_tag,
+           __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
+           &txr->tx_hdr_dtag, &txr->tx_hdr_map, &txr->tx_hdr_paddr);
+       if (txr->tx_hdr == NULL) {
+               device_printf(txr->tx_sc->dev,
+                   "Unable to allocate TX head write-back buffer\n");
+               return ENOMEM;
+       }
+
+       /*
+        * Allocate TX descriptor ring
+        */
+       tsize = roundup2(txr->tx_ndesc * sizeof(union ixgbe_adv_tx_desc),
+           IX_DBA_ALIGN);
+       txr->tx_base = bus_dmamem_coherent_any(txr->tx_sc->parent_tag,
+           IX_DBA_ALIGN, tsize, BUS_DMA_WAITOK | BUS_DMA_ZERO,
+           &txr->tx_base_dtag, &txr->tx_base_map, &txr->tx_base_paddr);
+       if (txr->tx_base == NULL) {
+               device_printf(txr->tx_sc->dev,
+                   "Unable to allocate TX Descriptor memory\n");
+               return ENOMEM;
+       }
+
+       /* Software per-descriptor buffer bookkeeping array */
+       tsize = __VM_CACHELINE_ALIGN(sizeof(struct ix_tx_buf) * txr->tx_ndesc);
+       txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);
+
+       /*
+        * Create DMA tag for TX buffers
+        */
+       error = bus_dma_tag_create(txr->tx_sc->parent_tag,
+           1, 0,               /* alignment, bounds */
+           BUS_SPACE_MAXADDR,  /* lowaddr */
+           BUS_SPACE_MAXADDR,  /* highaddr */
+           NULL, NULL,         /* filter, filterarg */
+           IX_TSO_SIZE,        /* maxsize */
+           IX_MAX_SCATTER,     /* nsegments */
+           PAGE_SIZE,          /* maxsegsize */
+           BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
+           BUS_DMA_ONEBPAGE,   /* flags */
+           &txr->tx_tag);
+       if (error) {
+               device_printf(txr->tx_sc->dev,
+                   "Unable to allocate TX DMA tag\n");
+               kfree(txr->tx_buf, M_DEVBUF);
+               txr->tx_buf = NULL;
+               return error;
+       }
+
+       /*
+        * Create DMA maps for TX buffers
+        */
+       for (i = 0; i < txr->tx_ndesc; ++i) {
+               struct ix_tx_buf *txbuf = &txr->tx_buf[i];
+
+               error = bus_dmamap_create(txr->tx_tag,
+                   BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
+               if (error) {
+                       device_printf(txr->tx_sc->dev,
+                           "Unable to create TX DMA map\n");
+                       /* i maps created so far are torn down */
+                       ix_destroy_tx_ring(txr, i);
+                       return error;
+               }
+       }
+
+       /*
+        * Initialize various watermark
+        */
+       txr->tx_wreg_nsegs = IX_DEF_TXWREG_NSEGS;
+       /* Request a TX completion roughly every 1/16th of the ring */
+       txr->tx_intr_nsegs = txr->tx_ndesc / 16;
+
+       return 0;
+}
+
+/*
+ * Tear down a TX ring created by ix_create_tx_ring().  'ndesc' is the
+ * number of per-buffer DMA maps that were actually created, allowing
+ * partial teardown from the creation error path.  All mbufs must have
+ * been freed already (asserted below).
+ */
+static void
+ix_destroy_tx_ring(struct ix_tx_ring *txr, int ndesc)
+{
+       int i;
+
+       if (txr->tx_hdr != NULL) {
+               bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_map);
+               bus_dmamem_free(txr->tx_hdr_dtag,
+                   __DEVOLATILE(void *, txr->tx_hdr), txr->tx_hdr_map);
+               bus_dma_tag_destroy(txr->tx_hdr_dtag);
+               txr->tx_hdr = NULL;
+       }
+
+       if (txr->tx_base != NULL) {
+               bus_dmamap_unload(txr->tx_base_dtag, txr->tx_base_map);
+               bus_dmamem_free(txr->tx_base_dtag, txr->tx_base,
+                   txr->tx_base_map);
+               bus_dma_tag_destroy(txr->tx_base_dtag);
+               txr->tx_base = NULL;
+       }
+
+       /* No buffer array means nothing further was created */
+       if (txr->tx_buf == NULL)
+               return;
+
+       for (i = 0; i < ndesc; ++i) {
+               struct ix_tx_buf *txbuf = &txr->tx_buf[i];
+
+               KKASSERT(txbuf->m_head == NULL);
+               bus_dmamap_destroy(txr->tx_tag, txbuf->map);
+       }
+       bus_dma_tag_destroy(txr->tx_tag);
+
+       kfree(txr->tx_buf, M_DEVBUF);
+       txr->tx_buf = NULL;
+}
+
+/*
+ * Bring a TX ring back to its pristine state: empty descriptor ring,
+ * zeroed head write-back word, and all software indices reset.
+ */
+static void
+ix_init_tx_ring(struct ix_tx_ring *txr)
+{
+       /* Reset the software ring state; all descriptors are free */
+       txr->tx_next_avail = 0;
+       txr->tx_next_clean = 0;
+       txr->tx_nsegs = 0;
+       txr->tx_avail = txr->tx_ndesc;
+
+       /* Wipe any stale descriptor contents */
+       bzero(txr->tx_base, sizeof(union ixgbe_adv_tx_desc) * txr->tx_ndesc);
+
+       /* Clear the head write-back word */
+       *(txr->tx_hdr) = 0;
+}
+
+/*
+ * Program the hardware TX unit: ring base/length/head/tail registers,
+ * head write-back, and (on 82599/X540) the DMA TX enable and MTQC
+ * queue layout.
+ */
+static void
+ix_init_tx_unit(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       int i;
+
+       /*
+        * Setup the Base and Length of the Tx Descriptor Ring
+        */
+       for (i = 0; i < sc->tx_ring_inuse; ++i) {
+               struct ix_tx_ring *txr = &sc->tx_rings[i];
+               uint64_t tdba = txr->tx_base_paddr;
+               uint64_t hdr_paddr = txr->tx_hdr_paddr;
+               uint32_t txctrl;
+
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (uint32_t)tdba);
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (uint32_t)(tdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
+                   txr->tx_ndesc * sizeof(union ixgbe_adv_tx_desc));
+
+               /* Setup the HW Tx Head and Tail descriptor pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+
+               /*
+                * Disable TX head write-back relax ordering; the
+                * DCA_TXCTRL register moved on 82599 and later, hence
+                * the per-MAC switch for both the read and the write.
+                */
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               default:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+                       break;
+               }
+               txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               default:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
+                       break;
+               }
+
+               /* Enable TX head write-back */
+               IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i),
+                   (uint32_t)(hdr_paddr >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
+                   ((uint32_t)hdr_paddr) | IXGBE_TDWBAL_HEAD_WB_ENABLE);
+       }
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               uint32_t dmatxctl, rttdcs;
+
+               dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+               dmatxctl |= IXGBE_DMATXCTL_TE;
+               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+
+               /* Disable arbiter to set MTQC */
+               rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               rttdcs |= IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+               IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+
+               /* Re-enable arbiter */
+               rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+       }
+}
+
+/*
+ * Set up a TX context descriptor for checksum offload and/or VLAN
+ * tagging, delegating to ix_tso_ctx_setup() for TSO frames.  On output,
+ * *cmd_type_len and *olinfo_status receive additional data-descriptor
+ * bits.  Returns the number of descriptors consumed (0 or 1).
+ */
+static int
+ix_tx_ctx_setup(struct ix_tx_ring *txr, const struct mbuf *mp,
+    uint32_t *cmd_type_len, uint32_t *olinfo_status)
+{
+       struct ixgbe_adv_tx_context_desc *TXD;
+       uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       int ehdrlen, ip_hlen = 0, ctxd;
+       boolean_t offload = TRUE;
+
+       /* First check if TSO is to be used */
+       if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
+               return ix_tso_ctx_setup(txr, mp,
+                   cmd_type_len, olinfo_status);
+       }
+
+       if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+               offload = FALSE;
+
+       /* Indicate the whole packet as payload when not doing TSO */
+       *olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+       /*
+        * In advanced descriptors the vlan tag must be placed into the
+        * context descriptor.  Hence we need to make one even if not
+        * doing checksum offloads.
+        */
+       if (mp->m_flags & M_VLANTAG) {
+               /*
+                * NOTE(review): htole16() on a value inside a host-order
+                * word is a no-op on LE; mirrors upstream ixgbe code.
+                */
+               vlan_macip_lens |= htole16(mp->m_pkthdr.ether_vlantag) <<
+                   IXGBE_ADVTXD_VLAN_SHIFT;
+       } else if (!offload) {
+               /* No TX descriptor is consumed */
+               return 0;
+       }
+
+       /* Set the ether header length */
+       ehdrlen = mp->m_pkthdr.csum_lhlen;
+       KASSERT(ehdrlen > 0, ("invalid ether hlen"));
+       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+
+       if (mp->m_pkthdr.csum_flags & CSUM_IP) {
+               *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+               ip_hlen = mp->m_pkthdr.csum_iphlen;
+               KASSERT(ip_hlen > 0, ("invalid ip hlen"));
+       }
+       vlan_macip_lens |= ip_hlen;
+
+       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+       if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+       else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+
+       if (mp->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
+               *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+
+       /* Now ready a context descriptor */
+       ctxd = txr->tx_next_avail;
+       TXD = (struct ixgbe_adv_tx_context_desc *)&txr->tx_base[ctxd];
+
+       /* Now copy bits into descriptor */
+       TXD->vlan_macip_lens = htole32(vlan_macip_lens);
+       TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
+       TXD->seqnum_seed = htole32(0);
+       TXD->mss_l4len_idx = htole32(0);
+
+       /* We've consumed the first desc, adjust counters */
+       if (++ctxd == txr->tx_ndesc)
+               ctxd = 0;
+       txr->tx_next_avail = ctxd;
+       --txr->tx_avail;
+
+       /* One TX descriptor is consumed */
+       return 1;
+}
+
+/*
+ * Set up a TX context descriptor for TSO (TCP segmentation offload).
+ * Assumes IPv4 TCP (TUCMD_IPV4 | L4T_TCP are set unconditionally).
+ * Adds the TSE command bit and checksum/paylen bits to the caller's
+ * *cmd_type_len / *olinfo_status.  Always consumes one descriptor.
+ */
+static int
+ix_tso_ctx_setup(struct ix_tx_ring *txr, const struct mbuf *mp,
+    uint32_t *cmd_type_len, uint32_t *olinfo_status)
+{
+       struct ixgbe_adv_tx_context_desc *TXD;
+       uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       uint32_t mss_l4len_idx = 0, paylen;
+       int ctxd, ehdrlen, ip_hlen, tcp_hlen;
+
+       ehdrlen = mp->m_pkthdr.csum_lhlen;
+       KASSERT(ehdrlen > 0, ("invalid ether hlen"));
+
+       ip_hlen = mp->m_pkthdr.csum_iphlen;
+       KASSERT(ip_hlen > 0, ("invalid ip hlen"));
+
+       tcp_hlen = mp->m_pkthdr.csum_thlen;
+       KASSERT(tcp_hlen > 0, ("invalid tcp hlen"));
+
+       ctxd = txr->tx_next_avail;
+       TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       if (mp->m_flags & M_VLANTAG) {
+               vlan_macip_lens |= htole16(mp->m_pkthdr.ether_vlantag) <<
+                   IXGBE_ADVTXD_VLAN_SHIFT;
+       }
+       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens |= ip_hlen;
+       TXD->vlan_macip_lens = htole32(vlan_macip_lens);
+
+       /* ADV DTYPE TUCMD */
+       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+       TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
+
+       /* MSS L4LEN IDX */
+       mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+       mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+       TXD->seqnum_seed = htole32(0);
+
+       /* Advance ring index, wrapping at the end */
+       if (++ctxd == txr->tx_ndesc)
+               ctxd = 0;
+
+       txr->tx_avail--;
+       txr->tx_next_avail = ctxd;
+
+       *cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+       /* This is used in the transmit desc in encap */
+       paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
+
+       /* IP + L4 checksum offload are implied by TSO */
+       *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+       *olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+       *olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+       /* One TX descriptor is consumed */
+       return 1;
+}
+
+/*
+ * Reclaim completed TX descriptors.  Completion is detected via the
+ * TX head write-back word (*tx_hdr) rather than per-descriptor DD
+ * bits: every descriptor between tx_next_clean and the written-back
+ * head is done.  Frees the associated mbufs and reopens the subqueue
+ * once enough space for a worst-case packet is available.
+ */
+static void
+ix_txeof(struct ix_tx_ring *txr)
+{
+       struct ifnet *ifp = &txr->tx_sc->arpcom.ac_if;
+       int first, hdr, avail;
+
+       /* Ring completely free; nothing to reclaim */
+       if (txr->tx_avail == txr->tx_ndesc)
+               return;
+
+       first = txr->tx_next_clean;
+       hdr = *(txr->tx_hdr);
+
+       if (first == hdr)
+               return;
+
+       avail = txr->tx_avail;
+       while (first != hdr) {
+               struct ix_tx_buf *txbuf = &txr->tx_buf[first];
+
+               ++avail;
+               /* Only descriptors carrying an mbuf own DMA state */
+               if (txbuf->m_head) {
+                       bus_dmamap_unload(txr->tx_tag, txbuf->map);
+                       m_freem(txbuf->m_head);
+                       txbuf->m_head = NULL;
+                       IFNET_STAT_INC(ifp, opackets, 1);
+               }
+               if (++first == txr->tx_ndesc)
+                       first = 0;
+       }
+       txr->tx_next_clean = first;
+       txr->tx_avail = avail;
+
+       /* Enough room for a maximally-fragmented packet: resume TX */
+       if (txr->tx_avail > IX_MAX_SCATTER + IX_TX_RESERVED) {
+               ifsq_clr_oactive(txr->tx_ifsq);
+               txr->tx_watchdog.wd_timer = 0;
+       }
+}
+
+/*
+ * Create one RX ring: validate the descriptor count, allocate the
+ * descriptor ring, the software buffer array, the spare DMA map and
+ * per-buffer DMA maps.  Returns 0 or an errno; on failure after partial
+ * setup, already-created maps are destroyed via ix_destroy_rx_ring().
+ */
+static int
+ix_create_rx_ring(struct ix_rx_ring *rxr)
+{
+       int i, rsize, error, nrxd;
+
+       /*
+        * Validate number of receive descriptors.  It must not exceed
+        * hardware maximum, and must be multiple of IX_DBA_ALIGN.
+        */
+       nrxd = device_getenv_int(rxr->rx_sc->dev, "rxd", ix_rxd);
+       if (((nrxd * sizeof(union ixgbe_adv_rx_desc)) % IX_DBA_ALIGN) != 0 ||
+           nrxd < IX_MIN_RXD || nrxd > IX_MAX_RXD) {
+               device_printf(rxr->rx_sc->dev,
+                   "Using %d RX descriptors instead of %d!\n",
+                   IX_DEF_RXD, nrxd);
+               rxr->rx_ndesc = IX_DEF_RXD;
+       } else {
+               rxr->rx_ndesc = nrxd;
+       }
+
+       /*
+        * Allocate RX descriptor ring
+        */
+       rsize = roundup2(rxr->rx_ndesc * sizeof(union ixgbe_adv_rx_desc),
+           IX_DBA_ALIGN);
+       rxr->rx_base = bus_dmamem_coherent_any(rxr->rx_sc->parent_tag,
+           IX_DBA_ALIGN, rsize, BUS_DMA_WAITOK | BUS_DMA_ZERO,
+           &rxr->rx_base_dtag, &rxr->rx_base_map, &rxr->rx_base_paddr);
+       if (rxr->rx_base == NULL) {
+               /* Was "TX Descriptor" -- copy-paste from the TX path */
+               device_printf(rxr->rx_sc->dev,
+                   "Unable to allocate RX Descriptor memory\n");
+               return ENOMEM;
+       }
+
+       /* Software per-descriptor buffer bookkeeping array */
+       rsize = __VM_CACHELINE_ALIGN(sizeof(struct ix_rx_buf) * rxr->rx_ndesc);
+       rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);
+
+       /*
+        * Create DMA tag for RX buffers: single segment, at most one
+        * page per buffer.
+        */
+       error = bus_dma_tag_create(rxr->rx_sc->parent_tag,
+           1, 0,               /* alignment, bounds */
+           BUS_SPACE_MAXADDR,  /* lowaddr */
+           BUS_SPACE_MAXADDR,  /* highaddr */
+           NULL, NULL,         /* filter, filterarg */
+           PAGE_SIZE,          /* maxsize */
+           1,                  /* nsegments */
+           PAGE_SIZE,          /* maxsegsize */
+           BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
+           &rxr->rx_tag);
+       if (error) {
+               device_printf(rxr->rx_sc->dev,
+                   "Unable to create RX DMA tag\n");
+               kfree(rxr->rx_buf, M_DEVBUF);
+               rxr->rx_buf = NULL;
+               return error;
+       }
+
+       /*
+        * Create spare DMA map, used when reloading a buffer so the old
+        * map stays valid if the new mbuf allocation fails.
+        */
+       error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
+           &rxr->rx_sparemap);
+       if (error) {
+               device_printf(rxr->rx_sc->dev,
+                   "Unable to create spare RX DMA map\n");
+               bus_dma_tag_destroy(rxr->rx_tag);
+               kfree(rxr->rx_buf, M_DEVBUF);
+               rxr->rx_buf = NULL;
+               return error;
+       }
+
+       /*
+        * Create DMA maps for RX buffers
+        */
+       for (i = 0; i < rxr->rx_ndesc; ++i) {
+               struct ix_rx_buf *rxbuf = &rxr->rx_buf[i];
+
+               error = bus_dmamap_create(rxr->rx_tag,
+                   BUS_DMA_WAITOK, &rxbuf->map);
+               if (error) {
+                       device_printf(rxr->rx_sc->dev,
+                           "Unable to create RX dma map\n");
+                       /* i maps created so far are torn down */
+                       ix_destroy_rx_ring(rxr, i);
+                       return error;
+               }
+       }
+
+       /*
+        * Initialize various watermark
+        */
+       rxr->rx_wreg_nsegs = IX_DEF_RXWREG_NSEGS;
+
+       return 0;
+}
+
+/*
+ * Tear down an RX ring created by ix_create_rx_ring().  'ndesc' is the
+ * number of per-buffer DMA maps that were actually created, allowing
+ * partial teardown from the creation error path.  All mbufs must have
+ * been freed already (asserted below).
+ */
+static void
+ix_destroy_rx_ring(struct ix_rx_ring *rxr, int ndesc)
+{
+       int i;
+
+       if (rxr->rx_base != NULL) {
+               bus_dmamap_unload(rxr->rx_base_dtag, rxr->rx_base_map);
+               bus_dmamem_free(rxr->rx_base_dtag, rxr->rx_base,
+                   rxr->rx_base_map);
+               bus_dma_tag_destroy(rxr->rx_base_dtag);
+               rxr->rx_base = NULL;
+       }
+
+       /* No buffer array means nothing further was created */
+       if (rxr->rx_buf == NULL)
+               return;
+
+       for (i = 0; i < ndesc; ++i) {
+               struct ix_rx_buf *rxbuf = &rxr->rx_buf[i];
+
+               KKASSERT(rxbuf->m_head == NULL);
+               bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
+       }
+       bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
+       bus_dma_tag_destroy(rxr->rx_tag);
+
+       kfree(rxr->rx_buf, M_DEVBUF);
+       rxr->rx_buf = NULL;
+}
+
+/*
+ * Extract the RSC (hardware receive-side coalescing) descriptor count
+ * from an RX descriptor's write-back status word; non-zero means the
+ * descriptor was merged by hardware RSC.
+ */
+static __inline uint32_t
+ix_rsc_count(union ixgbe_adv_rx_desc *rx)
+{
+       uint32_t data;
+
+       data = le32toh(rx->wb.lower.lo_dword.data);
+       return (data & IXGBE_RXDADV_RSCCNT_MASK) >>
+           IXGBE_RXDADV_RSCCNT_SHIFT;
+}
+
+#if 0
+/*********************************************************************
+ *
+ *  Initialize Hardware RSC (LRO) feature on 82599
+ *  for an RX ring, this is toggled by the LRO capability
+ *  even though it is transparent to the stack.
+ *
+ *  NOTE: since this HW feature only works with IPV4 and 
+ *        our testing has shown soft LRO to be as effective
+ *        I have decided to disable this by default.
+ *
+ **********************************************************************/
+/*
+ * Enable hardware RSC (LRO) on an RX ring for 82599.  Compiled out
+ * (#if 0): the feature is IPv4-only and software LRO was measured to
+ * be comparably effective; kept for reference.
+ */
+static void
+ix_setup_hw_rsc(struct ix_rx_ring *rxr)
+{
+       struct  ix_softc        *sc = rxr->rx_sc;
+       struct  ixgbe_hw        *hw = &sc->hw;
+       uint32_t                        rscctrl, rdrxctl;
+
+#if 0
+       /* If turning LRO/RSC off we need to disable it */
+       if ((sc->arpcom.ac_if.if_capenable & IFCAP_LRO) == 0) {
+               rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+               rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+               return;
+       }
+#endif
+
+       rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+       rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+       rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+       rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+       IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+       rscctrl |= IXGBE_RSCCTL_RSCEN;
+       /*
+       ** Limit the total number of descriptors that
+       ** can be combined, so it does not exceed 64K
+       */
+       if (rxr->mbuf_sz == MCLBYTES)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+       else if (rxr->mbuf_sz == MJUMPAGESIZE)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+       else if (rxr->mbuf_sz == MJUM9BYTES)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+       else  /* Using 16K cluster */
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
+
+       /* Enable TCP header recognition */
+       IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
+           (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
+           IXGBE_PSRTYPE_TCPHDR));
+
+       /* Disable RSC for ACK packets */
+       IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+           (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+
+       rxr->hw_rsc = TRUE;
+}
+#endif
+
+/*
+ * Bring an RX ring into its initial state: zero the descriptor
+ * ring, choose the RX mbuf size from the adapter's max frame size
+ * (MCLBYTES or MJUMPAGESIZE), post a fresh mbuf into every
+ * descriptor and reset the ring's housekeeping state.
+ *
+ * Returns 0 on success, or the error from ix_newbuf().
+ * NOTE(review): on failure the mbufs already posted are left in
+ * place, presumably reclaimed by the caller's stop path -- confirm.
+ */
+static int
+ix_init_rx_ring(struct ix_rx_ring *rxr)
+{
+       int i;
+
+       /* Clear the ring contents */
+       bzero(rxr->rx_base, rxr->rx_ndesc * sizeof(union ixgbe_adv_rx_desc));
+
+       /* XXX we need JUMPAGESIZE for RSC too */
+       if (rxr->rx_sc->max_frame_size <= MCLBYTES)
+               rxr->rx_mbuf_sz = MCLBYTES;
+       else
+               rxr->rx_mbuf_sz = MJUMPAGESIZE;
+
+       /* Now replenish the mbufs */
+       for (i = 0; i < rxr->rx_ndesc; ++i) {
+               int error;
+
+               error = ix_newbuf(rxr, i, TRUE);
+               if (error)
+                       return error;
+       }
+
+       /* Setup our descriptor indices */
+       rxr->rx_next_check = 0;
+       rxr->rx_flags &= ~IX_RXRING_FLAG_DISC;
+
+#if 0
+       /*
+       ** Now set up the LRO interface:
+       */
+       if (ixgbe_rsc_enable)
+               ix_setup_hw_rsc(rxr);
+#endif
+
+       return 0;
+}
+
+/* Bit position of the header-buffer-size field within SRRCTL */
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+/* Rounding term so buffer sizes round up to SRRCTL.BSIZEPKT granularity */
+#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
+       
+/*
+ * Program the chip's RX side: frame acceptance (FCTRL), jumbo
+ * frame enable, per-ring descriptor base/length and SRRCTL buffer
+ * sizing, per-ring RX drop policy, the RSS key/redirect table,
+ * and RX checksum offload.  RXCTRL.RXEN is cleared on entry and
+ * not re-enabled here.
+ */
+static void
+ix_init_rx_unit(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       uint32_t bufsz, rxctrl, fctrl, rxcsum, hlreg;
+       int i;
+
+       /*
+        * Make sure receives are disabled while setting up the descriptor ring
+        */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+       /* Enable broadcasts */
+       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       fctrl |= IXGBE_FCTRL_BAM;
+       fctrl |= IXGBE_FCTRL_DPF;
+       fctrl |= IXGBE_FCTRL_PMCF;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+       /* Set for Jumbo Frames? */
+       hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+       if (ifp->if_mtu > ETHERMTU)
+               hlreg |= IXGBE_HLREG0_JUMBOEN;
+       else
+               hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+
+       /* All rings use the same mbuf size; see ix_init_rx_ring() */
+       KKASSERT(sc->rx_rings[0].rx_mbuf_sz >= MCLBYTES);
+       bufsz = (sc->rx_rings[0].rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
+           IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+       for (i = 0; i < sc->rx_ring_inuse; ++i) {
+               struct ix_rx_ring *rxr = &sc->rx_rings[i];
+               uint64_t rdba = rxr->rx_base_paddr;
+               uint32_t srrctl;
+
+               /* Setup the Base and Length of the Rx Descriptor Ring */
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (uint32_t)rdba);
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (uint32_t)(rdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
+                   rxr->rx_ndesc * sizeof(union ixgbe_adv_rx_desc));
+
+               /*
+                * Set up the SRRCTL register
+                */
+               srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+
+               srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+               srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+               srrctl |= bufsz;
+               srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+               if (sc->rx_ring_inuse > 1) {
+                       /* See the comment near ix_enable_rx_drop() */
+                       switch (sc->fc) {
+                       case ixgbe_fc_rx_pause:
+                       case ixgbe_fc_tx_pause:
+                       case ixgbe_fc_full:
+                               srrctl &= ~IXGBE_SRRCTL_DROP_EN;
+                               if (i == 0 && bootverbose) {
+                                       if_printf(ifp, "flow control %d, "
+                                           "disable RX drop\n", sc->fc);
+                               }
+                               break;
+
+                       case ixgbe_fc_none:
+                               srrctl |= IXGBE_SRRCTL_DROP_EN;
+                               if (i == 0 && bootverbose) {
+                                       if_printf(ifp, "flow control %d, "
+                                           "enable RX drop\n", sc->fc);
+                               }
+                               break;
+
+                       default:
+                               break;
+                       }
+               }
+               IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+
+               /* Setup the HW Rx Head and Tail Descriptor Pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+       }
+
+       if (sc->hw.mac.type != ixgbe_mac_82598EB)
+               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), 0);
+
+       rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+
+       /*
+        * Setup RSS
+        */
+       if (IX_ENABLE_HWRSS(sc)) {
+               uint8_t key[IX_NRSSRK * IX_RSSRK_SIZE];
+               int j, r;
+
+               /*
+                * NOTE:
+                * When we reach here, RSS has already been disabled
+                * in ix_stop(), so we could safely configure RSS key
+                * and redirect table.
+                */
+
+               /*
+                * Configure RSS key
+                */
+               toeplitz_get_key(key, sizeof(key));
+               for (i = 0; i < IX_NRSSRK; ++i) {
+                       uint32_t rssrk;
+
+                       rssrk = IX_RSSRK_VAL(key, i);
+                       IX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n",
+                           i, rssrk);
+
+                       IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rssrk);
+               }
+
+               /*
+                * Configure RSS redirect table in following fashion:
+                * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
+                */
+               r = 0;
+               for (j = 0; j < IX_NRETA; ++j) {
+                       uint32_t reta = 0;
+
+                       for (i = 0; i < IX_RETA_SIZE; ++i) {
+                               uint32_t q;
+
+                               q = r % sc->rx_ring_inuse;
+                               reta |= q << (8 * i);
+                               ++r;
+                       }
+                       IX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
+                       IXGBE_WRITE_REG(hw, IXGBE_RETA(j), reta);
+               }
+
+               /*
+                * Enable multiple receive queues.
+                * Enable IPv4 RSS standard hash functions.
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC,
+                   IXGBE_MRQC_RSSEN |
+                   IXGBE_MRQC_RSS_FIELD_IPV4 |
+                   IXGBE_MRQC_RSS_FIELD_IPV4_TCP);
+
+               /*
+                * NOTE:
+                * PCSD must be enabled to enable multiple
+                * receive queues.
+                */
+               rxcsum |= IXGBE_RXCSUM_PCSD;
+       }
+
+       /* NOTE(review): PCSD is also set when RX csum offload is on -- verify against datasheet */
+       if (ifp->if_capenable & IFCAP_RXCSUM)
+               rxcsum |= IXGBE_RXCSUM_PCSD;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+}
+
+/*
+ * Publish refreshed RX descriptors to the chip by moving the RX
+ * tail register to the descriptor just before index 'i'.
+ */
+static __inline void
+ix_rx_refresh(struct ix_rx_ring *rxr, int i)
+{
+       int tail;
+
+       tail = (i == 0) ? rxr->rx_ndesc - 1 : i - 1;
+       IXGBE_WRITE_REG(&rxr->rx_sc->hw, IXGBE_RDT(rxr->rx_idx), tail);
+}
+
+/*
+ * Translate an RX descriptor's checksum status (staterr) into
+ * mbuf csum_flags; 'ptype' is the descriptor's packet-type field.
+ * Only IPv4 packets are handled; non-TCP/UDP packets and IPv4
+ * fragments get at most the IP-header checksum flags.
+ */
+static __inline void
+ix_rxcsum(uint32_t staterr, struct mbuf *mp, uint32_t ptype)
+{
+       if ((ptype &
+            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_IPV4_EX)) == 0) {
+               /* Not IPv4 */
+               return;
+       }
+
+       /* IP header checksum was checked by hardware and had no error */
+       if ((staterr & (IXGBE_RXD_STAT_IPCS | IXGBE_RXDADV_ERR_IPE)) ==
+           IXGBE_RXD_STAT_IPCS)
+               mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
+
+       if ((ptype &
+            (IXGBE_RXDADV_PKTTYPE_TCP | IXGBE_RXDADV_PKTTYPE_UDP)) == 0) {
+               /*
+                * - Neither TCP nor UDP
+                * - IPv4 fragment
+                */
+               return;
+       }
+
+       /* L4 checksum was checked by hardware and had no error */
+       if ((staterr & (IXGBE_RXD_STAT_L4CS | IXGBE_RXDADV_ERR_TCPE)) ==
+           IXGBE_RXD_STAT_L4CS) {
+               mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
+                   CSUM_FRAG_NOT_CHECKED;
+               mp->m_pkthdr.csum_data = htons(0xffff);
+       }
+}
+
+/*
+ * Fill a pktinfo from the RSS hash type reported in the RX
+ * descriptor and attach the Toeplitz hash to the mbuf.  Returns
+ * 'pi' for IPv4 TCP/UDP packets, NULL when the hash type is not
+ * usable (e.g. plain IPv4 hash on a non-UDP or fragmented packet).
+ */
+static __inline struct pktinfo *
+ix_rssinfo(struct mbuf *m, struct pktinfo *pi,
+    uint32_t hash, uint32_t hashtype, uint32_t ptype)
+{
+       if (hashtype == IXGBE_RXDADV_RSSTYPE_IPV4_TCP) {
+               pi->pi_netisr = NETISR_IP;
+               pi->pi_flags = 0;
+               pi->pi_l3proto = IPPROTO_TCP;
+       } else if (hashtype == IXGBE_RXDADV_RSSTYPE_IPV4 &&
+           (ptype & IXGBE_RXDADV_PKTTYPE_UDP) != 0) {
+               pi->pi_netisr = NETISR_IP;
+               pi->pi_flags = 0;
+               pi->pi_l3proto = IPPROTO_UDP;
+       } else {
+               /* Unusable hash type, or not UDP / is fragment */
+               return NULL;
+       }
+
+       m->m_flags |= M_HASH;
+       m->m_pkthdr.hash = toeplitz_hash(hash);
+       return pi;
+}
+
+/*
+ * Re-arm an RX descriptor with the buffer's DMA address and clear
+ * its write-back status so the DD bit starts out clear.
+ */
+static __inline void
+ix_setup_rxdesc(union ixgbe_adv_rx_desc *rxd, const struct ix_rx_buf *rxbuf)
+{
+       rxd->read.pkt_addr = htole64(rxbuf->paddr);
+       rxd->wb.upper.status_error = 0;
+}
+
+/*
+ * Drop the RX fragment at ring index 'i': count an input error
+ * once per frame (on EOP), remember a mid-frame discard via
+ * IX_RXRING_FLAG_DISC so later fragments of the same frame are
+ * dropped too, free any partially assembled chain and re-arm the
+ * descriptor with its existing buffer.
+ */
+static void
+ix_rx_discard(struct ix_rx_ring *rxr, int i, boolean_t eop)
+{
+       struct ix_rx_buf *rxbuf = &rxr->rx_buf[i];
+
+       /*
+        * XXX discard may not be correct
+        */
+       if (eop) {
+               IFNET_STAT_INC(&rxr->rx_sc->arpcom.ac_if, ierrors, 1);
+               rxr->rx_flags &= ~IX_RXRING_FLAG_DISC;
+       } else {
+               rxr->rx_flags |= IX_RXRING_FLAG_DISC;
+       }
+       if (rxbuf->fmp != NULL) {
+               m_freem(rxbuf->fmp);
+               rxbuf->fmp = NULL;
+               rxbuf->lmp = NULL;
+       }
+       ix_setup_rxdesc(&rxr->rx_base[i], rxbuf);
+}
+
+/*
+ * RX completion processing: walk the RX ring from rx_next_check,
+ * assemble multi-descriptor frames (including hardware RSC chains
+ * when the ring has IX_RXRING_FLAG_LRO), attach checksum/VLAN/RSS
+ * metadata, and hand completed frames to the stack.  The RX tail
+ * register is refreshed every rx_wreg_nsegs processed descriptors
+ * and once more at the end.
+ */
+static void
+ix_rxeof(struct ix_rx_ring *rxr)
+{
+       struct ifnet *ifp = &rxr->rx_sc->arpcom.ac_if;
+       int i, nsegs = 0;
+
+       i = rxr->rx_next_check;
+       for (;;) {
+               struct ix_rx_buf *rxbuf, *nbuf = NULL;
+               union ixgbe_adv_rx_desc *cur;
+               struct mbuf *sendmp = NULL, *mp;
+               struct pktinfo *pi = NULL, pi0;
+               uint32_t rsc = 0, ptype, staterr, hash, hashtype;
+               uint16_t len;
+               boolean_t eop;
+
+               cur = &rxr->rx_base[i];
+               staterr = le32toh(cur->wb.upper.status_error);
+
+               /* DD clear: hardware has not finished this descriptor */
+               if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+                       break;
+               ++nsegs;
+
+               rxbuf = &rxr->rx_buf[i];
+               mp = rxbuf->m_head;
+
+               len = le16toh(cur->wb.upper.length);
+               ptype = le32toh(cur->wb.lower.lo_dword.data) &
+                   IXGBE_RXDADV_PKTTYPE_MASK;
+               hash = le32toh(cur->wb.lower.hi_dword.rss);
+               hashtype = le32toh(cur->wb.lower.lo_dword.data) &
+                   IXGBE_RXDADV_RSSTYPE_MASK;
+               eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+
+               /*
+                * Make sure bad packets are discarded
+                */
+               if ((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) ||
+                   (rxr->rx_flags & IX_RXRING_FLAG_DISC)) {
+                       ix_rx_discard(rxr, i, eop);
+                       goto next_desc;
+               }
+
+               bus_dmamap_sync(rxr->rx_tag, rxbuf->map, BUS_DMASYNC_POSTREAD);
+               /* Could not replace the mbuf; recycle it and drop the frame */
+               if (ix_newbuf(rxr, i, FALSE) != 0) {
+                       ix_rx_discard(rxr, i, eop);
+                       goto next_desc;
+               }
+
+               /*
+                * On 82599 which supports a hardware LRO, packets
+                * need not be fragmented across sequential descriptors,
+                * rather the next descriptor is indicated in bits
+                * of the descriptor.  This also means that we might
+                * process more than one packet at a time, something
+                * that has never been true before, it required
+                * eliminating global chain pointers in favor of what
+                * we are doing here.
+                */
+               if (!eop) {
+                       int nextp;
+
+                       /*
+                        * Figure out the next descriptor
+                        * of this frame.
+                        */
+                       if (rxr->rx_flags & IX_RXRING_FLAG_LRO)
+                               rsc = ix_rsc_count(cur);
+                       if (rsc) { /* Get hardware index */
+                               nextp = ((staterr &
+                                   IXGBE_RXDADV_NEXTP_MASK) >>
+                                   IXGBE_RXDADV_NEXTP_SHIFT);
+                       } else { /* Just sequential */
+                               nextp = i + 1;
+                               if (nextp == rxr->rx_ndesc)
+                                       nextp = 0;
+                       }
+                       nbuf = &rxr->rx_buf[nextp];
+                       prefetch(nbuf);
+               }
+               mp->m_len = len;
+
+               /*
+                * Rather than using the fmp/lmp global pointers
+                * we now keep the head of a packet chain in the
+                * buffer struct and pass this along from one
+                * descriptor to the next, until we get EOP.
+                */
+               if (rxbuf->fmp == NULL) {
+                       mp->m_pkthdr.len = len;
+                       rxbuf->fmp = mp;
+                       rxbuf->lmp = mp;
+               } else {
+                       rxbuf->fmp->m_pkthdr.len += len;
+                       rxbuf->lmp->m_next = mp;
+                       rxbuf->lmp = mp;
+               }
+
+               if (nbuf != NULL) {
+                       /*
+                        * Not the last fragment of this frame,
+                        * pass this fragment list on
+                        */
+                       nbuf->fmp = rxbuf->fmp;
+                       nbuf->lmp = rxbuf->lmp;
+               } else {
+                       /*
+                        * Send this frame
+                        */
+                       sendmp = rxbuf->fmp;
+
+                       sendmp->m_pkthdr.rcvif = ifp;
+                       IFNET_STAT_INC(ifp, ipackets, 1);
+#ifdef IX_RSS_DEBUG
+                       rxr->rx_pkts++;
+#endif
+
+                       /* Process vlan info */
+                       if (staterr & IXGBE_RXD_STAT_VP) {
+                               sendmp->m_pkthdr.ether_vlantag =
+                                   le16toh(cur->wb.upper.vlan);
+                               sendmp->m_flags |= M_VLANTAG;
+                       }
+                       if (ifp->if_capenable & IFCAP_RXCSUM)
+                               ix_rxcsum(staterr, sendmp, ptype);
+                       if (ifp->if_capenable & IFCAP_RSS) {
+                               pi = ix_rssinfo(sendmp, &pi0,
+                                   hash, hashtype, ptype);
+                       }
+               }
+               rxbuf->fmp = NULL;
+               rxbuf->lmp = NULL;
+next_desc:
+               /* Advance our pointers to the next descriptor. */
+               if (++i == rxr->rx_ndesc)
+                       i = 0;
+
+               if (sendmp != NULL)
+                       ether_input_pkt(ifp, sendmp, pi);
+
+               if (nsegs >= rxr->rx_wreg_nsegs) {
+                       ix_rx_refresh(rxr, i);
+                       nsegs = 0;
+               }
+       }
+       rxr->rx_next_check = i;
+
+       if (nsegs > 0)
+               ix_rx_refresh(rxr, i);
+}
+
+/*
+ * Enable hardware VLAN tag stripping: via the global VLNCTRL.VME
+ * bit on 82598, via the per-ring RXDCTL.VME bit on later chips.
+ */
+static void
+ix_set_vlan(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t ctrl;
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+               ctrl |= IXGBE_VLNCTRL_VME;
+               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+       } else {
+               int i;
+
+               /*
+                * On 82599 and later chips the VLAN enable is
+                * per queue in RXDCTL
+                */
+               for (i = 0; i < sc->rx_ring_inuse; ++i) {
+                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+                       ctrl |= IXGBE_RXDCTL_VME;
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+               }
+       }
+}
+
+/*
+ * Unmask interrupts: re-enable each handler's serializer, build
+ * the cause mask (fan failure, ECC, SFP and thermal bits depend
+ * on the MAC type), and for MSI/legacy also fold the TX and RX
+ * ring interrupt bits into the mask before writing EIMS.
+ */
+static void
+ix_enable_intr(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t mask, fwsm;
+       int i;
+
+       for (i = 0; i < sc->intr_cnt; ++i)
+               lwkt_serialize_handler_enable(sc->intr_data[i].intr_serialize);
+
+       mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+       /* Enable Fan Failure detection */
+       if (hw->device_id == IXGBE_DEV_ID_82598AT)
+               mask |= IXGBE_EIMS_GPI_SDP1;
+
+       switch (sc->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+               mask |= IXGBE_EIMS_ECC;
+               mask |= IXGBE_EIMS_GPI_SDP0;
+               mask |= IXGBE_EIMS_GPI_SDP1;
+               mask |= IXGBE_EIMS_GPI_SDP2;
+               break;
+       case ixgbe_mac_X540:
+               mask |= IXGBE_EIMS_ECC;
+               /* Detect if Thermal Sensor is enabled */
+               fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+               if (fwsm & IXGBE_FWSM_TS_ENABLED)
+                       mask |= IXGBE_EIMS_TS;
+               /* FALL THROUGH */
+       default:
+               break;
+       }
+       sc->intr_mask = mask;
+
+       /* With MSI-X we use auto clear */
+       if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
+               mask = IXGBE_EIMS_ENABLE_MASK;
+               /* Don't autoclear Link */
+               mask &= ~IXGBE_EIMS_OTHER;
+               mask &= ~IXGBE_EIMS_LSC;
+               IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+       } else {
+               sc->intr_mask |= IX_TX_INTR_MASK |
+                   IX_RX0_INTR_MASK;
+
+               /* MSI/legacy uses at most IX_MIN_RXRING_RSS RX rings */
+               KKASSERT(sc->rx_ring_inuse <= IX_MIN_RXRING_RSS);
+               if (sc->rx_ring_inuse == IX_MIN_RXRING_RSS)
+                       sc->intr_mask |= IX_RX1_INTR_MASK;
+       }
+
+#if 0
+       /*
+       ** Now enable all queues, this is done separately to
+       ** allow for handling the extended (beyond 32) MSIX
+       ** vectors that can be used by 82599
+       */
+       for (int i = 0; i < sc->num_queues; i++, que++)
+               ixgbe_enable_queue(sc, que->msix);
+#else
+       IXGBE_WRITE_REG(hw, IXGBE_EIMS, sc->intr_mask);
+#endif
+
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * Mask all interrupt causes (EIMC, plus the extended EIMC_EX
+ * registers on non-82598 chips) and then disable each handler's
+ * serializer so no handler runs after this returns.
+ */
+static void
+ix_disable_intr(struct ix_softc *sc)
+{
+       int i;
+
+#if 0
+       if (sc->msix_mem)
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
+#endif
+       if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
+       } else {
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
+               IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
+       }
+       IXGBE_WRITE_FLUSH(&sc->hw);
+
+       for (i = 0; i < sc->intr_cnt; ++i)
+               lwkt_serialize_handler_disable(sc->intr_data[i].intr_serialize);
+}
+
+/* Shared-code callback: read a 16-bit PCI config register. */
+uint16_t
+ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
+{
+       struct ixgbe_osdep *osdep = (struct ixgbe_osdep *)hw->back;
+
+       return pci_read_config(osdep->dev, reg, 2);
+}
+
+/* Shared-code callback: write a 16-bit PCI config register. */
+void
+ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
+{
+       struct ixgbe_osdep *osdep = (struct ixgbe_osdep *)hw->back;
+
+       pci_write_config(osdep->dev, reg, value, 2);
+}
+
+/*
+ * Determine and report the PCIe link width/speed of the slot the
+ * adapter sits in.  Most devices use the shared-code routine; the
+ * 82599 quad-port SFP device must instead walk two bridges up the
+ * PCI tree and read that slot's Link Status register.  Warns when
+ * the slot is too slow for full performance.
+ */
+static void
+ix_slot_info(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       device_t dev = sc->dev;
+       struct ixgbe_mac_info *mac = &hw->mac;
+       uint16_t link;
+       uint32_t offset;
+
+       /* For most devices simply call the shared code routine */
+       if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
+               ixgbe_get_bus_info(hw);
+               goto display;
+       }
+
+       /*
+        * For the Quad port adapter we need to parse back
+        * up the PCI tree to find the speed of the expansion
+        * slot into which this adapter is plugged. A bit more work.
+        */
+       dev = device_get_parent(device_get_parent(dev));
+#ifdef IXGBE_DEBUG
+       device_printf(dev, "parent pcib = %x,%x,%x\n",
+           pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
+#endif
+       dev = device_get_parent(device_get_parent(dev));
+#ifdef IXGBE_DEBUG
+       device_printf(dev, "slot pcib = %x,%x,%x\n",
+           pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
+#endif
+       /* Now get the PCI Express Capabilities offset */
+       offset = pci_get_pciecap_ptr(dev);
+       /* ...and read the Link Status Register */
+       link = pci_read_config(dev, offset + PCIER_LINKSTAT, 2);
+       switch (link & IXGBE_PCI_LINK_WIDTH) {
+       case IXGBE_PCI_LINK_WIDTH_1:
+               hw->bus.width = ixgbe_bus_width_pcie_x1;
+               break;
+       case IXGBE_PCI_LINK_WIDTH_2:
+               hw->bus.width = ixgbe_bus_width_pcie_x2;
+               break;
+       case IXGBE_PCI_LINK_WIDTH_4:
+               hw->bus.width = ixgbe_bus_width_pcie_x4;
+               break;
+       case IXGBE_PCI_LINK_WIDTH_8:
+               hw->bus.width = ixgbe_bus_width_pcie_x8;
+               break;
+       default:
+               hw->bus.width = ixgbe_bus_width_unknown;
+               break;
+       }
+
+       switch (link & IXGBE_PCI_LINK_SPEED) {
+       case IXGBE_PCI_LINK_SPEED_2500:
+               hw->bus.speed = ixgbe_bus_speed_2500;
+               break;
+       case IXGBE_PCI_LINK_SPEED_5000:
+               hw->bus.speed = ixgbe_bus_speed_5000;
+               break;
+       case IXGBE_PCI_LINK_SPEED_8000:
+               hw->bus.speed = ixgbe_bus_speed_8000;
+               break;
+       default:
+               hw->bus.speed = ixgbe_bus_speed_unknown;
+               break;
+       }
+
+       mac->ops.set_lan_id(hw);
+
+display:
+       device_printf(dev, "PCI Express Bus: Speed %s %s\n",
+           hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
+           hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
+           hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" : "Unknown",
+           hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
+           hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
+           hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : "Unknown");
+
+       if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP &&
+           hw->bus.width <= ixgbe_bus_width_pcie_x4 &&
+           hw->bus.speed == ixgbe_bus_speed_2500) {
+               device_printf(dev, "For optimal performance a x8 "
+                   "PCIE, or x4 PCIE Gen2 slot is required.\n");
+       } else if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP &&
+           hw->bus.width <= ixgbe_bus_width_pcie_x8 &&
+           hw->bus.speed < ixgbe_bus_speed_8000) {
+               device_printf(dev, "For optimal performance a x8 "
+                   "PCIE Gen3 slot is required.\n");
+       }
+}
+
+/*
+ * Program an IVAR register entry, routing an interrupt cause to
+ * an MSI-X vector:
+ * - entry: queue index of the cause (ignored for type -1 on
+ *   82598, where the "other causes" index is used instead)
+ * - vector: MSI-X vector the cause is routed to
+ * - type: 0 for RX, 1 for TX, -1 for misc/link causes
+ *
+ * 82598 keeps all causes in one IVAR array (TX entries offset by
+ * 64); 82599/X540 use per-queue-pair IVAR registers plus a
+ * separate IVAR_MISC register.
+ */
+static void
+ix_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector,
+    int8_t type)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t ivar, index;
+
+       vector |= IXGBE_IVAR_ALLOC_VAL;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               if (type == -1)
+                       entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
+               else
+                       entry += (type * 64);
+               /* Four 8-bit vector fields per 32-bit IVAR register */
+               index = (entry >> 2) & 0x1F;
+               ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+               ivar &= ~(0xFF << (8 * (entry & 0x3)));
+               ivar |= (vector << (8 * (entry & 0x3)));
+               IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
+               break;
+
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (type == -1) { /* MISC IVAR */
+                       index = (entry & 1) * 8;
+                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+                       ivar &= ~(0xFF << index);
+                       ivar |= (vector << index);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+               } else {        /* RX/TX IVARS */
+                       index = (16 * (entry & 1)) + (8 * type);
+                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
+                       ivar &= ~(0xFF << index);
+                       ivar |= (vector << index);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
+               }
+               /* Missing in the original: avoid silent fallthrough */
+               break;
+
+       default:
+               break;
+       }
+}
+
+#if 0  /* XXX MSI-X IVAR setup; compiled out until MSI-X support returns */
+static void
+ix_configure_ivars(struct ix_softc *sc)
+{
+       struct  ix_queue *que = sc->queues;
+       uint32_t newitr;
+
+       if (ixgbe_max_interrupt_rate > 0)
+               newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
+       else
+               newitr = 0;
+
+       for (int i = 0; i < sc->num_queues; i++, que++) {
+               /* First the RX queue entry */
+               ix_set_ivar(sc, i, que->msix, 0);
+               /* ... and the TX */
+               ix_set_ivar(sc, i, que->msix, 1);
+               /* Set an Initial EITR value */
+               IXGBE_WRITE_REG(&sc->hw,
+                   IXGBE_EITR(que->msix), newitr);
+       }
+
+       /* For the Link interrupt */
+       ix_set_ivar(sc, 1, sc->linkvec, -1);
+}
+#endif
+
+/*
+ * Probe for an SFP+ module on NL-PHY adapters whose module was
+ * absent earlier (sfp_type is "not present").  Returns TRUE when
+ * a supported module has been detected and the optics have been
+ * reconfigured, FALSE otherwise; an unsupported module stops
+ * further probing by clearing sc->sfp_probe.
+ */
+static boolean_t
+ix_sfp_probe(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+
+       if (hw->phy.type == ixgbe_phy_nl &&
+           hw->phy.sfp_type == ixgbe_sfp_type_not_present) {
+               int32_t ret;
+
+               ret = hw->phy.ops.identify_sfp(hw);
+               if (ret)
+                       return FALSE;
+
+               ret = hw->phy.ops.reset(hw);
+               if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+                       if_printf(&sc->arpcom.ac_if,
+                            "Unsupported SFP+ module detected!  "
+                            "Reload driver with supported module.\n");
+                       sc->sfp_probe = FALSE;
+                       return FALSE;
+               }
+               if_printf(&sc->arpcom.ac_if, "SFP+ module detected!\n");
+
+               /* We now have supported optics */
+               sc->sfp_probe = FALSE;
+               /* Set the optics type so system reports correctly */
+               ix_setup_optics(sc);
+
+               return TRUE;
+       }
+       return FALSE;
+}
+
+/*
+ * Link-change handling: query the current link speed/state from
+ * the hardware, then run ix_update_link_status() to act on it.
+ */
+static void
+ix_handle_link(struct ix_softc *sc)
+{
+       ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
+       ix_update_link_status(sc);
+}
+
+/*
+ * Handling SFP module insertion: identify and set up the new
+ * module, then run the multispeed-fiber handler to restart link
+ * setup.  Unsupported modules are reported and left alone.
+ */
+static void
+ix_handle_mod(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t err;
+
+       err = hw->phy.ops.identify_sfp(hw);
+       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               if_printf(&sc->arpcom.ac_if,
+                   "Unsupported SFP+ module type was detected.\n");
+               return;
+       }
+       err = hw->mac.ops.setup_sfp(hw);
+       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               if_printf(&sc->arpcom.ac_if,
+                   "Setup failure - unsupported SFP+ module type.\n");
+               return;
+       }
+       ix_handle_msf(sc);
+}
+
+/*
+ * Handling MSF (multispeed fiber): (re)run link setup using the
+ * advertised speeds, falling back to the hardware's full link
+ * capability list when nothing is advertised.
+ */
+static void
+ix_handle_msf(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t autoneg;
+
+       autoneg = hw->phy.autoneg_advertised;
+       if (!autoneg && hw->mac.ops.get_link_capabilities != NULL) {
+               bool negotiate;
+
+               hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
+       }
+       if (hw->mac.ops.setup_link != NULL)
+               hw->mac.ops.setup_link(hw, autoneg, TRUE);
+}
+
+static void
+ix_update_stats(struct ix_softc *sc)
+{
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       struct ixgbe_hw *hw = &sc->hw;
+       uint32_t missed_rx = 0, bprc, lxon, lxoff, total;
+       uint64_t total_missed_rx = 0;
+       int i;
+
+       sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+       sc->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+       sc->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
+       sc->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+
+       /*
+        * Note: These are for the 8 possible traffic classes, which
+        * in current implementation is unused, therefore only 0 should
+        * read real data.
+        */
+       for (i = 0; i < 8; i++) {
+               uint32_t mp;
+
+               mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+               /* missed_rx tallies misses for the gprc workaround */
+               missed_rx += mp;
+               /* global total per queue */
+               sc->stats.mpc[i] += mp;
+
+               /* Running comprehensive total for stats display */
+               total_missed_rx += sc->stats.mpc[i];
+
+               if (hw->mac.type == ixgbe_mac_82598EB) {
+                       sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+                       sc->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+                       sc->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+                       sc->stats.pxonrxc[i] +=
+                           IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+               } else {
+                       sc->stats.pxonrxc[i] +=
+                           IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+               }
+               sc->stats.pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+               sc->stats.pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+               sc->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               sc->stats.pxon2offc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+       }
+       for (i = 0; i < 16; i++) {
+               sc->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+               sc->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               sc->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+       }
+       sc->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
+       sc->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
+       sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+
+       /* Hardware workaround, gprc counts missed packets */
+       sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+       sc->stats.gprc -= missed_rx;
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
+                   ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+               sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
+                   ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+               sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
+                   ((uint64_t)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+               sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+               sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       } else {
+               sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+               /* 82598 only has a counter in the high register */
+               sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+               sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+               sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+       }
+
+       /*
+        * Workaround: mprc hardware is incorrectly counting
+        * broadcasts, so for now we subtract those.
+        */
+       bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+       sc->stats.bprc += bprc;
+       sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               sc->stats.mprc -= bprc;
+
+       sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+       sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+       sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+       sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+       sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+       sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+
+       lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+       sc->stats.lxontxc += lxon;
+       lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       sc->stats.lxofftxc += lxoff;
+       total = lxon + lxoff;
+
+       sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+       sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+       sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+       sc->stats.gptc -= total;
+       sc->stats.mptc -= total;
+       sc->stats.ptc64 -= total;
+       sc->stats.gotc -= total * ETHER_MIN_LEN;
+
+       sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+       sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+       sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+       sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+       sc->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+       sc->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+       sc->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+       sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+       sc->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+       sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+       sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+       sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+       sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+       sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+       sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+       sc->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
+       sc->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+       sc->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+       /* Only read FCOE on 82599 */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               sc->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+               sc->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+               sc->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+               sc->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+               sc->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+       }
+
+       /* Rx Errors */
+       IFNET_STAT_SET(ifp, iqdrops, total_missed_rx);
+       IFNET_STAT_SET(ifp, ierrors, sc->stats.crcerrs + sc->stats.rlec);
+}
+
+#if 0
+/*
+ * Add sysctl variables, one per MAC statistic, to the system.
+ *
+ * Each node is read-only and points directly at the corresponding
+ * counter in sc->stats, which is refreshed by ix_update_stats().
+ * Currently disabled (#if 0); see ix_add_sysctl().
+ */
+static void
+ix_add_hw_stats(struct ix_softc *sc)
+{
+       device_t dev = sc->dev;
+       struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+       struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+       struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+       struct ixgbe_hw_stats *stats = &sc->stats;
+       struct sysctl_oid *stat_node;
+       struct sysctl_oid_list *stat_list;
+
+       /* MAC stats get their own sub node */
+
+       stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
+                                   CTLFLAG_RD, NULL, "MAC Statistics");
+       stat_list = SYSCTL_CHILDREN(stat_node);
+
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
+                       CTLFLAG_RD, &stats->crcerrs,
+                       "CRC Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
+                       CTLFLAG_RD, &stats->illerrc,
+                       "Illegal Byte Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
+                       CTLFLAG_RD, &stats->errbc,
+                       "Byte Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
+                       CTLFLAG_RD, &stats->mspdc,
+                       "MAC Short Packets Discarded");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
+                       CTLFLAG_RD, &stats->mlfc,
+                       "MAC Local Faults");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
+                       CTLFLAG_RD, &stats->mrfc,
+                       "MAC Remote Faults");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
+                       CTLFLAG_RD, &stats->rlec,
+                       "Receive Length Errors");
+
+       /* Flow Control stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
+                       CTLFLAG_RD, &stats->lxontxc,
+                       "Link XON Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
+                       CTLFLAG_RD, &stats->lxonrxc,
+                       "Link XON Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
+                       CTLFLAG_RD, &stats->lxofftxc,
+                       "Link XOFF Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
+                       CTLFLAG_RD, &stats->lxoffrxc,
+                       "Link XOFF Received");
+
+       /* Packet Reception Stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
+                       CTLFLAG_RD, &stats->tor,
+                       "Total Octets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
+                       CTLFLAG_RD, &stats->gorc,
+                       "Good Octets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
+                       CTLFLAG_RD, &stats->tpr,
+                       "Total Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
+                       CTLFLAG_RD, &stats->gprc,
+                       "Good Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
+                       CTLFLAG_RD, &stats->mprc,
+                       "Multicast Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
+                       CTLFLAG_RD, &stats->bprc,
+                       "Broadcast Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
+                       CTLFLAG_RD, &stats->prc64,
+                       "64 byte frames received ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
+                       CTLFLAG_RD, &stats->prc127,
+                       "65-127 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
+                       CTLFLAG_RD, &stats->prc255,
+                       "128-255 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
+                       CTLFLAG_RD, &stats->prc511,
+                       "256-511 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
+                       CTLFLAG_RD, &stats->prc1023,
+                       "512-1023 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->prc1522,
+                       "1024-1522 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
+                       CTLFLAG_RD, &stats->ruc,
+                       "Receive Undersized");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
+                       CTLFLAG_RD, &stats->rfc,
+                       "Fragmented Packets Received ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
+                       CTLFLAG_RD, &stats->roc,
+                       "Oversized Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
+                       CTLFLAG_RD, &stats->rjc,
+                       "Received Jabber");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
+                       CTLFLAG_RD, &stats->mngprc,
+                       "Management Packets Received");
+       /*
+        * Dropped management packets must export mngpdc; mngptc
+        * (management packets transmitted) is exported below.
+        */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
+                       CTLFLAG_RD, &stats->mngpdc,
+                       "Management Packets Dropped");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
+                       CTLFLAG_RD, &stats->xec,
+                       "Checksum Errors");
+
+       /* Packet Transmission Stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+                       CTLFLAG_RD, &stats->gotc,
+                       "Good Octets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
+                       CTLFLAG_RD, &stats->tpt,
+                       "Total Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+                       CTLFLAG_RD, &stats->gptc,
+                       "Good Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
+                       CTLFLAG_RD, &stats->bptc,
+                       "Broadcast Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
+                       CTLFLAG_RD, &stats->mptc,
+                       "Multicast Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
+                       CTLFLAG_RD, &stats->mngptc,
+                       "Management Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
+                       CTLFLAG_RD, &stats->ptc64,
+                       "64 byte frames transmitted ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
+                       CTLFLAG_RD, &stats->ptc127,
+                       "65-127 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
+                       CTLFLAG_RD, &stats->ptc255,
+                       "128-255 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
+                       CTLFLAG_RD, &stats->ptc511,
+                       "256-511 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
+                       CTLFLAG_RD, &stats->ptc1023,
+                       "512-1023 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->ptc1522,
+                       "1024-1522 byte frames transmitted");
+}
+#endif
+
+/*
+ * Turn on per-ring packet drop: set DROP_EN in each in-use RX
+ * ring's SRRCTL register so a full ring discards packets instead
+ * of stalling the entire RX engine.  Only used when multiple RX
+ * rings are active and flow control is disabled.
+ */
+static void
+ix_enable_rx_drop(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       int ring;
+
+       if (bootverbose) {
+               if_printf(&sc->arpcom.ac_if,
+                   "flow control %d, enable RX drop\n", sc->fc);
+       }
+
+       for (ring = 0; ring < sc->rx_ring_inuse; ++ring) {
+               uint32_t reg;
+
+               /* Read-modify-write SRRCTL to set the drop bit */
+               reg = IXGBE_READ_REG(hw, IXGBE_SRRCTL(ring));
+               reg |= IXGBE_SRRCTL_DROP_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(ring), reg);
+       }
+}
+
+/*
+ * Turn off per-ring packet drop: clear DROP_EN in each in-use RX
+ * ring's SRRCTL register.  Counterpart of ix_enable_rx_drop().
+ */
+static void
+ix_disable_rx_drop(struct ix_softc *sc)
+{
+       struct ixgbe_hw *hw = &sc->hw;
+       int ring;
+
+       if (bootverbose) {
+               if_printf(&sc->arpcom.ac_if,
+                   "flow control %d, disable RX drop\n", sc->fc);
+       }
+
+       for (ring = 0; ring < sc->rx_ring_inuse; ++ring) {
+               uint32_t reg;
+
+               /* Read-modify-write SRRCTL to clear the drop bit */
+               reg = IXGBE_READ_REG(hw, IXGBE_SRRCTL(ring));
+               reg &= ~IXGBE_SRRCTL_DROP_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(ring), reg);
+       }
+}
+
+/*
+ * Sysctl handler for the "flowctrl" node: get/set the flow control
+ * mode.  Valid values are the ixgbe_fc enum members (none, rx pause,
+ * tx pause, full); anything else is rejected with EINVAL.  When the
+ * interface is running, the new mode is programmed into the hardware
+ * immediately and RX drop is toggled to match.
+ */
+static int
+ix_sysctl_flowctrl(SYSCTL_HANDLER_ARGS)
+{
+       struct ix_softc *sc = (struct ix_softc *)arg1;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       int error, fc;
+
+       fc = sc->fc;
+       error = sysctl_handle_int(oidp, &fc, 0, req);
+       if (error || req->newptr == NULL)
+               return error;
+
+       /* Validate the requested mode before touching any state */
+       switch (fc) {
+       case ixgbe_fc_rx_pause:
+       case ixgbe_fc_tx_pause:
+       case ixgbe_fc_full:
+       case ixgbe_fc_none:
+               break;
+       default:
+               return EINVAL;
+       }
+
+       ifnet_serialize_all(ifp);
+
+       /* Don't bother if it's not changed */
+       if (sc->fc == fc)
+               goto done;
+       sc->fc = fc;
+
+       /* Don't do anything, if the interface is not up yet */
+       if ((ifp->if_flags & IFF_RUNNING) == 0)
+               goto done;
+
+       /*
+        * RX drop is only used with multiple RX rings and disabled
+        * flow control (see ix_enable_rx_drop()); keep it consistent
+        * with the mode just configured.
+        */
+       if (sc->rx_ring_inuse > 1) {
+               switch (sc->fc) {
+               case ixgbe_fc_rx_pause:
+               case ixgbe_fc_tx_pause:
+               case ixgbe_fc_full:
+                       ix_disable_rx_drop(sc);
+                       break;
+
+               case ixgbe_fc_none:
+                       ix_enable_rx_drop(sc);
+                       break;
+
+               default:
+                       panic("leading fc check mismatch");
+               }
+       }
+
+       sc->hw.fc.requested_mode = sc->fc;
+       /* Don't autoneg if forcing a value */
+       sc->hw.fc.disable_fc_autoneg = TRUE;
+       ixgbe_fc_enable(&sc->hw);
+
+done:
+       ifnet_deserialize_all(ifp);
+       return error;
+}
+
+#ifdef foo
+/* XXX not working properly w/ 82599 connected w/ DAC */
+/* XXX only work after the interface is up */
+/*
+ * Sysctl handler for "advspeed": restrict the autoneg advertised
+ * speed list.  0 auto, 1 1Gb, 2 100Mb, 3 1Gb/10Gb.  Only supported
+ * on copper PHYs or multispeed fiber; 100Mb additionally requires
+ * an X540 MAC.
+ */
+static int
+ix_sysctl_advspeed(SYSCTL_HANDLER_ARGS)
+{
+       struct ix_softc *sc = (struct ix_softc *)arg1;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       struct ixgbe_hw *hw = &sc->hw;
+       ixgbe_link_speed speed;
+       int error, advspeed;
+
+       advspeed = sc->advspeed;
+       error = sysctl_handle_int(oidp, &advspeed, 0, req);
+       if (error || req->newptr == NULL)
+               return error;
+
+       /* Speed can only be forced on copper or multispeed fiber */
+       if (!(hw->phy.media_type == ixgbe_media_type_copper ||
+           hw->phy.multispeed_fiber))
+               return EOPNOTSUPP;
+       if (hw->mac.ops.setup_link == NULL)
+               return EOPNOTSUPP;
+
+       /* Map the sysctl value to a hardware speed mask */
+       switch (advspeed) {
+       case 0: /* auto */
+               speed = IXGBE_LINK_SPEED_UNKNOWN;
+               break;
+
+       case 1: /* 1Gb */
+               speed = IXGBE_LINK_SPEED_1GB_FULL;
+               break;
+
+       case 2: /* 100Mb */
+               speed = IXGBE_LINK_SPEED_100_FULL;
+               break;
+
+       case 3: /* 1Gb/10Gb */
+               speed = IXGBE_LINK_SPEED_1GB_FULL |
+                   IXGBE_LINK_SPEED_10GB_FULL;
+               break;
+
+       default:
+               return EINVAL;
+       }
+
+       ifnet_serialize_all(ifp);
+
+       if (sc->advspeed == advspeed) /* no change */
+               goto done;
+
+       /* 100Mb advertisement is only available on X540 */
+       if ((speed & IXGBE_LINK_SPEED_100_FULL) &&
+           hw->mac.type != ixgbe_mac_X540) {
+               error = EOPNOTSUPP;
+               goto done;
+       }
+
+       sc->advspeed = advspeed;
+
+       /* Takes effect immediately only when running */
+       if ((ifp->if_flags & IFF_RUNNING) == 0)
+               goto done;
+
+       if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+               /* "auto": fall back to normal link configuration */
+               ix_config_link(sc);
+       } else {
+               hw->mac.autotry_restart = TRUE;
+               /*
+                * NOTE(review): the third argument of setup_link is
+                * sc->link_up here -- confirm the method expects the
+                * current link state and not an autoneg-wait flag.
+                */
+               hw->mac.ops.setup_link(hw, speed, sc->link_up);
+       }
+
+done:
+       ifnet_deserialize_all(ifp);
+       return error;
+}
+#endif
+
+/*
+ * Allocate and populate the array of serializers protecting this
+ * interface: the main serializer first, then one per RX ring, then
+ * one per TX ring.  The array is consumed by the ifnet serialize
+ * methods (ix_serialize() and friends).
+ */
+static void
+ix_setup_serialize(struct ix_softc *sc)
+{
+       int i = 0, j;
+
+       /* Main + RX + TX */
+       sc->nserialize = 1 + sc->rx_ring_cnt + sc->tx_ring_cnt;
+       sc->serializes =
+           kmalloc(sc->nserialize * sizeof(struct lwkt_serialize *),
+               M_DEVBUF, M_WAITOK | M_ZERO);
+
+       /*
+        * Setup serializes
+        *
+        * NOTE: Order is critical
+        */
+
+       KKASSERT(i < sc->nserialize);
+       sc->serializes[i++] = &sc->main_serialize;
+
+       for (j = 0; j < sc->rx_ring_cnt; ++j) {
+               KKASSERT(i < sc->nserialize);
+               sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
+       }
+
+       for (j = 0; j < sc->tx_ring_cnt; ++j) {
+               KKASSERT(i < sc->nserialize);
+               sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
+       }
+
+       /* Every slot must have been filled exactly once */
+       KKASSERT(i == sc->nserialize);
+}
+
+/*
+ * Allocate the MSI or legacy INTx interrupt and assign interrupt
+ * vectors/CPUs to the RX and TX rings.  MSI-X is not handled here
+ * yet (see intr_type checks elsewhere).  Returns 0 on success or
+ * ENXIO if the IRQ resource cannot be allocated.
+ */
+static int
+ix_alloc_intr(struct ix_softc *sc)
+{
+       struct ix_intr_data *intr;
+       u_int intr_flags;
+       int i;
+
+       /* Discard interrupt data from a previous allocation, if any */
+       if (sc->intr_data != NULL)
+               kfree(sc->intr_data, M_DEVBUF);
+
+       /* MSI/legacy: a single vector services both RX and TX */
+       sc->intr_cnt = 1;
+       sc->intr_data = kmalloc(sizeof(struct ix_intr_data), M_DEVBUF,
+           M_WAITOK | M_ZERO);
+       intr = &sc->intr_data[0];
+
+       /*
+        * Allocate MSI/legacy interrupt resource
+        */
+       sc->intr_type = pci_alloc_1intr(sc->dev, ix_msi_enable,
+           &intr->intr_rid, &intr_flags);
+
+       intr->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
+           &intr->intr_rid, intr_flags);
+       if (intr->intr_res == NULL) {
+               device_printf(sc->dev, "Unable to allocate bus resource: "
+                   "interrupt\n");
+               /*
+                * NOTE(review): sc->intr_data stays allocated and a
+                * granted MSI is not released on this path -- presumably
+                * the caller's failure path runs ix_free_intr(); verify.
+                */
+               return ENXIO;
+       }
+
+       /*
+        * NOTE(review): intr_desc is not initialized here -- confirm it
+        * is set before ix_setup_intr() passes it to the bus code.
+        */
+       intr->intr_serialize = &sc->main_serialize;
+       intr->intr_cpuid = rman_get_cpuid(intr->intr_res);
+       intr->intr_func = ix_intr;
+       intr->intr_funcarg = sc;
+       intr->intr_rate = IX_INTR_RATE;
+       intr->intr_use = IX_INTR_USE_RXTX;
+
+       /* All TX rings share the single interrupt's CPU and TX vector */
+       for (i = 0; i < sc->tx_ring_cnt; ++i) {
+               sc->tx_rings[i].tx_intr_cpuid = intr->intr_cpuid;
+               sc->tx_rings[i].tx_intr_vec = IX_TX_INTR_VEC;
+       }
+
+       for (i = 0; i < sc->rx_ring_cnt; ++i)
+               sc->rx_rings[i].rx_intr_vec = IX_RX0_INTR_VEC;
+
+       ix_set_ring_inuse(sc, FALSE);
+
+       /* With RSS on MSI/legacy, the second RX ring gets its own vector */
+       KKASSERT(sc->rx_ring_inuse <= IX_MIN_RXRING_RSS);
+       if (sc->rx_ring_inuse == IX_MIN_RXRING_RSS)
+               sc->rx_rings[1].rx_intr_vec = IX_RX1_INTR_VEC;
+
+       return 0;
+}
+
+/*
+ * Release the interrupt resource(s) set up by ix_alloc_intr().
+ *
+ * The intr_data pointer is cleared after kfree() so that a repeated
+ * call is a no-op (the function guards on NULL at entry) and a later
+ * ix_alloc_intr() never kfree()s a stale pointer.
+ */
+static void
+ix_free_intr(struct ix_softc *sc)
+{
+       if (sc->intr_data == NULL)
+               return;
+
+       if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
+               struct ix_intr_data *intr = &sc->intr_data[0];
+
+               /* MSI/legacy uses exactly one vector */
+               KKASSERT(sc->intr_cnt == 1);
+               if (intr->intr_res != NULL) {
+                       bus_release_resource(sc->dev, SYS_RES_IRQ,
+                           intr->intr_rid, intr->intr_res);
+               }
+               if (sc->intr_type == PCI_INTR_TYPE_MSI)
+                       pci_release_msi(sc->dev);
+       } else {
+               /* TODO */
+       }
+       kfree(sc->intr_data, M_DEVBUF);
+       sc->intr_data = NULL;
+}
+
+/*
+ * Recompute how many RX and TX rings will actually be used for the
+ * current interrupt/polling configuration.
+ */
+static void
+ix_set_ring_inuse(struct ix_softc *sc, boolean_t polling)
+{
+       sc->rx_ring_inuse = ix_get_rxring_inuse(sc, polling);
+       sc->tx_ring_inuse = ix_get_txring_inuse(sc, polling);
+
+       if (!bootverbose)
+               return;
+       if_printf(&sc->arpcom.ac_if,
+           "RX rings %d/%d, TX rings %d/%d\n",
+           sc->rx_ring_inuse, sc->rx_ring_cnt,
+           sc->tx_ring_inuse, sc->tx_ring_cnt);
+}
+
+/*
+ * Number of RX rings to use for the given configuration.
+ */
+static int
+ix_get_rxring_inuse(const struct ix_softc *sc, boolean_t polling)
+{
+       /* Without hardware RSS only a single RX ring is used */
+       if (!IX_ENABLE_HWRSS(sc))
+               return 1;
+
+       /* Polling can spread the load across every RX ring */
+       if (polling)
+               return sc->rx_ring_cnt;
+
+       /* MSI/legacy interrupt still drives a reduced RSS ring set */
+       if (sc->intr_type != PCI_INTR_TYPE_MSIX)
+               return IX_MIN_RXRING_RSS;
+
+       return 1; /* TODO */
+}
+
+/*
+ * Number of TX rings to use for the given configuration.
+ */
+static int
+ix_get_txring_inuse(const struct ix_softc *sc, boolean_t polling)
+{
+       /* Without hardware TSS only a single TX ring is used */
+       if (!IX_ENABLE_HWTSS(sc))
+               return 1;
+
+       /* Polling can drive every TX ring */
+       if (polling)
+               return sc->tx_ring_cnt;
+
+       /* MSI/legacy interrupt drives a single TX ring */
+       if (sc->intr_type != PCI_INTR_TYPE_MSIX)
+               return 1;
+
+       return 1; /* TODO */
+}
+
+/*
+ * Install the interrupt handler for every entry in sc->intr_data.
+ * On failure, handlers installed so far are torn down and the bus
+ * error is returned.
+ */
+static int
+ix_setup_intr(struct ix_softc *sc)
+{
+       int i;
+
+       for (i = 0; i < sc->intr_cnt; ++i) {
+               struct ix_intr_data *intr = &sc->intr_data[i];
+               int error;
+
+               error = bus_setup_intr_descr(sc->dev, intr->intr_res,
+                   INTR_MPSAFE, intr->intr_func, intr->intr_funcarg,
+                   &intr->intr_hand, intr->intr_serialize, intr->intr_desc);
+               if (error) {
+                       device_printf(sc->dev, "can't setup %dth intr\n", i);
+                       /* Undo the i handlers already installed */
+                       ix_teardown_intr(sc, i);
+                       return error;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Tear down the first intr_cnt installed interrupt handlers.
+ * A NULL intr_data means nothing was ever allocated; bail out.
+ */
+static void
+ix_teardown_intr(struct ix_softc *sc, int intr_cnt)
+{
+       struct ix_intr_data *intr;
+       int i;
+
+       if (sc->intr_data == NULL)
+               return;
+
+       for (i = 0; i < intr_cnt; ++i) {
+               intr = &sc->intr_data[i];
+               bus_teardown_intr(sc->dev, intr->intr_res, intr->intr_hand);
+       }
+}
+
+/* ifnet.if_serialize: enter the serializer set selected by 'slz' */
+static void
+ix_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+       struct ix_softc *sc = ifp->if_softc;
+
+       ifnet_serialize_array_enter(sc->serializes, sc->nserialize, slz);
+}
+
+/* ifnet.if_deserialize: exit the serializer set selected by 'slz' */
+static void
+ix_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+       struct ix_softc *sc = ifp->if_softc;
+
+       ifnet_serialize_array_exit(sc->serializes, sc->nserialize, slz);
+}
+
+/*
+ * ifnet.if_tryserialize: non-blocking attempt to enter the serializer
+ * set selected by 'slz'; returns the array-try result to the caller.
+ */
+static int
+ix_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
+{
+       struct ix_softc *sc = ifp->if_softc;
+
+       return ifnet_serialize_array_try(sc->serializes, sc->nserialize, slz);
+}
+
+#ifdef INVARIANTS
+
+/*
+ * ifnet.if_serialize_assert (INVARIANTS only): assert that the
+ * serializer set selected by 'slz' is (not) held as claimed.
+ */
+static void
+ix_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
+    boolean_t serialized)
+{
+       struct ix_softc *sc = ifp->if_softc;
+
+       ifnet_serialize_array_assert(sc->serializes, sc->nserialize, slz,
+           serialized);
+}
+
+#endif /* INVARIANTS */
+
+/*
+ * Destroy all TX and RX ring data structures and the parent DMA tag.
+ * Each pointer is cleared after release so this function is safe to
+ * call more than once, e.g. from a partially failed attach path.
+ */
+static void
+ix_free_rings(struct ix_softc *sc)
+{
+       int i;
+
+       if (sc->tx_rings != NULL) {
+               for (i = 0; i < sc->tx_ring_cnt; ++i) {
+                       struct ix_tx_ring *txr = &sc->tx_rings[i];
+
+                       ix_destroy_tx_ring(txr, txr->tx_ndesc);
+               }
+               kfree(sc->tx_rings, M_DEVBUF);
+               sc->tx_rings = NULL;
+       }
+
+       if (sc->rx_rings != NULL) {
+               for (i = 0; i < sc->rx_ring_cnt; ++i) {
+                       struct ix_rx_ring *rxr = &sc->rx_rings[i];
+
+                       ix_destroy_rx_ring(rxr, rxr->rx_ndesc);
+               }
+               kfree(sc->rx_rings, M_DEVBUF);
+               sc->rx_rings = NULL;
+       }
+
+       if (sc->parent_tag != NULL) {
+               bus_dma_tag_destroy(sc->parent_tag);
+               sc->parent_tag = NULL;
+       }
+}
+
+/*
+ * Per-subqueue TX watchdog: invoked when a TX ring's watchdog timer
+ * expires without the ring making progress.  Dumps diagnostic ring
+ * state, reinitializes the hardware and reschedules transmission on
+ * all active TX rings.
+ */
+static void
+ix_watchdog(struct ifaltq_subque *ifsq)
+{
+       struct ix_tx_ring *txr = ifsq_get_priv(ifsq);
+       struct ifnet *ifp = ifsq_get_ifp(ifsq);
+       struct ix_softc *sc = ifp->if_softc;
+       int i;
+
+       KKASSERT(txr->tx_ifsq == ifsq);
+       ASSERT_IFNET_SERIALIZED_ALL(ifp);
+
+       /*
+        * If the interface has been paused then don't do the watchdog check
+        */
+       if (IXGBE_READ_REG(&sc->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
+               /* TX is flow-controlled, not hung; re-arm the timer */
+               txr->tx_watchdog.wd_timer = 5;
+               return;
+       }
+
+       if_printf(ifp, "Watchdog timeout -- resetting\n");
+       if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->tx_idx,
+           IXGBE_READ_REG(&sc->hw, IXGBE_TDH(txr->tx_idx)),
+           IXGBE_READ_REG(&sc->hw, IXGBE_TDT(txr->tx_idx)));
+       if_printf(ifp, "TX(%d) desc avail = %d, next TX to Clean = %d\n",
+           txr->tx_idx, txr->tx_avail, txr->tx_next_clean);
+
+       /* Full reinit, then kick every in-use TX ring back to life */
+       ix_init(sc);
+       for (i = 0; i < sc->tx_ring_inuse; ++i)
+               ifsq_devstart_sched(sc->tx_rings[i].tx_ifsq);
+}
+
+/*
+ * Release any mbufs still attached to the TX ring's buffers and
+ * unload their DMA maps.
+ */
+static void
+ix_free_tx_ring(struct ix_tx_ring *txr)
+{
+       struct ix_tx_buf *txbuf;
+       int idx;
+
+       for (idx = 0; idx < txr->tx_ndesc; ++idx) {
+               txbuf = &txr->tx_buf[idx];
+               if (txbuf->m_head == NULL)
+                       continue;
+
+               bus_dmamap_unload(txr->tx_tag, txbuf->map);
+               m_freem(txbuf->m_head);
+               txbuf->m_head = NULL;
+       }
+}
+
+/*
+ * Release all mbufs held by the RX ring: any unfinished mbuf chain
+ * tracked by fmp/lmp, plus the currently loaded RX buffer, whose DMA
+ * map is also unloaded.
+ */
+static void
+ix_free_rx_ring(struct ix_rx_ring *rxr)
+{
+       struct ix_rx_buf *rxbuf;
+       int idx;
+
+       for (idx = 0; idx < rxr->rx_ndesc; ++idx) {
+               rxbuf = &rxr->rx_buf[idx];
+
+               if (rxbuf->fmp == NULL) {
+                       /* No chain pending; lmp must be clear too */
+                       KKASSERT(rxbuf->lmp == NULL);
+               } else {
+                       m_freem(rxbuf->fmp);
+                       rxbuf->fmp = NULL;
+                       rxbuf->lmp = NULL;
+               }
+
+               if (rxbuf->m_head != NULL) {
+                       bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
+                       m_freem(rxbuf->m_head);
+                       rxbuf->m_head = NULL;
+               }
+       }
+}
+
+/*
+ * Allocate a fresh receive mbuf, DMA-load it via the ring's spare
+ * map, install it in RX buffer slot 'i' and write its physical
+ * address into the corresponding RX descriptor.
+ *
+ * 'wait' selects blocking allocation and enables diagnostics on
+ * failure.  Returns 0 on success, ENOBUFS or a busdma error code
+ * otherwise; on failure the slot is left untouched.
+ */
+static int
+ix_newbuf(struct ix_rx_ring *rxr, int i, boolean_t wait)
+{
+       struct mbuf *m;
+       bus_dma_segment_t seg;
+       bus_dmamap_t map;
+       struct ix_rx_buf *rxbuf;
+       int flags, error, nseg;
+
+       flags = MB_DONTWAIT;
+       if (__predict_false(wait))
+               flags = MB_WAIT;
+
+       m = m_getjcl(flags, MT_DATA, M_PKTHDR, rxr->rx_mbuf_sz);
+       if (m == NULL) {
+               if (wait) {
+                       if_printf(&rxr->rx_sc->arpcom.ac_if,
+                           "Unable to allocate RX mbuf\n");
+               }
+               return ENOBUFS;
+       }
+       m->m_len = m->m_pkthdr.len = rxr->rx_mbuf_sz;
+
+       /*
+        * Load into the spare map first so the slot's current mapping
+        * stays intact if the load fails.
+        */
+       error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
+           rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
+       if (error) {
+               m_freem(m);
+               if (wait) {
+                       if_printf(&rxr->rx_sc->arpcom.ac_if,
+                           "Unable to load RX mbuf\n");
+               }
+               return error;
+       }
+
+       rxbuf = &rxr->rx_buf[i];
+       if (rxbuf->m_head != NULL)
+               bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
+
+       /* Swap maps: the slot takes the loaded spare, keeps its old one as spare */
+       map = rxbuf->map;
+       rxbuf->map = rxr->rx_sparemap;
+       rxr->rx_sparemap = map;
+
+       rxbuf->m_head = m;
+       rxbuf->paddr = seg.ds_addr;
+
+       ix_setup_rxdesc(&rxr->rx_base[i], rxbuf);
+       return 0;
+}
+
+/*
+ * Create the hw.<dev> sysctl tree for this interface: ring counts,
+ * descriptor counts, TX/RX batching knobs, the interrupt rate node
+ * (only when an RX/TX interrupt exists), optional RSS debug nodes
+ * and the flow control node.
+ */
+static void
+ix_add_sysctl(struct ix_softc *sc)
+{
+#ifdef IX_RSS_DEBUG
+       char node[32];
+#endif
+       int i, add;
+
+       sysctl_ctx_init(&sc->sysctl_ctx);
+       sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
+           SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
+           device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
+       if (sc->sysctl_tree == NULL) {
+               device_printf(sc->dev, "can't add sysctl node\n");
+               return;
+       }
+
+       SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
+       SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
+           "# of RX rings used");
+       SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "txr", CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
+       SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "txr_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
+           "# of TX rings used");
+       SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "rxd", CTLTYPE_INT | CTLFLAG_RD,
+           sc, 0, ix_sysctl_rxd, "I",
+           "# of RX descs");
+       SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "txd", CTLTYPE_INT | CTLFLAG_RD,
+           sc, 0, ix_sysctl_txd, "I",
+           "# of TX descs");
+       SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
+           sc, 0, ix_sysctl_tx_wreg_nsegs, "I",
+           "# of segments sent before write to hardware register");
+       SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "rx_wreg_nsegs", CTLTYPE_INT | CTLFLAG_RW,
+           sc, 0, ix_sysctl_rx_wreg_nsegs, "I",
+           "# of received segments sent before write to hardware register");
+       SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
+           sc, 0, ix_sysctl_tx_intr_nsegs, "I",
+           "# of segments per TX interrupt");
+
+       /* "intr_rate" only makes sense if some vector handles RX/TX */
+       add = 0;
+       for (i = 0; i < sc->intr_cnt; ++i) {
+               if (sc->intr_data[i].intr_use == IX_INTR_USE_RXTX) {
+                       add = 1;
+                       break;
+               }
+       }
+       if (add) {
+               SYSCTL_ADD_PROC(&sc->sysctl_ctx,
+                   SYSCTL_CHILDREN(sc->sysctl_tree),
+                   OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
+                   sc, 0, ix_sysctl_intr_rate, "I",
+                   "interrupt rate");
+       }
+
+#ifdef IX_RSS_DEBUG
+       SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
+           "RSS debug level");
+       /* One per-ring packet counter node, rx0_pkt .. rxN_pkt */
+       for (i = 0; i < sc->rx_ring_cnt; ++i) {
+               ksnprintf(node, sizeof(node), "rx%d_pkt", i);
+               SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
+                   SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
+                   CTLFLAG_RW, &sc->rx_rings[i].rx_pkts, "RXed packets");
+       }
+#endif
+
+       SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "flowctrl", CTLTYPE_INT | CTLFLAG_RW,
+           sc, 0, ix_sysctl_flowctrl, "I",
+           "flow control, 0 - off, 1 - rx pause, 2 - tx pause, 3 - full");
+
+#ifdef foo
+       /*
+        * Allow a kind of speed control by forcing the autoneg
+        * advertised speed list to only a certain value, this
+        * supports 1G on 82599 devices, and 100Mb on X540.
+        */
+       SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+           OID_AUTO, "advspeed", CTLTYPE_INT | CTLFLAG_RW,
+           sc, 0, ix_sysctl_advspeed, "I",
+           "advertised link speed, "
+           "0 - auto, 1 - 1Gb, 2 - 100Mb, 3 - 1Gb/10Gb");
+#endif
+
+#if 0
+       ix_add_hw_stats(sc);
+#endif
+
+}
+
+/*
+ * Sysctl handler for tx_wreg_nsegs: the number of sent segments queued
+ * up before the TX hardware register is written.  The current value is
+ * read from the first TX ring (all rings carry the same setting); on
+ * update the new value is copied to every TX ring while all ifnet
+ * serializers are held.
+ */
+static int
+ix_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
+{
+       struct ix_softc *sc = (void *)arg1;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       int error, nsegs, i;
+
+       nsegs = sc->tx_rings[0].tx_wreg_nsegs;
+       error = sysctl_handle_int(oidp, &nsegs, 0, req);
+       if (error || req->newptr == NULL)
+               return error;
+       if (nsegs < 0)
+               return EINVAL;
+
+       /* Propagate the new value to all TX rings atomically w.r.t. the stack */
+       ifnet_serialize_all(ifp);
+       for (i = 0; i < sc->tx_ring_cnt; ++i)
+               sc->tx_rings[i].tx_wreg_nsegs = nsegs;
+       ifnet_deserialize_all(ifp);
+
+       return 0;
+}
+
+/*
+ * Sysctl handler for rx_wreg_nsegs: the number of received segments
+ * processed before the RX hardware register is written.  The current
+ * value is read from the first RX ring (all rings carry the same
+ * setting); on update the new value is copied to every RX ring while
+ * all ifnet serializers are held.
+ */
+static int
+ix_sysctl_rx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
+{
+       struct ix_softc *sc = (void *)arg1;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       int error, nsegs, i;
+
+       nsegs = sc->rx_rings[0].rx_wreg_nsegs;
+       error = sysctl_handle_int(oidp, &nsegs, 0, req);
+       if (error || req->newptr == NULL)
+               return error;
+       if (nsegs < 0)
+               return EINVAL;
+
+       /* Propagate the new value to all RX rings atomically w.r.t. the stack */
+       ifnet_serialize_all(ifp);
+       for (i = 0; i < sc->rx_ring_cnt; ++i)
+               sc->rx_rings[i].rx_wreg_nsegs = nsegs;
+       ifnet_deserialize_all(ifp);
+
+       return 0;
+}
+
+/*
+ * Read-only sysctl reporting the number of TX descriptors per ring,
+ * taken from the first TX ring.
+ */
+static int
+ix_sysctl_txd(SYSCTL_HANDLER_ARGS)
+{
+       struct ix_softc *sc = (void *)arg1;
+       int txd;
+
+       txd = sc->tx_rings[0].tx_ndesc;
+       return sysctl_handle_int(oidp, &txd, 0, req);
+}
+
+/*
+ * Read-only sysctl reporting the number of RX descriptors per ring,
+ * taken from the first RX ring.
+ */
+static int
+ix_sysctl_rxd(SYSCTL_HANDLER_ARGS)
+{
+       struct ix_softc *sc = (void *)arg1;
+       int rxd;
+
+       rxd = sc->rx_rings[0].rx_ndesc;
+       return sysctl_handle_int(oidp, &rxd, 0, req);
+}
+
+/*
+ * Sysctl handler for tx_intr_nsegs: the number of sent segments after
+ * which a TX interrupt is requested.  On update the value is rejected
+ * unless it leaves at least IX_MAX_SCATTER + IX_TX_RESERVED descriptors
+ * of headroom in the ring; an accepted value is copied to every TX ring
+ * while all ifnet serializers are held.
+ */
+static int
+ix_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
+{
+       struct ix_softc *sc = (void *)arg1;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       struct ix_tx_ring *txr = &sc->tx_rings[0];
+       int error, nsegs;
+
+       nsegs = txr->tx_intr_nsegs;
+       error = sysctl_handle_int(oidp, &nsegs, 0, req);
+       if (error || req->newptr == NULL)
+               return error;
+       if (nsegs < 0)
+               return EINVAL;
+
+       ifnet_serialize_all(ifp);
+
+       /* Reject values that would let the interrupt point overrun the ring */
+       if (nsegs >= txr->tx_ndesc - IX_MAX_SCATTER - IX_TX_RESERVED) {
+               error = EINVAL;
+       } else {
+               int i;
+
+               error = 0;
+               for (i = 0; i < sc->tx_ring_cnt; ++i)
+                       sc->tx_rings[i].tx_intr_nsegs = nsegs;
+       }
+
+       ifnet_deserialize_all(ifp);
+
+       return error;
+}
+
+/*
+ * Program the EITR (interrupt throttling) register of vector 'idx'
+ * from an interrupt rate given in interrupts/second.  The interval is
+ * computed as 1000000000 / 256 / rate, i.e. the interrupt period in
+ * 256ns units, and is then clamped to the MAC's interval field: 82598
+ * has a 16-bit field, later MACs a 12-bit field whose low bits are
+ * reserved and masked off (see IX_EITR_INTVL_* in if_ix.h).  Bits of
+ * EITR outside the interval field are preserved.
+ */
+static void
+ix_set_eitr(struct ix_softc *sc, int idx, int rate)
+{
+       uint32_t eitr, eitr_intvl;
+
+       eitr = IXGBE_READ_REG(&sc->hw, IXGBE_EITR(idx));
+       eitr_intvl = 1000000000 / 256 / rate;
+
+       if (sc->hw.mac.type == ixgbe_mac_82598EB) {
+               eitr &= ~IX_EITR_INTVL_MASK_82598;
+               if (eitr_intvl == 0)
+                       eitr_intvl = 1;
+               else if (eitr_intvl > IX_EITR_INTVL_MASK_82598)
+                       eitr_intvl = IX_EITR_INTVL_MASK_82598;
+       } else {
+               eitr &= ~IX_EITR_INTVL_MASK;
+
+               eitr_intvl &= ~IX_EITR_INTVL_RSVD_MASK;
+               if (eitr_intvl == 0)
+                       eitr_intvl = IX_EITR_INTVL_MIN;
+               else if (eitr_intvl > IX_EITR_INTVL_MAX)
+                       eitr_intvl = IX_EITR_INTVL_MAX;
+       }
+       eitr |= eitr_intvl;
+
+       IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(idx), eitr);
+}
+
+/*
+ * Sysctl handler for intr_rate: the interrupt rate of the RX/TX
+ * interrupt vectors.  The current rate is taken from the first vector
+ * marked IX_INTR_USE_RXTX; on update every RXTX vector is given the
+ * new rate and, if the interface is running, its EITR register is
+ * reprogrammed immediately.  Rates <= 0 are rejected with EINVAL.
+ */
+static int
+ix_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
+{
+       struct ix_softc *sc = (void *)arg1;
+       struct ifnet *ifp = &sc->arpcom.ac_if;
+       int error, rate, i;
+
+       rate = 0;
+       for (i = 0; i < sc->intr_cnt; ++i) {
+               if (sc->intr_data[i].intr_use == IX_INTR_USE_RXTX) {
+                       rate = sc->intr_data[i].intr_rate;
+                       break;
+               }
+       }
+
+       error = sysctl_handle_int(oidp, &rate, 0, req);
+       if (error || req->newptr == NULL)
+               return error;
+       if (rate <= 0)
+               return EINVAL;
+
+       ifnet_serialize_all(ifp);
+
+       for (i = 0; i < sc->intr_cnt; ++i) {
+               if (sc->intr_data[i].intr_use == IX_INTR_USE_RXTX) {
+                       sc->intr_data[i].intr_rate = rate;
+                       if (ifp->if_flags & IFF_RUNNING)
+                               ix_set_eitr(sc, i, rate);
+               }
+       }
+
+       ifnet_deserialize_all(ifp);
+
+       return error;
+}
+
+/*
+ * Select the CPU on which the periodic timer callout runs: CPU0 when
+ * polling or MSI-X is in use (XXX fixed), otherwise the CPU that
+ * services the MSI/legacy interrupt.
+ */
+static void
+ix_set_timer_cpuid(struct ix_softc *sc, boolean_t polling)
+{
+       if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
+               sc->timer_cpuid = 0; /* XXX fixed */
+       else
+               sc->timer_cpuid = rman_get_cpuid(sc->intr_data[0].intr_res);
+}
diff --git a/sys/dev/netif/ix/if_ix.h b/sys/dev/netif/ix/if_ix.h
new file mode 100644 (file)
index 0000000..c12fda9
--- /dev/null
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2001-2013, Intel Corporation 
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright notice, 
+ *     this list of conditions and the following disclaimer.
+ *
+ *  2. Redistributions in binary form must reproduce the above copyright 
+ *     notice, this list of conditions and the following disclaimer in the 
+ *     documentation and/or other materials provided with the distribution.
+ *
+ *  3. Neither the name of the Intel Corporation nor the names of its 
+ *     contributors may be used to endorse or promote products derived from 
+ *     this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IF_IX_H_
+#define _IF_IX_H_
+
+/* Tunables */
+
+/*
+ * RX ring count
+ */
+#define IX_MAX_RXRING          16
+#define IX_MIN_RXRING_RSS      2
+
+/*
+ * Default number of segments received before writing to RX related registers
+ */
+#define IX_DEF_RXWREG_NSEGS    32
+
+/*
+ * Default number of segments sent before writing to TX related registers
+ */
+#define IX_DEF_TXWREG_NSEGS    8
+
+/*
+ * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
+ * number of transmit descriptors allocated by the driver. Increasing this
+ * value allows the driver to queue more transmits. Each descriptor is 16
+ * bytes. Performance tests have show the 2K value to be optimal for top
+ * performance.
+ */
+#define IX_DEF_TXD             1024
+#define IX_PERF_TXD            2048
+#define IX_MAX_TXD             4096
+#define IX_MIN_TXD             64
+
+/*
+ * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
+ * number of receive descriptors allocated for each RX queue. Increasing this
+ * value allows the driver to buffer more incoming packets. Each descriptor
+ * is 16 bytes.  A receive buffer is also allocated for each descriptor. 
+ * 
+ * Note: with 8 rings and a dual port card, it is possible to bump up 
+ *     against the system mbuf pool limit, you can tune nmbclusters
+ *     to adjust for this.
+ */
+#define IX_DEF_RXD             1024
+#define IX_PERF_RXD            2048
+#define IX_MAX_RXD             4096
+#define IX_MIN_RXD             64
+
+/* Alignment for rings */
+#define IX_DBA_ALIGN           128
+
+#define IX_MAX_FRAME_SIZE      0x3F00
+
+/* Flow control constants */
+#define IX_FC_PAUSE            0xFFFF
+#define IX_FC_HI               0x20000
+#define IX_FC_LO               0x10000
+
+/*
+ * RSS related registers
+ */
+#define IX_NRSSRK              10
+#define IX_RSSRK_SIZE          4
+#define IX_RSSRK_VAL(key, i)   (key[(i) * IX_RSSRK_SIZE] | \
+                                key[(i) * IX_RSSRK_SIZE + 1] << 8 | \
+                                key[(i) * IX_RSSRK_SIZE + 2] << 16 | \
+                                key[(i) * IX_RSSRK_SIZE + 3] << 24)
+#define IX_NRETA               32
+#define IX_RETA_SIZE           4
+
+/*
+ * EITR
+ */
+#define IX_EITR_INTVL_MASK_82598 0xffff
+#define IX_EITR_INTVL_MASK     0x0fff
+#define IX_EITR_INTVL_RSVD_MASK        0x0007
+#define IX_EITR_INTVL_MIN      IXGBE_MIN_EITR
+#define IX_EITR_INTVL_MAX      IXGBE_MAX_EITR
+
+/*
+ * Used for optimizing small rx mbufs.  Effort is made to keep the copy
+ * small and aligned for the CPU L1 cache.
+ * 
+ * MHLEN is typically 168 bytes, giving us 8-byte alignment.  Getting
+ * 32 byte alignment needed for the fast bcopy results in 8 bytes being
+ * wasted.  Getting 64 byte alignment, which _should_ be ideal for
+ * modern Intel CPUs, results in 40 bytes wasted and a significant drop
+ * in observed efficiency of the optimization, 97.9% -> 81.8%.
+ */
+#define IX_RX_COPY_LEN         160
+#define IX_RX_COPY_ALIGN       (MHLEN - IX_RX_COPY_LEN)
+
+#define IX_MAX_MCASTADDR       128
+
+#define MSIX_82598_BAR         3
+#define MSIX_82599_BAR         4
+
+#define IX_TSO_SIZE            (IP_MAXPACKET + \
+                                sizeof(struct ether_vlan_header))
+
+/*
+ * MUST be less than 38.  Though 82598 does not have this limit,
+ * we don't want long TX chain.  33 should be large enough even
+ * for 64K TSO (32 x 2K mbuf cluster and 1 x mbuf header).
+ *
+ * Reference:
+ * - 82599 datasheet 7.2.1.1
+ * - X540 datasheet 7.2.1.1
+ */
+#define IX_MAX_SCATTER         33
+#define IX_TX_RESERVED         3       /* 1 for TX ctx, 2 reserved */
+
+/* MSI and legacy interrupt */
+#define IX_TX_INTR_VEC         0
+#define IX_TX_INTR_MASK                (1 << IX_TX_INTR_VEC)
+#define IX_RX0_INTR_VEC                1
+#define IX_RX0_INTR_MASK       (1 << IX_RX0_INTR_VEC)
+#define IX_RX1_INTR_VEC                2
+#define IX_RX1_INTR_MASK       (1 << IX_RX1_INTR_VEC)
+
+#define IX_INTR_RATE           8000
+
+/* IOCTL define to gather SFP+ Diagnostic data */
+#define SIOCGI2C               SIOCGIFGENERIC
+
+/* TX checksum offload */
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP)
+
+/*
+ * This is used to get SFP+ module data: I2C read request passed via
+ * the SIOCGI2C ioctl.
+ */
+struct ix_i2c_req {
+       uint8_t         dev_addr;       /* I2C device address */
+       uint8_t         offset;         /* byte offset within dev_addr */
+       uint8_t         len;            /* number of data bytes requested */
+       uint8_t         data[8];        /* returned data */
+};
+
+/* Per-TX-descriptor software state */
+struct ix_tx_buf {
+       struct mbuf     *m_head;        /* mbuf bound to this slot, if any */
+       bus_dmamap_t    map;            /* DMA map for m_head */
+};
+
+/* Per-RX-descriptor software state */
+struct ix_rx_buf {
+       struct mbuf     *m_head;        /* mbuf backing this descriptor */
+       struct mbuf     *fmp;           /* first mbuf of a multi-seg frame */
+       struct mbuf     *lmp;           /* last mbuf of a multi-seg frame */
+       bus_dmamap_t    map;            /* DMA map for m_head */
+       bus_addr_t      paddr;          /* bus address of the RX buffer */
+       u_int           flags;
+#define IX_RX_COPY     0x1             /* small-frame copy optimization */
+};
+
+struct ix_softc;
+
+/*
+ * Software state of one TX ring: descriptor ring memory, per-slot
+ * buffers, free/clean indices and the busdma resources backing the
+ * ring and the TX head write-back area.  Cacheline aligned; each ring
+ * is protected by its own serializer.
+ */
+struct ix_tx_ring {
+       struct lwkt_serialize   tx_serialize;
+       struct ifaltq_subque    *tx_ifsq;
+       struct ix_softc         *tx_sc;
+       volatile uint32_t       *tx_hdr;        /* TX head write-back area */
+       union ixgbe_adv_tx_desc *tx_base;       /* descriptor ring */
+       struct ix_tx_buf        *tx_buf;        /* per-descriptor state */
+       bus_dma_tag_t           tx_tag;
+       uint32_t                tx_idx;
+       uint16_t                tx_avail;       /* # of free descriptors */
+       uint16_t                tx_next_avail;
+       uint16_t                tx_next_clean;
+       uint16_t                tx_ndesc;       /* # of descriptors */
+       uint16_t                tx_wreg_nsegs;
+       uint16_t                tx_intr_nsegs;
+       uint16_t                tx_nsegs;
+       int16_t                 tx_intr_vec;
+       int                     tx_intr_cpuid;
+       struct ifsubq_watchdog  tx_watchdog;
+
+       bus_dma_tag_t           tx_base_dtag;
+       bus_dmamap_t            tx_base_map;
+       bus_addr_t              tx_base_paddr;
+
+       bus_dma_tag_t           tx_hdr_dtag;
+       bus_dmamap_t            tx_hdr_map;
+       bus_addr_t              tx_hdr_paddr;
+} __cachealign;
+
+/*
+ * Software state of one RX ring: descriptor ring memory, per-slot
+ * buffers and the busdma resources backing the ring.  Cacheline
+ * aligned; each ring is protected by its own serializer.
+ */
+struct ix_rx_ring {
+       struct lwkt_serialize   rx_serialize;
+       struct ix_softc         *rx_sc;
+       union ixgbe_adv_rx_desc *rx_base;       /* descriptor ring */
+       struct ix_rx_buf        *rx_buf;        /* per-descriptor state */
+       bus_dma_tag_t           rx_tag;
+       bus_dmamap_t            rx_sparemap;    /* spare map for refill */
+       uint32_t                rx_idx;
+       uint16_t                rx_flags;
+#define IX_RXRING_FLAG_LRO     0x01
+#define IX_RXRING_FLAG_DISC    0x02
+       uint16_t                rx_next_check;
+       uint16_t                rx_ndesc;       /* # of descriptors */
+       uint16_t                rx_mbuf_sz;
+       uint16_t                rx_wreg_nsegs;
+       int16_t                 rx_intr_vec;
+
+#ifdef IX_RSS_DEBUG
+       u_long                  rx_pkts;        /* RXed packet counter */
+#endif
+
+       bus_dma_tag_t           rx_base_dtag;
+       bus_dmamap_t            rx_base_map;
+       bus_addr_t              rx_base_paddr;
+} __cachealign;
+
+/*
+ * Per-vector interrupt bookkeeping: handler, resource, target CPU,
+ * rate and what the vector is used for (RX, TX, both, or status).
+ */
+struct ix_intr_data {
+       struct lwkt_serialize   *intr_serialize;
+       driver_intr_t           *intr_func;     /* interrupt handler */
+       void                    *intr_hand;     /* cookie from setup */
+       struct resource         *intr_res;
+       void                    *intr_funcarg;  /* argument to intr_func */
+       int                     intr_rid;
+       int                     intr_cpuid;
+       int                     intr_rate;      /* interrupts/second */
+       int                     intr_use;
+#define IX_INTR_USE_RXTX       0
+#define IX_INTR_USE_STATUS     1
+#define IX_INTR_USE_RX         2
+#define IX_INTR_USE_TX         3
+       const char              *intr_desc;
+       char                    intr_desc0[64];
+};
+
+/*
+ * Per-device softc: shared hardware state (ixgbe_hw), RX/TX ring
+ * arrays, interrupt vectors, sysctl glue and bus resources.
+ */
+struct ix_softc {
+       struct arpcom           arpcom;
+
+       struct ixgbe_hw         hw;
+       struct ixgbe_osdep      osdep;
+
+       struct lwkt_serialize   main_serialize;
+       uint32_t                intr_mask;
+
+       boolean_t               link_active;
+
+       int                     tx_ring_cnt;    /* # of TX rings allocated */
+       int                     tx_ring_inuse;  /* # of TX rings in use */
+       struct ix_tx_ring       *tx_rings;
+
+       int                     rx_ring_cnt;    /* # of RX rings allocated */
+       int                     rx_ring_inuse;  /* # of RX rings in use */
+       struct ix_rx_ring       *rx_rings;
+
+       struct callout          timer;
+       int                     timer_cpuid;
+
+       uint32_t                optics;
+       uint32_t                fc;             /* local flow ctrl setting */
+       uint32_t                link_speed;
+       bool                    link_up;
+       boolean_t               sfp_probe;      /* pluggable optics */
+
+       struct ixgbe_hw_stats   stats;
+
+       int                     intr_type;
+       int                     intr_cnt;
+       struct ix_intr_data     *intr_data;
+
+       /* sysctl tree glue */
+       struct sysctl_ctx_list  sysctl_ctx;
+       struct sysctl_oid       *sysctl_tree;
+
+       device_t                dev;
+       bus_dma_tag_t           parent_tag;
+       struct ifmedia          media;
+
+       struct resource         *mem_res;
+       int                     mem_rid;
+
+       int                     nserialize;
+       struct lwkt_serialize   **serializes;
+
+       uint8_t                 *mta;           /* Multicast array memory */
+
+       int                     if_flags;
+       int                     advspeed;       /* advertised link speeds */
+       uint16_t                max_frame_size;
+
+#ifdef IX_RSS_DEBUG
+       int                     rss_debug;      /* RSS debug level sysctl */
+#endif
+};
+
+#define IX_ENABLE_HWRSS(sc)    ((sc)->rx_ring_cnt > 1)
+#define IX_ENABLE_HWTSS(sc)    ((sc)->tx_ring_cnt > 1)
+
+#endif /* _IF_IX_H_ */
similarity index 94%
rename from sys/dev/netif/ixgbe/ixgbe_82598.c
rename to sys/dev/netif/ix/ixgbe_82598.c
index 15ece96..e32f270 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +30,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/
+/*$FreeBSD$*/
 
 #include "ixgbe_type.h"
 #include "ixgbe_82598.h"
@@ -49,18 +49,17 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                                      bool link_up_wait_to_complete);
 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                      ixgbe_link_speed speed,
-                                     bool autoneg,
                                      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
-                                        bool autoneg,
                                         bool autoneg_wait_to_complete);
 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
                                  u32 headroom, int strategy);
-
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                                       u8 *sff8472_data);
 /**
  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
  *  @hw: pointer to the HW structure
@@ -155,6 +154,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
 
        /* SFP+ Module */
        phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+       phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
 
        /* Link */
        mac->ops.check_link = &ixgbe_check_mac_link_82598;
@@ -166,6 +166,8 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
        /* Manageability interface */
        mac->ops.set_fw_drv_ver = NULL;
 
+       mac->ops.get_rtrup2tc = NULL;
+
        return ret_val;
 }
 
@@ -712,15 +714,15 @@ out:
  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
-                                     ixgbe_link_speed speed, bool autoneg,
+                                     ixgbe_link_speed speed,
                                      bool autoneg_wait_to_complete)
 {
+       bool autoneg = FALSE;
        s32 status = IXGBE_SUCCESS;
        ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
        u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -766,14 +768,12 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
  *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
  *
  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
  **/
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
-                                        bool autoneg,
                                         bool autoneg_wait_to_complete)
 {
        s32 status;
@@ -781,7 +781,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
        DEBUGFUNC("ixgbe_setup_copper_link_82598");
 
        /* Setup the PHY according to input speed */
-       status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+       status = hw->phy.ops.setup_link_speed(hw, speed,
                                              autoneg_wait_to_complete);
        /* Set up MAC */
        ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -1102,23 +1102,33 @@ s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
 }
 
 /**
- *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
  *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to read
+ *  @dev_addr: address to read from
+ *  @byte_offset: byte offset to read from dev_addr
  *  @eeprom_data: value read
  *
  *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
  **/
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-                               u8 *eeprom_data)
+static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+                                   u8 byte_offset, u8 *eeprom_data)
 {
        s32 status = IXGBE_SUCCESS;
        u16 sfp_addr = 0;
        u16 sfp_data = 0;
        u16 sfp_stat = 0;
+       u16 gssr;
        u32 i;
 
-       DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
+       DEBUGFUNC("ixgbe_read_i2c_phy_82598");
+
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               gssr = IXGBE_GSSR_PHY1_SM;
+       else
+               gssr = IXGBE_GSSR_PHY0_SM;
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+               return IXGBE_ERR_SWFW_SYNC;
 
        if (hw->phy.type == ixgbe_phy_nl) {
                /*
@@ -1126,19 +1136,19 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                 * 0xC30D. These registers are used to talk to the SFP+
                 * module's EEPROM through the SDA/SCL (I2C) interface.
                 */
-               sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+               sfp_addr = (dev_addr << 8) + byte_offset;
                sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
-               hw->phy.ops.write_reg(hw,
-                                     IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
-                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-                                     sfp_addr);
+               hw->phy.ops.write_reg_mdi(hw,
+                                         IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+                                         IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                         sfp_addr);
 
                /* Poll status */
                for (i = 0; i < 100; i++) {
-                       hw->phy.ops.read_reg(hw,
-                                            IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
-                                            IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-                                            &sfp_stat);
+                       hw->phy.ops.read_reg_mdi(hw,
+                                               IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+                                               IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                               &sfp_stat);
                        sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
                        if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
                                break;
@@ -1152,20 +1162,50 @@ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
                }
 
                /* Read data */
-               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
-                                    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+               hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+                                       IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
 
                *eeprom_data = (u8)(sfp_data >> 8);
        } else {
                status = IXGBE_ERR_PHY;
-               goto out;
        }
 
 out:
+       hw->mac.ops.release_swfw_sync(hw, gssr);
        return status;
 }
 
 /**
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                               u8 *eeprom_data)
+{
+       return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+                                       byte_offset, eeprom_data);
+}
+
+/**
+ *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset at address 0xA2
+ *  @sff8472_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                                       u8 *sff8472_data)
+{
+       return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+                                       byte_offset, sff8472_data);
+}
+
+/**
  *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
  *  @hw: pointer to hardware structure
  *
similarity index 95%
rename from sys/dev/netif/ixgbe/ixgbe_82598.h
rename to sys/dev/netif/ix/ixgbe_82598.h
index 734e8fb..16cd2ee 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +30,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.h,v 1.2 2012/07/05 20:51:44 jfv Exp $*/
+/*$FreeBSD$*/
 
 #ifndef _IXGBE_82598_H_
 #define _IXGBE_82598_H_
similarity index 85%
rename from sys/dev/netif/ixgbe/ixgbe_82599.c
rename to sys/dev/netif/ix/ixgbe_82599.c
index 5232a2a..3cc8cd7 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +30,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.8 2012/07/05 20:51:44 jfv Exp $*/
+/*$FreeBSD$*/
 
 #include "ixgbe_type.h"
 #include "ixgbe_82599.h"
@@ -40,7 +40,6 @@
 
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
-                                        bool autoneg,
                                         bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
@@ -48,14 +47,37 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
                                          u16 words, u16 *data);
 
+/**
+ *  ixgbe_mng_enabled - Is the manageability engine enabled?
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns TRUE when manageability firmware is running: FWSM reports
+ *  pass-through firmware mode, MANC has RCV_TCO_EN set and FACTPS
+ *  MNGCG is clear; FALSE otherwise.
+ **/
+static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+{
+       u32 fwsm, manc, factps;
+
+       fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+       if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
+               return FALSE;
+
+       manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+       if (!(manc & IXGBE_MANC_RCV_TCO_EN))
+               return FALSE;
+
+       factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+       if (factps & IXGBE_FACTPS_MNGCG)
+               return FALSE;
+
+       return TRUE;
+}
+
 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
 
        DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
 
-       /* enable the laser control functions for SFP+ fiber */
-       if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+       /*
+        * enable the laser control functions for SFP+ fiber
+        * and MNG not enabled
+        */
+       if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+           !hw->mng_fw_enabled) {
                mac->ops.disable_tx_laser =
                                       &ixgbe_disable_tx_laser_multispeed_fiber;
                mac->ops.enable_tx_laser =
@@ -135,9 +157,8 @@ init_phy_ops_out:
 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
        s32 ret_val = IXGBE_SUCCESS;
-       u32 reg_anlp1 = 0;
-       u32 i = 0;
        u16 list_offset, data_offset, data_value;
+       bool got_lock = FALSE;
 
        DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
 
@@ -159,11 +180,13 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                        goto setup_sfp_out;
                }
 
-               hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+               if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+                       goto setup_sfp_err;
                while (data_value != 0xffff) {
                        IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
                        IXGBE_WRITE_FLUSH(hw);
-                       hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+                       if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+                               goto setup_sfp_err;
                }
 
                /* Release the semaphore */
@@ -171,32 +194,52 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                /* Delay obtaining semaphore again to allow FW access */
                msec_delay(hw->eeprom.semaphore_delay);
 
-               /* Now restart DSP by setting Restart_AN and clearing LMS */
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
-                               IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
-                               IXGBE_AUTOC_AN_RESTART));
-
-               /* Wait for AN to leave state 0 */
-               for (i = 0; i < 10; i++) {
-                       msec_delay(4);
-                       reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-                       if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
-                               break;
+               /* Need SW/FW semaphore around AUTOC writes if LESM on,
+                * likewise reset_pipeline requires lock as it also writes
+                * AUTOC.
+                */
+               if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                       if (ret_val != IXGBE_SUCCESS) {
+                               ret_val = IXGBE_ERR_SWFW_SYNC;
+                               goto setup_sfp_out;
+                       }
+
+                       got_lock = TRUE;
                }
-               if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+
+               /* Restart DSP and set SFI mode */
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
+                               IXGBE_AUTOC_LMS_10G_SERIAL));
+               hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               ret_val = ixgbe_reset_pipeline_82599(hw);
+
+               if (got_lock) {
+                       hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
+                       got_lock = FALSE;
+               }
+
+               if (ret_val) {
                        DEBUGOUT("sfp module setup not complete\n");
                        ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
                        goto setup_sfp_out;
                }
 
-               /* Restart DSP by setting Restart_AN and return to SFI mode */
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
-                               IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
-                               IXGBE_AUTOC_AN_RESTART));
        }
 
 setup_sfp_out:
        return ret_val;
+
+setup_sfp_err:
+       /* Release the semaphore */
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+       /* Delay obtaining semaphore again to allow FW access */
+       msec_delay(hw->eeprom.semaphore_delay);
+       ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+                     "eeprom read at offset %d failed", data_offset);
+       return IXGBE_ERR_PHY;
 }
 
 /**
@@ -216,7 +259,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
 
        DEBUGFUNC("ixgbe_init_ops_82599");
 
-       ret_val = ixgbe_init_phy_ops_generic(hw);
+       ixgbe_init_phy_ops_generic(hw);
        ret_val = ixgbe_init_ops_generic(hw);
 
        /* PHY */
@@ -282,6 +325,11 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
        mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
 
 
+       mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+
+       /* Cache if MNG FW is up */
+       hw->mng_fw_enabled = ixgbe_mng_enabled(hw);
+
        return ret_val;
 }
 
@@ -289,13 +337,13 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
  *  @hw: pointer to hardware structure
  *  @speed: pointer to link speed
- *  @negotiation: TRUE when autoneg or autotry is enabled
+ *  @autoneg: TRUE when autoneg or autotry is enabled
  *
  *  Determines the link capabilities by reading the AUTOC register.
  **/
 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed,
-                                     bool *negotiation)
+                                     bool *autoneg)
 {
        s32 status = IXGBE_SUCCESS;
        u32 autoc = 0;
@@ -309,7 +357,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
            hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
            hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = TRUE;
+               *autoneg = TRUE;
                goto out;
        }
 
@@ -326,22 +374,22 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
        switch (autoc & IXGBE_AUTOC_LMS_MASK) {
        case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = FALSE;
+               *autoneg = FALSE;
                break;
 
        case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
-               *negotiation = FALSE;
+               *autoneg = FALSE;
                break;
 
        case IXGBE_AUTOC_LMS_1G_AN:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = TRUE;
+               *autoneg = TRUE;
                break;
 
        case IXGBE_AUTOC_LMS_10G_SERIAL:
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
-               *negotiation = FALSE;
+               *autoneg = FALSE;
                break;
 
        case IXGBE_AUTOC_LMS_KX4_KX_KR:
@@ -353,7 +401,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
                        *speed |= IXGBE_LINK_SPEED_10GB_FULL;
                if (autoc & IXGBE_AUTOC_KX_SUPP)
                        *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = TRUE;
+               *autoneg = TRUE;
                break;
 
        case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
@@ -364,12 +412,12 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
                        *speed |= IXGBE_LINK_SPEED_10GB_FULL;
                if (autoc & IXGBE_AUTOC_KX_SUPP)
                        *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = TRUE;
+               *autoneg = TRUE;
                break;
 
        case IXGBE_AUTOC_LMS_SGMII_1G_100M:
                *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
-               *negotiation = FALSE;
+               *autoneg = FALSE;
                break;
 
        default:
@@ -381,7 +429,7 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
        if (hw->phy.multispeed_fiber) {
                *speed |= IXGBE_LINK_SPEED_10GB_FULL |
                          IXGBE_LINK_SPEED_1GB_FULL;
-               *negotiation = TRUE;
+               *autoneg = TRUE;
        }
 
 out:
@@ -424,6 +472,7 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_SFP_FCOE:
        case IXGBE_DEV_ID_82599_SFP_EM:
        case IXGBE_DEV_ID_82599_SFP_SF2:
+       case IXGBE_DEV_ID_82599_SFP_SF_QP:
        case IXGBE_DEV_ID_82599EN_SFP:
                media_type = ixgbe_media_type_fiber;
                break;
@@ -433,6 +482,10 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_T3_LOM:
                media_type = ixgbe_media_type_copper;
                break;
+       case IXGBE_DEV_ID_82599_BYPASS:
+               media_type = ixgbe_media_type_fiber_fixed;
+               hw->phy.multispeed_fiber = TRUE;
+               break;
        default:
                media_type = ixgbe_media_type_unknown;
                break;
@@ -442,6 +495,29 @@ out:
 }
 
 /**
+ *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables link during D3 power down sequence.
+ *
+ **/
+void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+	u32 autoc2_reg;
+	u16 ee_ctrl_2 = 0;
+
+	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
+	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); /* 0 on failure */
+
+	if (!hw->mng_fw_enabled && !hw->wol_enabled &&
+	    (ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT)) {
+		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+	}
+}
+
+/**
  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
  *  @hw: pointer to hardware structure
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
@@ -456,17 +532,32 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
        u32 links_reg;
        u32 i;
        s32 status = IXGBE_SUCCESS;
+       bool got_lock = FALSE;
 
        DEBUGFUNC("ixgbe_start_mac_link_82599");
 
 
+       /*  reset_pipeline requires us to hold this lock as it writes to
+        *  AUTOC.
+        */
+       if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+               status = hw->mac.ops.acquire_swfw_sync(hw,
+                                                      IXGBE_GSSR_MAC_CSR_SM);
+               if (status != IXGBE_SUCCESS)
+                       goto out;
+
+               got_lock = TRUE;
+       }
+
        /* Restart link */
-       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+       ixgbe_reset_pipeline_82599(hw);
+
+       if (got_lock)
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
 
        /* Only poll for autoneg to complete if specified to do so */
        if (autoneg_wait_to_complete) {
+               autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
                if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
                     IXGBE_AUTOC_LMS_KX4_KX_KR ||
                    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -490,6 +581,7 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
        /* Add delay to filter out noises during initial link setup */
        msec_delay(50);
 
+out:
        return status;
 }
 
@@ -555,16 +647,84 @@ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 }
 
 /**
+ *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ *  @hw: pointer to hardware structure
+ *  @speed: link speed to set
+ *
+ *  We set the module speed differently for fixed fiber.  For other
+ *  multi-speed devices we don't have an error value so here if we
+ *  detect an error we just log it and exit.
+ */
+static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+					ixgbe_link_speed speed)
+{
+	s32 status;
+	u8 rs, eeprom_data;
+
+	switch (speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		/* one bit mask same as setting on */
+		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+		break;
+	default:
+		DEBUGOUT("Invalid fixed module speed\n");
+		return;
+	}
+
+	/* Set RS0 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
+		goto out;
+	}
+	/* Clear old rate-select bits, then OR in the new one (not AND) */
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
+		goto out;
+	}
+
+	/* Set RS1 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
+		goto out;
+	}
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
+		goto out;
+	}
+out:
+	return;
+}
+
+/**
  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-                                    ixgbe_link_speed speed, bool autoneg,
+                                    ixgbe_link_speed speed,
                                     bool autoneg_wait_to_complete)
 {
        s32 status = IXGBE_SUCCESS;
@@ -573,13 +733,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
        u32 speedcnt = 0;
        u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
        u32 i = 0;
-       bool link_up = FALSE;
-       bool negotiation;
+       bool autoneg, link_up = FALSE;
 
        DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
 
        /* Mask off requested but non-supported speeds */
-       status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
+       status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
        if (status != IXGBE_SUCCESS)
                return status;
 
@@ -602,16 +761,20 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        goto out;
 
                /* Set the module link speed */
-               esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
-               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-               IXGBE_WRITE_FLUSH(hw);
+               if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
+                       ixgbe_set_fiber_fixed_speed(hw,
+                                                   IXGBE_LINK_SPEED_10GB_FULL);
+               } else {
+                       esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+                       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+                       IXGBE_WRITE_FLUSH(hw);
+               }
 
                /* Allow module to change analog characteristics (1G->10G) */
                msec_delay(40);
 
                status = ixgbe_setup_mac_link_82599(hw,
                                                    IXGBE_LINK_SPEED_10GB_FULL,
-                                                   autoneg,
                                                    autoneg_wait_to_complete);
                if (status != IXGBE_SUCCESS)
                        return status;
@@ -653,17 +816,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
                        goto out;
 
                /* Set the module link speed */
-               esdp_reg &= ~IXGBE_ESDP_SDP5;
-               esdp_reg |= IXGBE_ESDP_SDP5_DIR;
-               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-               IXGBE_WRITE_FLUSH(hw);
+               if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
+                       ixgbe_set_fiber_fixed_speed(hw,
+                                                   IXGBE_LINK_SPEED_1GB_FULL);
+               } else {
+                       esdp_reg &= ~IXGBE_ESDP_SDP5;
+                       esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+                       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+                       IXGBE_WRITE_FLUSH(hw);
+               }
 
                /* Allow module to change analog characteristics (10G->1G) */
                msec_delay(40);
 
                status = ixgbe_setup_mac_link_82599(hw,
                                                    IXGBE_LINK_SPEED_1GB_FULL,
-                                                   autoneg,
                                                    autoneg_wait_to_complete);
                if (status != IXGBE_SUCCESS)
                        return status;
@@ -690,7 +857,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
         */
        if (speedcnt > 1)
                status = ixgbe_setup_mac_link_multispeed_fiber(hw,
-                       highest_link_speed, autoneg, autoneg_wait_to_complete);
+                       highest_link_speed, autoneg_wait_to_complete);
 
 out:
        /* Set autoneg_advertised value based on input link speed */
@@ -709,13 +876,12 @@ out:
  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  *  Implements the Intel SmartSpeed algorithm.
  **/
 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-                                   ixgbe_link_speed speed, bool autoneg,
+                                   ixgbe_link_speed speed,
                                    bool autoneg_wait_to_complete)
 {
        s32 status = IXGBE_SUCCESS;
@@ -748,7 +914,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
        /* First, try to get link with full advertisement */
        hw->phy.smart_speed_active = FALSE;
        for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
-               status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+               status = ixgbe_setup_mac_link_82599(hw, speed,
                                                    autoneg_wait_to_complete);
                if (status != IXGBE_SUCCESS)
                        goto out;
@@ -783,7 +949,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 
        /* Turn SmartSpeed on to disable KR support */
        hw->phy.smart_speed_active = TRUE;
-       status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+       status = ixgbe_setup_mac_link_82599(hw, speed,
                                            autoneg_wait_to_complete);
        if (status != IXGBE_SUCCESS)
                goto out;
@@ -808,7 +974,7 @@ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 
        /* We didn't get link.  Turn SmartSpeed back off. */
        hw->phy.smart_speed_active = FALSE;
-       status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+       status = ixgbe_setup_mac_link_82599(hw, speed,
                                            autoneg_wait_to_complete);
 
 out:
@@ -822,32 +988,30 @@ out:
  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-                              ixgbe_link_speed speed, bool autoneg,
+                              ixgbe_link_speed speed,
                               bool autoneg_wait_to_complete)
 {
+       bool autoneg = FALSE;
        s32 status = IXGBE_SUCCESS;
-       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 autoc, pma_pmd_1g, link_mode, start_autoc;
        u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-       u32 start_autoc = autoc;
        u32 orig_autoc = 0;
-       u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
-       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
        u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
        u32 links_reg;
        u32 i;
        ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+       bool got_lock = FALSE;
 
        DEBUGFUNC("ixgbe_setup_mac_link_82599");
 
        /* Check to see if speed passed in is supported. */
        status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
-       if (status != IXGBE_SUCCESS)
+       if (status)
                goto out;
 
        speed &= link_capabilities;
@@ -859,9 +1023,14 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 
        /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
        if (hw->mac.orig_link_settings_stored)
-               orig_autoc = hw->mac.orig_autoc;
+               autoc = hw->mac.orig_autoc;
        else
-               orig_autoc = autoc;
+               autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+       orig_autoc = autoc;
+       start_autoc = hw->mac.cached_autoc;
+       link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+       pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
 
        if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
            link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
@@ -900,9 +1069,31 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        }
 
        if (autoc != start_autoc) {
+               /* Need SW/FW semaphore around AUTOC writes if LESM is on,
+                * likewise reset_pipeline requires us to hold this lock as
+                * it also writes to AUTOC.
+                */
+               if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                       status = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                       if (status != IXGBE_SUCCESS) {
+                               status = IXGBE_ERR_SWFW_SYNC;
+                               goto out;
+                       }
+
+                       got_lock = TRUE;
+               }
+
                /* Restart link */
-               autoc |= IXGBE_AUTOC_AN_RESTART;
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+               hw->mac.cached_autoc = autoc;
+               ixgbe_reset_pipeline_82599(hw);
+
+               if (got_lock) {
+                       hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
+                       got_lock = FALSE;
+               }
 
                /* Only poll for autoneg to complete if specified to do so */
                if (autoneg_wait_to_complete) {
@@ -937,14 +1128,12 @@ out:
  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
  *
  *  Restarts link on PHY and MAC based on settings passed in.
  **/
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                         ixgbe_link_speed speed,
-                                        bool autoneg,
                                         bool autoneg_wait_to_complete)
 {
        s32 status;
@@ -952,7 +1141,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
        DEBUGFUNC("ixgbe_setup_copper_link_82599");
 
        /* Setup the PHY according to input speed */
-       status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+       status = hw->phy.ops.setup_link_speed(hw, speed,
                                              autoneg_wait_to_complete);
        /* Set up MAC */
        ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -972,7 +1161,8 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 {
        ixgbe_link_speed link_speed;
        s32 status;
-       u32 ctrl, i, autoc, autoc2;
+       u32 ctrl, i, autoc2;
+       u32 curr_lms;
        bool link_up = FALSE;
 
        DEBUGFUNC("ixgbe_reset_hw_82599");
@@ -1006,6 +1196,13 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
        if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
                hw->phy.ops.reset(hw);
 
+       /* remember AUTOC from before we reset */
+       if (hw->mac.cached_autoc)
+               curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
+       else
+               curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
+                                         IXGBE_AUTOC_LMS_MASK;
+
 mac_reset_top:
        /*
         * Issue global reset to the MAC.  Needs to be SW reset if link is up.
@@ -1054,16 +1251,59 @@ mac_reset_top:
         * stored off yet.  Otherwise restore the stored original
         * values since the reset operation sets back to defaults.
         */
-       autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+
+       /* Enable link if disabled in NVM */
+       if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+               autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+               IXGBE_WRITE_FLUSH(hw);
+       }
+
        if (hw->mac.orig_link_settings_stored == FALSE) {
-               hw->mac.orig_autoc = autoc;
+               hw->mac.orig_autoc = hw->mac.cached_autoc;
                hw->mac.orig_autoc2 = autoc2;
                hw->mac.orig_link_settings_stored = TRUE;
        } else {
-               if (autoc != hw->mac.orig_autoc)
-                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
-                                       IXGBE_AUTOC_AN_RESTART));
+
+               /* If MNG FW is running on a multi-speed device that
+                * doesn't autoneg with out driver support we need to
+                * leave LMS in the state it was before we MAC reset.
+                * Likewise if we support WoL we don't want change the
+                * LMS state.
+                */
+               if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+                   hw->wol_enabled)
+                       hw->mac.orig_autoc =
+                               (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
+                               curr_lms;
+
+               if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
+                       /* Need SW/FW semaphore around AUTOC writes if LESM is
+                        * on, likewise reset_pipeline requires us to hold
+                        * this lock as it also writes to AUTOC.
+                        */
+                       bool got_lock = FALSE;
+                       if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                               status = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                               if (status != IXGBE_SUCCESS) {
+                                       status = IXGBE_ERR_SWFW_SYNC;
+                                       goto reset_hw_out;
+                               }
+
+                               got_lock = TRUE;
+                       }
+
+                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+                       hw->mac.cached_autoc = hw->mac.orig_autoc;
+                       ixgbe_reset_pipeline_82599(hw);
+
+                       if (got_lock)
+                               hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
+               }
 
                if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
                    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1168,7 +1408,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
                if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
                                   IXGBE_FDIRCTRL_INIT_DONE)
                        break;
-               usec_delay(10);
+               msec_delay(1);
        }
        if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
                DEBUGOUT("Flow Director Signature poll time exceeded!\n");
@@ -2098,7 +2338,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_ERR_EEPROM_VERSION;
        u16 fw_offset, fw_ptp_cfg_offset;
-       u16 fw_version = 0;
+       u16 fw_version;
 
        DEBUGFUNC("ixgbe_verify_fw_version_82599");
 
@@ -2109,22 +2349,37 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
        }
 
        /* get the offset to the Firmware Module block */
-       hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+       if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
+               ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+                             "eeprom read at offset %d failed", IXGBE_FW_PTR);
+               return IXGBE_ERR_EEPROM_VERSION;
+       }
 
        if ((fw_offset == 0) || (fw_offset == 0xFFFF))
                goto fw_version_out;
 
        /* get the offset to the Pass Through Patch Configuration block */
-       hw->eeprom.ops.read(hw, (fw_offset +
+       if (hw->eeprom.ops.read(hw, (fw_offset +
                                 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
-                                &fw_ptp_cfg_offset);
+                                &fw_ptp_cfg_offset)) {
+               ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+                             "eeprom read at offset %d failed",
+                             fw_offset +
+                             IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
+               return IXGBE_ERR_EEPROM_VERSION;
+       }
 
        if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
                goto fw_version_out;
 
        /* get the firmware version */
-       hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
-                           IXGBE_FW_PATCH_VERSION_4), &fw_version);
+       if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+                           IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
+               ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+                             "eeprom read at offset %d failed",
+                             fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
+               return IXGBE_ERR_EEPROM_VERSION;
+       }
 
        if (fw_version > 0x5)
                status = IXGBE_SUCCESS;
@@ -2243,4 +2498,55 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
        return ret_val;
 }
 
+/**
+ *  ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Reset pipeline by asserting Restart_AN together with LMS change to
+ *  ensure full pipeline reset.  Callers take the IXGBE_GSSR_MAC_CSR_SM
+ *  semaphore around this call when LESM FW is enabled, as AUTOC is written.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+	s32 ret_val;
+	u32 anlp1_reg = 0;
+	u32 i, autoc_reg, autoc2_reg;
+
+	/* Enable link if disabled in NVM */
+	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	autoc_reg = hw->mac.cached_autoc;
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+	/* Wait for AN to leave state 0 */
+	for (i = 0; i < 10; i++) { /* up to ~40 ms total */
+		msec_delay(4);
+		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+			break;
+	}
+
+	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+		DEBUGOUT("auto negotiation not completed\n");
+		ret_val = IXGBE_ERR_RESET_FAILED;
+		goto reset_pipeline_out;
+	}
+
+	ret_val = IXGBE_SUCCESS;
+
+reset_pipeline_out:
+	/* Write AUTOC register with original LMS field and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return ret_val;
+}
+
+
 
similarity index 90%
rename from sys/dev/netif/ixgbe/ixgbe_82599.h
rename to sys/dev/netif/ix/ixgbe_82599.h
index 305f4a8..a77d153 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +30,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.h,v 1.1 2012/01/30 16:42:02 jfv Exp $*/
+/*$FreeBSD$*/
 
 #ifndef _IXGBE_82599_H_
 #define _IXGBE_82599_H_
@@ -42,15 +42,15 @@ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-                                         ixgbe_link_speed speed, bool autoneg,
+                                         ixgbe_link_speed speed,
                                          bool autoneg_wait_to_complete);
 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-                                   ixgbe_link_speed speed, bool autoneg,
+                                   ixgbe_link_speed speed,
                                    bool autoneg_wait_to_complete);
 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
                               bool autoneg_wait_to_complete);
 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-                              bool autoneg, bool autoneg_wait_to_complete);
+                              bool autoneg_wait_to_complete);
 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
@@ -61,5 +61,4 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 #endif /* _IXGBE_82599_H_ */
similarity index 94%
rename from sys/dev/netif/ixgbe/ixgbe_api.c
rename to sys/dev/netif/ix/ixgbe_api.c
index 7abbcc5..925866b 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_api.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/
+/*$FreeBSD$*/
 
 #include "ixgbe_api.h"
 #include "ixgbe_common.h"
 
 /**
+ * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
+{
+       if (hw->mac.ops.get_rtrup2tc)
+               hw->mac.ops.get_rtrup2tc(hw, map);
+}
+
+/**
  *  ixgbe_init_shared_code - Initialize the shared code
  *  @hw: pointer to hardware structure
  *
@@ -93,53 +106,62 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
 
        DEBUGFUNC("ixgbe_set_mac_type\n");
 
-       if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
-               switch (hw->device_id) {
-               case IXGBE_DEV_ID_82598:
-               case IXGBE_DEV_ID_82598_BX:
-               case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-               case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-               case IXGBE_DEV_ID_82598AT:
-               case IXGBE_DEV_ID_82598AT2:
-               case IXGBE_DEV_ID_82598EB_CX4:
-               case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
-               case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-               case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-               case IXGBE_DEV_ID_82598EB_XF_LR:
-               case IXGBE_DEV_ID_82598EB_SFP_LOM:
-                       hw->mac.type = ixgbe_mac_82598EB;
-                       break;
-               case IXGBE_DEV_ID_82599_KX4:
-               case IXGBE_DEV_ID_82599_KX4_MEZZ:
-               case IXGBE_DEV_ID_82599_XAUI_LOM:
-               case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-               case IXGBE_DEV_ID_82599_KR:
-               case IXGBE_DEV_ID_82599_SFP:
-               case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
-               case IXGBE_DEV_ID_82599_SFP_FCOE:
-               case IXGBE_DEV_ID_82599_SFP_EM:
-               case IXGBE_DEV_ID_82599_SFP_SF2:
-               case IXGBE_DEV_ID_82599EN_SFP:
-               case IXGBE_DEV_ID_82599_CX4:
-               case IXGBE_DEV_ID_82599_T3_LOM:
-                       hw->mac.type = ixgbe_mac_82599EB;
-                       break;
-               case IXGBE_DEV_ID_82599_VF:
-                       hw->mac.type = ixgbe_mac_82599_vf;
-                       break;
-               case IXGBE_DEV_ID_X540_VF:
-                       hw->mac.type = ixgbe_mac_X540_vf;
-                       break;
-               case IXGBE_DEV_ID_X540T:
-               case IXGBE_DEV_ID_X540T1:
-                       hw->mac.type = ixgbe_mac_X540;
-                       break;
-               default:
-                       ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
-                       break;
-               }
-       } else {
+       if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
+               ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+                            "Unsupported vendor id: %x", hw->vendor_id);
+               return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+       }
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82598:
+       case IXGBE_DEV_ID_82598_BX:
+       case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+       case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+       case IXGBE_DEV_ID_82598AT:
+       case IXGBE_DEV_ID_82598AT2:
+       case IXGBE_DEV_ID_82598EB_CX4:
+       case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+       case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+       case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+       case IXGBE_DEV_ID_82598EB_XF_LR:
+       case IXGBE_DEV_ID_82598EB_SFP_LOM:
+               hw->mac.type = ixgbe_mac_82598EB;
+               break;
+       case IXGBE_DEV_ID_82599_KX4:
+       case IXGBE_DEV_ID_82599_KX4_MEZZ:
+       case IXGBE_DEV_ID_82599_XAUI_LOM:
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+       case IXGBE_DEV_ID_82599_KR:
+       case IXGBE_DEV_ID_82599_SFP:
+       case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+       case IXGBE_DEV_ID_82599_SFP_FCOE:
+       case IXGBE_DEV_ID_82599_SFP_EM:
+       case IXGBE_DEV_ID_82599_SFP_SF2:
+       case IXGBE_DEV_ID_82599_SFP_SF_QP:
+       case IXGBE_DEV_ID_82599EN_SFP:
+       case IXGBE_DEV_ID_82599_CX4:
+       case IXGBE_DEV_ID_82599_BYPASS:
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               hw->mac.type = ixgbe_mac_82599EB;
+               break;
+       case IXGBE_DEV_ID_82599_VF:
+       case IXGBE_DEV_ID_82599_VF_HV:
+               hw->mac.type = ixgbe_mac_82599_vf;
+               break;
+       case IXGBE_DEV_ID_X540_VF:
+       case IXGBE_DEV_ID_X540_VF_HV:
+               hw->mac.type = ixgbe_mac_X540_vf;
+               break;
+       case IXGBE_DEV_ID_X540T:
+       case IXGBE_DEV_ID_X540_BYPASS:
+               hw->mac.type = ixgbe_mac_X540;
+               break;
+       default:
                ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+               ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+                            "Unsupported device id: %x",
+                            hw->device_id);
+               break;
        }
 
        DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
@@ -507,16 +529,14 @@ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
  *  ixgbe_setup_phy_link_speed - Set auto advertise
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *
  *  Sets the auto advertised capabilities
  **/
 s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-                              bool autoneg,
                               bool autoneg_wait_to_complete)
 {
        return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
-                              autoneg, autoneg_wait_to_complete),
+                              autoneg_wait_to_complete),
                               IXGBE_NOT_IMPLEMENTED);
 }
 
@@ -576,17 +596,15 @@ void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
  *  ixgbe_setup_link - Set link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *
  *  Configures link settings.  Restarts the link.
  *  Performs autonegotiation if needed.
  **/
 s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-                    bool autoneg,
                     bool autoneg_wait_to_complete)
 {
        return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
-                              autoneg, autoneg_wait_to_complete),
+                              autoneg_wait_to_complete),
                               IXGBE_NOT_IMPLEMENTED);
 }
 
@@ -999,6 +1017,8 @@ s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
 }
 
 
+
+
 /**
  *  ixgbe_read_analog_reg8 - Reads 8 bit analog register
  *  @hw: pointer to hardware structure
similarity index 96%
rename from sys/dev/netif/ixgbe/ixgbe_api.h
rename to sys/dev/netif/ix/ixgbe_api.h
index b5f5a1b..91023ae 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_api.h,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
+/*$FreeBSD$*/
 
 #ifndef _IXGBE_API_H_
 #define _IXGBE_API_H_
 
 #include "ixgbe_type.h"
 
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
+
 s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
 
 extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
@@ -72,13 +74,12 @@ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
                         bool *link_up);
 s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
                               ixgbe_link_speed speed,
-                              bool autoneg,
                               bool autoneg_wait_to_complete);
 void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
 void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
 void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
 s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-                    bool autoneg, bool autoneg_wait_to_complete);
+                    bool autoneg_wait_to_complete);
 s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                     bool *link_up, bool link_up_wait_to_complete);
 s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
@@ -135,6 +136,7 @@ u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
 s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
 s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@@ -159,6 +161,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                                          union ixgbe_atr_input *mask);
 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
                                     union ixgbe_atr_hash_dword common);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
                        u8 *data);
 s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
similarity index 88%
rename from sys/dev/netif/ixgbe/ixgbe_common.c
rename to sys/dev/netif/ix/ixgbe_common.c
index f110208..1734345 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.c,v 1.14 2012/07/05 20:51:44 jfv Exp $*/
+/*$FreeBSD$*/
 
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
 #include "ixgbe_api.h"
 
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
@@ -134,33 +136,63 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
        mac->ops.get_link_capabilities = NULL;
        mac->ops.setup_link = NULL;
        mac->ops.check_link = NULL;
+       mac->ops.dmac_config = NULL;
+       mac->ops.dmac_update_tcs = NULL;
+       mac->ops.dmac_config_tcs = NULL;
 
        return IXGBE_SUCCESS;
 }
 
 /**
- *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
- *  control
- *  @hw: pointer to hardware structure
+ * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
+ * of flow control
+ * @hw: pointer to hardware structure
+ *
+ * This function returns TRUE if the device supports flow control
+ * autonegotiation, and FALSE if it does not.
  *
- *  There are several phys that do not support autoneg flow control. This
- *  function check the device id to see if the associated phy supports
- *  autoneg flow control.
  **/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
 {
+       bool supported = FALSE;
+       ixgbe_link_speed speed;
+       bool link_up;
 
        DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
 
-       switch (hw->device_id) {
-       case IXGBE_DEV_ID_X540T:
-       case IXGBE_DEV_ID_X540T1:
-               return IXGBE_SUCCESS;
-       case IXGBE_DEV_ID_82599_T3_LOM:
-               return IXGBE_SUCCESS;
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber_fixed:
+       case ixgbe_media_type_fiber:
+               hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
+               /* if link is down, assume supported */
+               if (link_up)
+                       supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+                               TRUE : FALSE;
+               else
+                       supported = TRUE;
+               break;
+       case ixgbe_media_type_backplane:
+               supported = TRUE;
+               break;
+       case ixgbe_media_type_copper:
+               /* only some copper devices support flow control autoneg */
+               switch (hw->device_id) {
+               case IXGBE_DEV_ID_82599_T3_LOM:
+               case IXGBE_DEV_ID_X540T:
+               case IXGBE_DEV_ID_X540_BYPASS:
+                       supported = TRUE;
+                       break;
+               default:
+                       supported = FALSE;
+               }
        default:
-               return IXGBE_ERR_FC_NOT_SUPPORTED;
+               break;
        }
+
+       ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+                     "Device %x does not support flow control autoneg",
+                     hw->device_id);
+       return supported;
 }
 
 /**
@@ -174,6 +206,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
        s32 ret_val = IXGBE_SUCCESS;
        u32 reg = 0, reg_bp = 0;
        u16 reg_cu = 0;
+       bool got_lock = FALSE;
 
        DEBUGFUNC("ixgbe_setup_fc");
 
@@ -182,7 +215,8 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
         * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
         */
        if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-               DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+               ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+                          "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
                goto out;
        }
@@ -200,6 +234,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
         * we link at 10G, the 1G advertisement is harmless and vice versa.
         */
        switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber:
        case ixgbe_media_type_backplane:
                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -268,7 +303,8 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
                        reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
                break;
        default:
-               DEBUGOUT("Flow control param set incorrectly\n");
+               ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+                            "Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
@@ -297,9 +333,30 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
         */
        if (hw->phy.media_type == ixgbe_media_type_backplane) {
                reg_bp |= IXGBE_AUTOC_AN_RESTART;
+               /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+                * LESM is on, likewise reset_pipeline requries the lock as
+                * it also writes AUTOC.
+                */
+               if ((hw->mac.type == ixgbe_mac_82599EB) &&
+                   ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                       if (ret_val != IXGBE_SUCCESS) {
+                               ret_val = IXGBE_ERR_SWFW_SYNC;
+                               goto out;
+                       }
+                       got_lock = TRUE;
+               }
+
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+               if (hw->mac.type == ixgbe_mac_82599EB)
+                       ixgbe_reset_pipeline_82599(hw);
+
+               if (got_lock)
+                       hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
        } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-                   (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
+                   (ixgbe_device_supports_autoneg_fc(hw))) {
                hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
                                      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
        }
@@ -680,6 +737,195 @@ s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
 }
 
 /**
+ *  ixgbe_read_pba_raw
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @max_pba_block_size: PBA block size limit
+ *  @pba: pointer to output PBA structure
+ *
+ *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+                      u32 eeprom_buf_size, u16 max_pba_block_size,
+                      struct ixgbe_pba *pba)
+{
+       s32 ret_val;
+       u16 pba_block_size;
+
+       if (pba == NULL)
+               return IXGBE_ERR_PARAM;
+
+       if (eeprom_buf == NULL) {
+               ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+                                                    &pba->word[0]);
+               if (ret_val)
+                       return ret_val;
+       } else {
+               if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+                       pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+                       pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+               } else {
+                       return IXGBE_ERR_PARAM;
+               }
+       }
+
+       if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+               if (pba->pba_block == NULL)
+                       return IXGBE_ERR_PARAM;
+
+               ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
+                                                  eeprom_buf_size,
+                                                  &pba_block_size);
+               if (ret_val)
+                       return ret_val;
+
+               if (pba_block_size > max_pba_block_size)
+                       return IXGBE_ERR_PARAM;
+
+               if (eeprom_buf == NULL) {
+                       ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
+                                                            pba_block_size,
+                                                            pba->pba_block);
+                       if (ret_val)
+                               return ret_val;
+               } else {
+                       if (eeprom_buf_size > (u32)(pba->word[1] +
+                                             pba->pba_block[0])) {
+                               memcpy(pba->pba_block,
+                                      &eeprom_buf[pba->word[1]],
+                                      pba_block_size * sizeof(u16));
+                       } else {
+                               return IXGBE_ERR_PARAM;
+                       }
+               }
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_pba_raw
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @pba: pointer to PBA structure
+ *
+ *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+                       u32 eeprom_buf_size, struct ixgbe_pba *pba)
+{
+       s32 ret_val;
+
+       if (pba == NULL)
+               return IXGBE_ERR_PARAM;
+
+       if (eeprom_buf == NULL) {
+               ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+                                                     &pba->word[0]);
+               if (ret_val)
+                       return ret_val;
+       } else {
+               if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+                       eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
+                       eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
+               } else {
+                       return IXGBE_ERR_PARAM;
+               }
+       }
+
+       if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+               if (pba->pba_block == NULL)
+                       return IXGBE_ERR_PARAM;
+
+               if (eeprom_buf == NULL) {
+                       ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
+                                                             pba->pba_block[0],
+                                                             pba->pba_block);
+                       if (ret_val)
+                               return ret_val;
+               } else {
+                       if (eeprom_buf_size > (u32)(pba->word[1] +
+                                             pba->pba_block[0])) {
+                               memcpy(&eeprom_buf[pba->word[1]],
+                                      pba->pba_block,
+                                      pba->pba_block[0] * sizeof(u16));
+                       } else {
+                               return IXGBE_ERR_PARAM;
+                       }
+               }
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_pba_block_size
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @pba_data_size: pointer to output variable
+ *
+ *  Returns the size of the PBA block in words. Function operates on EEPROM
+ *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
+ *  EEPROM device.
+ *
+ **/
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+                            u32 eeprom_buf_size, u16 *pba_block_size)
+{
+       s32 ret_val;
+       u16 pba_word[2];
+       u16 length;
+
+       DEBUGFUNC("ixgbe_get_pba_block_size");
+
+       if (eeprom_buf == NULL) {
+               ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+                                                    &pba_word[0]);
+               if (ret_val)
+                       return ret_val;
+       } else {
+               if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+                       pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+                       pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+               } else {
+                       return IXGBE_ERR_PARAM;
+               }
+       }
+
+       if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
+               if (eeprom_buf == NULL) {
+                       ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
+                                                     &length);
+                       if (ret_val)
+                               return ret_val;
+               } else {
+                       if (eeprom_buf_size > pba_word[1])
+                               length = eeprom_buf[pba_word[1] + 0];
+                       else
+                               return IXGBE_ERR_PARAM;
+               }
+
+               if (length == 0xFFFF || length == 0)
+                       return IXGBE_ERR_PBA_SECTION;
+       } else {
+               /* PBA number in legacy format, there is no PBA Block. */
+               length = 0;
+       }
+
+       if (pba_block_size != NULL)
+               *pba_block_size = length;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
  *  ixgbe_get_mac_addr_generic - Generic get MAC address
  *  @hw: pointer to hardware structure
  *  @mac_addr: Adapter MAC address
@@ -709,23 +955,18 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
 }
 
 /**
- *  ixgbe_get_bus_info_generic - Generic set PCI bus info
+ *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
  *  @hw: pointer to hardware structure
+ *  @link_status: the link status returned by the PCI config space
  *
- *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
  **/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
-       u16 link_status;
-
-       DEBUGFUNC("ixgbe_get_bus_info_generic");
 
        hw->bus.type = ixgbe_bus_type_pci_express;
 
-       /* Get the negotiated link width and speed from PCI config space */
-       link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
-
        switch (link_status & IXGBE_PCI_LINK_WIDTH) {
        case IXGBE_PCI_LINK_WIDTH_1:
                hw->bus.width = ixgbe_bus_width_pcie_x1;
@@ -760,6 +1001,25 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
        }
 
        mac->ops.set_lan_id(hw);
+}
+
+/**
+ *  ixgbe_get_bus_info_generic - Generic set PCI bus info
+ *  @hw: pointer to hardware structure
+ *
+ *  Gets the PCI bus info (speed, width, type) then calls helper function to
+ *  store this data within the ixgbe_hw structure.
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+       u16 link_status;
+
+       DEBUGFUNC("ixgbe_get_bus_info_generic");
+
+       /* Get the negotiated link width and speed from PCI config space */
+       link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+       ixgbe_set_pci_config_data_generic(hw, link_status);
 
        return IXGBE_SUCCESS;
 }
@@ -1259,16 +1519,18 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
 
        if (words == 0) {
                status = IXGBE_ERR_INVALID_ARGUMENT;
+               ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
                goto out;
        }
 
        if (offset >= hw->eeprom.word_size) {
                status = IXGBE_ERR_EEPROM;
+               ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
                goto out;
        }
 
        for (i = 0; i < words; i++) {
-               eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+               eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
                       IXGBE_EEPROM_RW_REG_START;
 
                IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
@@ -1365,11 +1627,13 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
 
        if (words == 0) {
                status = IXGBE_ERR_INVALID_ARGUMENT;
+               ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
                goto out;
        }
 
        if (offset >= hw->eeprom.word_size) {
                status = IXGBE_ERR_EEPROM;
+               ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
                goto out;
        }
 
@@ -1438,6 +1702,11 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
                }
                usec_delay(5);
        }
+
+       if (i == IXGBE_EERD_EEWR_ATTEMPTS)
+               ERROR_REPORT1(IXGBE_ERROR_POLLING,
+                            "EEPROM read/write done polling timed out");
+
        return status;
 }
 
@@ -1573,14 +1842,15 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                 * was not granted because we don't have access to the EEPROM
                 */
                if (i >= timeout) {
-                       DEBUGOUT("SWESMBI Software EEPROM semaphore "
-                                "not granted.\n");
+                       ERROR_REPORT1(IXGBE_ERROR_POLLING,
+                           "SWESMBI Software EEPROM semaphore not granted.\n");
                        ixgbe_release_eeprom_semaphore(hw);
                        status = IXGBE_ERR_EEPROM;
                }
        } else {
-               DEBUGOUT("Software semaphore SMBI between device drivers "
-                        "not granted.\n");
+               ERROR_REPORT1(IXGBE_ERROR_POLLING,
+                            "Software semaphore SMBI between device drivers "
+                            "not granted.\n");
        }
 
        return status;
@@ -1633,7 +1903,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
 
                usec_delay(5);
                ixgbe_standby_eeprom(hw);
-       }
+       };
 
        /*
         * On some parts, SPI write time could vary from 0-20mSec on 3.3V
@@ -1719,7 +1989,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
                 * EEPROM
                 */
                mask = mask >> 1;
-       }
+       };
 
        /* We leave the "DI" bit set to "0" when we leave this routine. */
        eec &= ~IXGBE_EEC_DI;
@@ -2004,7 +2274,8 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 
        /* Make sure we are using a valid rar index range */
        if (index >= rar_entries) {
-               DEBUGOUT1("RAR index %d is out of range.\n", index);
+               ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+                            "RAR index %d is out of range.\n", index);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }
 
@@ -2053,7 +2324,8 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
 
        /* Make sure we are using a valid rar index range */
        if (index >= rar_entries) {
-               DEBUGOUT1("RAR index %d is out of range.\n", index);
+               ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+                            "RAR index %d is out of range.\n", index);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }
 
@@ -2496,7 +2768,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
                fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
                break;
        default:
-               DEBUGOUT("Flow control param set incorrectly\n");
+               ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+                            "Flow control param set incorrectly\n");
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
                break;
@@ -2557,8 +2830,13 @@ out:
 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
                              u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
 {
-       if ((!(adv_reg)) ||  (!(lp_reg)))
+       if ((!(adv_reg)) ||  (!(lp_reg))) {
+               ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
+                            "Local or link partner's advertised flow control "
+                            "settings are NULL. Local: %x, link partner: %x\n",
+                            adv_reg, lp_reg);
                return IXGBE_ERR_FC_NOT_NEGOTIATED;
+       }
 
        if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
                /*
@@ -2609,8 +2887,11 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
 
        linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
        if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-           (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
+           (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+               ERROR_REPORT1(IXGBE_ERROR_POLLING,
+                            "Auto-Negotiation did not complete or timed out");
                goto out;
+       }
 
        pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
        pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2642,13 +2923,19 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
         * - we are 82599 and link partner is not AN enabled
         */
        links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-       if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
+       if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
+               ERROR_REPORT1(IXGBE_ERROR_POLLING,
+                            "Auto-Negotiation did not complete");
                goto out;
+       }
 
        if (hw->mac.type == ixgbe_mac_82599EB) {
                links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-               if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
+               if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+                       ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+                                    "Link partner is not AN enabled");
                        goto out;
+               }
        }
        /*
         * Read the 10g AN autoc and LP ability registers and resolve
@@ -2710,15 +2997,21 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
         * - FC autoneg is disabled, or if
         * - link is not up.
         */
-       if (hw->fc.disable_fc_autoneg)
+       if (hw->fc.disable_fc_autoneg) {
+               ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+                            "Flow control autoneg is disabled");
                goto out;
+       }
 
        hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
-       if (!link_up)
+       if (!link_up) {
+               ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
                goto out;
+       }
 
        switch (hw->phy.media_type) {
        /* Autoneg flow control on fiber adapters */
+       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber:
                if (speed == IXGBE_LINK_SPEED_1GB_FULL)
                        ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2731,7 +3024,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 
        /* Autoneg flow control on copper adapters */
        case ixgbe_media_type_copper:
-               if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
+               if (ixgbe_device_supports_autoneg_fc(hw))
                        ret_val = ixgbe_fc_autoneg_copper(hw);
                break;
 
@@ -2748,6 +3041,53 @@ out:
        }
 }
 
+/**
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec.  Never return less than
+ * 800 = 80 millisec.
+ */
+static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+       s16 devctl2;
+       u32 pollcnt;
+
+       devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+       devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+       switch (devctl2) {
+       case IXGBE_PCIDEVCTRL2_65_130ms:
+               pollcnt = 1300;         /* 130 millisec */
+               break;
+       case IXGBE_PCIDEVCTRL2_260_520ms:
+               pollcnt = 5200;         /* 520 millisec */
+               break;
+       case IXGBE_PCIDEVCTRL2_1_2s:
+               pollcnt = 20000;        /* 2 sec */
+               break;
+       case IXGBE_PCIDEVCTRL2_4_8s:
+               pollcnt = 80000;        /* 8 sec */
+               break;
+       case IXGBE_PCIDEVCTRL2_17_34s:
+               pollcnt = 340000;       /* 34 sec */
+               break;
+       case IXGBE_PCIDEVCTRL2_50_100us:        /* 100 microsecs */
+       case IXGBE_PCIDEVCTRL2_1_2ms:           /* 2 millisecs */
+       case IXGBE_PCIDEVCTRL2_16_32ms:         /* 32 millisec */
+       case IXGBE_PCIDEVCTRL2_16_32ms_def:     /* 32 millisec default */
+       default:
+               pollcnt = 800;          /* 80 millisec minimum */
+               break;
+       }
+
+       /* add 10% to spec maximum */
+       return (pollcnt * 11) / 10;
+}
+
 /**
  *  ixgbe_disable_pcie_master - Disable PCI-express master access
  *  @hw: pointer to hardware structure
@@ -2760,14 +3100,14 @@ out:
 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
        s32 status = IXGBE_SUCCESS;
-       u32 i;
+       u32 i, poll;
 
        DEBUGFUNC("ixgbe_disable_pcie_master");
 
        /* Always set this bit to ensure any future transactions are blocked */
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
 
-       /* Exit if master requets are blocked */
+       /* Exit if master requests are blocked */
        if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
                goto out;
 
@@ -2793,14 +3133,16 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
         * Before proceeding, make sure that the PCIe block does not have
         * transactions pending.
         */
-       for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+       poll = ixgbe_pcie_timeout_poll(hw);
+       for (i = 0; i < poll; i++) {
                usec_delay(100);
                if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
                    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
                        goto out;
        }
 
-       DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
+       ERROR_REPORT1(IXGBE_ERROR_POLLING,
+                    "PCIe transaction pending bit also did not clear.\n");
        status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
 
 out:
@@ -2817,44 +3159,41 @@ out:
  **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 {
-       u32 gssr;
+       u32 gssr = 0;
        u32 swmask = mask;
        u32 fwmask = mask << 5;
-       s32 timeout = 200;
+       u32 timeout = 200;
+       u32 i;
 
        DEBUGFUNC("ixgbe_acquire_swfw_sync");
 
-       while (timeout) {
+       for (i = 0; i < timeout; i++) {
                /*
-                * SW EEPROM semaphore bit is used for access to all
-                * SW_FW_SYNC/GSSR bits (not just EEPROM)
+                * SW NVM semaphore bit is used for access to all
+                * SW_FW_SYNC bits (not just NVM)
                 */
                if (ixgbe_get_eeprom_semaphore(hw))
                        return IXGBE_ERR_SWFW_SYNC;
 
                gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
-               if (!(gssr & (fwmask | swmask)))
-                       break;
-
-               /*
-                * Firmware currently using resource (fwmask) or other software
-                * thread currently using resource (swmask)
-                */
-               ixgbe_release_eeprom_semaphore(hw);
-               msec_delay(5);
-               timeout--;
-       }
-
-       if (!timeout) {
-               DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
-               return IXGBE_ERR_SWFW_SYNC;
+               if (!(gssr & (fwmask | swmask))) {
+                       gssr |= swmask;
+                       IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+                       ixgbe_release_eeprom_semaphore(hw);
+                       return IXGBE_SUCCESS;
+               } else {
+                       /* Resource is currently in use by FW or SW */
+                       ixgbe_release_eeprom_semaphore(hw);
+                       msec_delay(5);
+               }
        }
 
-       gssr |= swmask;
-       IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+       /* If time expired clear the bits holding the lock and retry */
+       if (gssr & (fwmask | swmask))
+               ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
 
-       ixgbe_release_eeprom_semaphore(hw);
-       return IXGBE_SUCCESS;
+       msec_delay(5);
+       return IXGBE_ERR_SWFW_SYNC;
 }
 
 /**
@@ -2965,6 +3304,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        bool link_up = 0;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+       s32 ret_val = IXGBE_SUCCESS;
 
        DEBUGFUNC("ixgbe_blink_led_start_generic");
 
@@ -2975,10 +3315,29 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
 
        if (!link_up) {
+               /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+                * LESM is on.
+                */
+               bool got_lock = FALSE;
+               if ((hw->mac.type == ixgbe_mac_82599EB) &&
+                   ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+                       if (ret_val != IXGBE_SUCCESS) {
+                               ret_val = IXGBE_ERR_SWFW_SYNC;
+                               goto out;
+                       }
+                       got_lock = TRUE;
+               }
+
                autoc_reg |= IXGBE_AUTOC_AN_RESTART;
                autoc_reg |= IXGBE_AUTOC_FLU;
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
                IXGBE_WRITE_FLUSH(hw);
+
+               if (got_lock)
+                       hw->mac.ops.release_swfw_sync(hw,
+                                                     IXGBE_GSSR_MAC_CSR_SM);
                msec_delay(10);
        }
 
@@ -2987,7 +3346,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);
 
-       return IXGBE_SUCCESS;
+out:
+       return ret_val;
 }
 
 /**
@@ -2999,21 +3359,43 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
 {
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+       s32 ret_val = IXGBE_SUCCESS;
+       bool got_lock = FALSE;
 
        DEBUGFUNC("ixgbe_blink_led_stop_generic");
+       /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+        * LESM is on.
+        */
+       if ((hw->mac.type == ixgbe_mac_82599EB) &&
+           ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+               ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                               IXGBE_GSSR_MAC_CSR_SM);
+               if (ret_val != IXGBE_SUCCESS) {
+                       ret_val = IXGBE_ERR_SWFW_SYNC;
+                       goto out;
+               }
+               got_lock = TRUE;
+       }
 
 
        autoc_reg &= ~IXGBE_AUTOC_FLU;
        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 
+       if (hw->mac.type == ixgbe_mac_82599EB)
+               ixgbe_reset_pipeline_82599(hw);
+
+       if (got_lock)
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
        led_reg &= ~IXGBE_LED_MODE_MASK(index);
        led_reg &= ~IXGBE_LED_BLINK(index);
        led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);
 
-       return IXGBE_SUCCESS;
+out:
+       return ret_val;
 }
 
 /**
@@ -3028,15 +3410,23 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
                                         u16 *san_mac_offset)
 {
+       s32 ret_val;
+
        DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
 
        /*
         * First read the EEPROM pointer to see if the MAC addresses are
         * available.
         */
-       hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+       ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+                                     san_mac_offset);
+       if (ret_val) {
+               ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+                             "eeprom at offset %d failed",
+                             IXGBE_SAN_MAC_ADDR_PTR);
+       }
 
-       return IXGBE_SUCCESS;
+       return ret_val;
 }
 
 /**
@@ -3053,6 +3443,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
 {
        u16 san_mac_data, san_mac_offset;
        u8 i;
+       s32 ret_val;
 
        DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
 
@@ -3060,18 +3451,9 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
         * First read the EEPROM pointer to see if the MAC addresses are
         * available.  If they're not, no point in calling set_lan_id() here.
         */
-       ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
-
-       if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
-               /*
-                * No addresses available in this EEPROM.  It's not an
-                * error though, so just wipe the local address and return.
-                */
-               for (i = 0; i < 6; i++)
-                       san_mac_addr[i] = 0xFF;
-
+       ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+       if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
                goto san_mac_addr_out;
-       }
 
        /* make sure we know which port we need to program */
        hw->mac.ops.set_lan_id(hw);
@@ -3079,13 +3461,27 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
        (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
                         (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
        for (i = 0; i < 3; i++) {
-               hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+               ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+                                             &san_mac_data);
+               if (ret_val) {
+                       ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+                                     "eeprom read at offset %d failed",
+                                     san_mac_offset);
+                       goto san_mac_addr_out;
+               }
                san_mac_addr[i * 2] = (u8)(san_mac_data);
                san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
                san_mac_offset++;
        }
+       return IXGBE_SUCCESS;
 
 san_mac_addr_out:
+       /*
+        * No addresses available in this EEPROM.  It's not an
+        * error though, so just wipe the local address and return.
+        */
+       for (i = 0; i < 6; i++)
+               san_mac_addr[i] = 0xFF;
        return IXGBE_SUCCESS;
 }
 
@@ -3098,19 +3494,16 @@ san_mac_addr_out:
  **/
 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
 {
-       s32 status = IXGBE_SUCCESS;
+       s32 ret_val;
        u16 san_mac_data, san_mac_offset;
        u8 i;
 
        DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
 
        /* Look for SAN mac address pointer.  If not defined, return */
-       ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
-
-       if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
-               status = IXGBE_ERR_NO_SAN_ADDR_PTR;
-               goto san_mac_addr_out;
-       }
+       ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+       if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+               return IXGBE_ERR_NO_SAN_ADDR_PTR;
 
        /* Make sure we know which port we need to write */
        hw->mac.ops.set_lan_id(hw);
@@ -3125,8 +3518,7 @@ s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
                san_mac_offset++;
        }
 
-san_mac_addr_out:
-       return status;
+       return IXGBE_SUCCESS;
 }
 
 /**
@@ -3253,7 +3645,8 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 
        /* Make sure we are using a valid rar index range */
        if (rar >= rar_entries) {
-               DEBUGOUT1("RAR index %d is out of range.\n", rar);
+               ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+                            "RAR index %d is out of range.\n", rar);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }
 
@@ -3302,7 +3695,8 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
 
        /* Make sure we are using a valid rar index range */
        if (rar >= rar_entries) {
-               DEBUGOUT1("RAR index %d is out of range.\n", rar);
+               ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+                            "RAR index %d is out of range.\n", rar);
                return IXGBE_ERR_INVALID_ARGUMENT;
        }
 
@@ -3401,7 +3795,8 @@ s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
                if (first_empty_slot)
                        regindex = first_empty_slot;
                else {
-                       DEBUGOUT("No space in VLVF.\n");
+                       ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+                                    "No space in VLVF.\n");
                        regindex = IXGBE_ERR_NO_SPACE;
                }
        }
@@ -3691,8 +4086,9 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
        *wwpn_prefix = 0xFFFF;
 
        /* check if alternative SAN MAC is supported */
-       hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
-                           &alt_san_mac_blk_offset);
+       offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+       if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+               goto wwn_prefix_err;
 
        if ((alt_san_mac_blk_offset == 0) ||
            (alt_san_mac_blk_offset == 0xFFFF))
@@ -3700,19 +4096,29 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 
        /* check capability in alternative san mac address block */
        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
-       hw->eeprom.ops.read(hw, offset, &caps);
+       if (hw->eeprom.ops.read(hw, offset, &caps))
+               goto wwn_prefix_err;
        if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
                goto wwn_prefix_out;
 
        /* get the corresponding prefix for WWNN/WWPN */
        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
-       hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+       if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
+               ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+                             "eeprom read at offset %d failed", offset);
+       }
 
        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
-       hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+       if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+               goto wwn_prefix_err;
 
 wwn_prefix_out:
        return IXGBE_SUCCESS;
+
+wwn_prefix_err:
+       ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+                     "eeprom read at offset %d failed", offset);
+       return IXGBE_SUCCESS;
 }
 
 /**
@@ -3882,7 +4288,7 @@ void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
  *  Calculates the checksum for some buffer on a specified length.  The
  *  checksum calculated is returned.
  **/
-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
 {
        u32 i;
        u8 sum = 0;
@@ -3908,8 +4314,8 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
  *  Communicates with the manageability block.  On success return IXGBE_SUCCESS
  *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
  **/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
-                                       u32 length)
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+                                u32 length)
 {
        u32 hicr, i, bi;
        u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
@@ -4158,3 +4564,21 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 }
 
+
+/**
+ * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
+{
+       u32 reg, i;
+
+       reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+       for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+               map[i] = IXGBE_RTRUP2TC_UP_MASK &
+                       (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
+       return;
+}
similarity index 86%
rename from sys/dev/netif/ixgbe/ixgbe_common.h
rename to sys/dev/netif/ix/ixgbe_common.h
index 0dad30e..7d1ee82 100644 (file)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +30,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$FreeBSD: src/sys/dev/ixgbe/ixgbe_common.h,v 1.12 2012/07/05 20:51:44 jfv Exp $*/
+/*$FreeBSD$*/
 
 #ifndef _IXGBE_COMMON_H_
 #define _IXGBE_COMMON_H_
                IXGBE_WRITE_REG(hw, reg, (u32) value); \
                IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
        } while (0)
+#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW)
+struct ixgbe_pba {
+       u16 word[2];
+       u16 *pba_block;
+};
+#endif
 
-u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map);
 
+u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -52,8 +59,16 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
                                  u32 pba_num_size);
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+                      u32 eeprom_buf_size, u16 max_pba_block_size,
+                      struct ixgbe_pba *pba);
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+                       u32 eeprom_buf_size, struct ixgbe_pba *pba);
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+                            u32 eeprom_buf_size, u16 *pba_block_size);
 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status);
 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
 
@@ -96,6 +111,7 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
 
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
 void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
@@ -137,5 +153,12 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
                                 u8 build, u8 ver);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+                                u32 length);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
+
 #endif /* IXGBE_COMMON */
diff --git a/sys/dev/netif/ix/ixgbe_dcb.c b/sys/dev/netif/ix/ixgbe_dcb.c
new file mode 100644 (file)
index 0000000..149aad1
--- /dev/null
@@ -0,0 +1,718 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2013, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programmable into the underlying
+ * hardware. The IEEE 802.1Qaz specification does not use bandwidth
+ * groups so this is much simplified from the CEE case.
+ */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
+                                  int max_frame_size)
+{
+       int min_percent = 100;
+       int min_credit, multiplier;
+       int i;
+
+       min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+                       IXGBE_DCB_CREDIT_QUANTUM;
+
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               if (bw[i] < min_percent && bw[i])
+                       min_percent = bw[i];
+       }
+
+       multiplier = (min_credit / min_percent) + 1;
+
+       /* Find out the hw credits for each TC */
+       for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL);
+
+               if (val < min_credit)
+                       val = min_credit;
+               refill[i] = (u16)val;
+
+               max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit;
+       }
+
+       return 0;
+}
+
+/**
+ * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
+ * @dcb_config: Struct containing DCB settings.
+ * @direction: Configuring either Tx or Rx.
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * ixgbe_dcb_check_config_cee().
+ */
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
+                                  struct ixgbe_dcb_config *dcb_config,
+                                  u32 max_frame_size, u8 direction)
+{
+       struct ixgbe_dcb_tc_path *p;
+       u32 min_multiplier      = 0;
+       u16 min_percent         = 100;
+       s32 ret_val =           IXGBE_SUCCESS;
+       /* Initialization values default for Tx settings */
+       u32 min_credit          = 0;
+       u32 credit_refill       = 0;
+       u32 credit_max          = 0;
+       u16 link_percentage     = 0;
+       u8  bw_percent          = 0;
+       u8  i;
+
+       if (dcb_config == NULL) {
+               ret_val = IXGBE_ERR_CONFIG;
+               goto out;
+       }
+
+       min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+                    IXGBE_DCB_CREDIT_QUANTUM;