ixgbe: enable VLAN code
sys/dev/netif/ixgbe/ixgbe.c
/******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.70 2012/07/05 20:51:44 jfv Exp $*/
34
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
37
38 #include "ixgbe.h"
39
40 /*********************************************************************
41  *  Set this to one to display debug statistics
42  *********************************************************************/
43 int             ixgbe_display_debug_stats = 0;
44
45 /*********************************************************************
46  *  Driver version
47  *********************************************************************/
48 char ixgbe_driver_version[] = "2.4.8";
49
50 /*********************************************************************
51  *  PCI Device ID Table
52  *
53  *  Used by probe to select devices to load on
54  *  Last field stores an index into ixgbe_strings
55  *  Last entry must be all 0s
56  *
57  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
58  *********************************************************************/
59
static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
{
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
        {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
        /* required last entry */
        {0, 0, 0, 0, 0}
};
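/*
 * A zero SubVendor or SubDevice ID above acts as a wildcard in
 * ixgbe_probe()'s match loop, so every subsystem variant of a listed
 * device ID is accepted.
 */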

/*********************************************************************
 *  Table of branding strings
 *********************************************************************/

static char    *ixgbe_strings[] = {
        "Intel(R) PRO/10GbE PCI-Express Network Driver"
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
static int      ixgbe_probe(device_t);
static int      ixgbe_attach(device_t);
static int      ixgbe_detach(device_t);
static int      ixgbe_shutdown(device_t);
static void     ixgbe_start(struct ifnet *);
static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
#if 0 /* __FreeBSD_version >= 800000 */
static int      ixgbe_mq_start(struct ifnet *, struct mbuf *);
static int      ixgbe_mq_start_locked(struct ifnet *,
                    struct tx_ring *, struct mbuf *);
static void     ixgbe_qflush(struct ifnet *);
#endif
static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void     ixgbe_init(void *);
static void     ixgbe_init_locked(struct adapter *);
static void     ixgbe_stop(void *);
static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
static int      ixgbe_media_change(struct ifnet *);
static void     ixgbe_identify_hardware(struct adapter *);
static int      ixgbe_allocate_pci_resources(struct adapter *);
static int      ixgbe_allocate_msix(struct adapter *);
static int      ixgbe_allocate_legacy(struct adapter *);
static int      ixgbe_allocate_queues(struct adapter *);
#if 0   /* HAVE_MSIX */
static int      ixgbe_setup_msix(struct adapter *);
#endif
static void     ixgbe_free_pci_resources(struct adapter *);
static void     ixgbe_local_timer(void *);
static int      ixgbe_setup_interface(device_t, struct adapter *);
static void     ixgbe_config_link(struct adapter *);

static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
static int      ixgbe_setup_transmit_structures(struct adapter *);
static void     ixgbe_setup_transmit_ring(struct tx_ring *);
static void     ixgbe_initialize_transmit_units(struct adapter *);
static void     ixgbe_free_transmit_structures(struct adapter *);
static void     ixgbe_free_transmit_buffers(struct tx_ring *);

static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
static int      ixgbe_setup_receive_structures(struct adapter *);
static int      ixgbe_setup_receive_ring(struct rx_ring *);
static void     ixgbe_initialize_receive_units(struct adapter *);
static void     ixgbe_free_receive_structures(struct adapter *);
static void     ixgbe_free_receive_buffers(struct rx_ring *);
#if 0   /* NET_LRO */
static void     ixgbe_setup_hw_rsc(struct rx_ring *);
#endif

static void     ixgbe_enable_intr(struct adapter *);
static void     ixgbe_disable_intr(struct adapter *);
static void     ixgbe_update_stats_counters(struct adapter *);
static bool     ixgbe_txeof(struct tx_ring *);
static bool     ixgbe_rxeof(struct ix_queue *, int);
static void     ixgbe_rx_checksum(u32, struct mbuf *, u32);
static void     ixgbe_set_promisc(struct adapter *);
static void     ixgbe_set_multi(struct adapter *);
static void     ixgbe_update_link_status(struct adapter *);
static void     ixgbe_refresh_mbufs(struct rx_ring *, int);
static int      ixgbe_xmit(struct tx_ring *, struct mbuf **);
static int      ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int      ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
static int      ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS);
static int      ixgbe_dma_malloc(struct adapter *, bus_size_t,
                    struct ixgbe_dma_alloc *, int);
static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
static void     ixgbe_add_rx_process_limit(struct adapter *, const char *,
                    const char *, int *, int);
static bool     ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
#if 0   /* NET_TSO */
static bool     ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *);
#endif
static void     ixgbe_set_ivar(struct adapter *, u8, u8, s8);
static void     ixgbe_configure_ivars(struct adapter *);
static u8 *     ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

static void     ixgbe_setup_vlan_hw_support(struct adapter *);
static void     ixgbe_register_vlan(void *, struct ifnet *, u16);
static void     ixgbe_unregister_vlan(void *, struct ifnet *, u16);

static void     ixgbe_add_hw_stats(struct adapter *adapter);

static __inline void ixgbe_rx_discard(struct rx_ring *, int);
static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
                    struct mbuf *, u32);

/* Support for pluggable optic modules */
static bool     ixgbe_sfp_probe(struct adapter *);
static void     ixgbe_setup_optics(struct adapter *);

/* Legacy (single vector) interrupt handler */
static void     ixgbe_legacy_irq(void *);

/* The MSI/X Interrupt handlers */
static void     ixgbe_msix_que(void *);
static void     ixgbe_msix_link(void *);

/* Deferred interrupt tasklets */
static void     ixgbe_handle_que(void *, int);
static void     ixgbe_handle_link(void *, int);
static void     ixgbe_handle_msf(void *, int);
static void     ixgbe_handle_mod(void *, int);

#ifdef IXGBE_FDIR
static void     ixgbe_atr(struct tx_ring *, struct mbuf *);
static void     ixgbe_reinit_fdir(void *, int);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixgbe_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, ixgbe_probe),
        DEVMETHOD(device_attach, ixgbe_attach),
        DEVMETHOD(device_detach, ixgbe_detach),
        DEVMETHOD(device_shutdown, ixgbe_shutdown),
        {0, 0}
};

static driver_t ixgbe_driver = {
        "ix", ixgbe_methods, sizeof(struct adapter),
};

devclass_t ixgbe_devclass;
DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);

MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
MODULE_DEPEND(ixgbe, ether, 1, 1, 1);

/*
** TUNEABLE PARAMETERS:
*/

/*
** AIM: Adaptive Interrupt Moderation,
** which means the interrupt rate is
** varied over time based on the traffic
** seen on each interrupt vector.
*/
static int ixgbe_enable_aim = TRUE;
TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);

static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);

/* How many packets rxeof tries to clean at a time */
static int ixgbe_rx_process_limit = 128;
TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);

/*
** Smart speed setting, default to on.
** This only works as a compile-time
** option right now because it is applied
** during attach; set it to
** 'ixgbe_smart_speed_off' to disable.
*/
static int ixgbe_smart_speed = ixgbe_smart_speed_on;

static int ixgbe_msi_enable = 1;
TUNABLE_INT("hw.ixgbe.msi.enable", &ixgbe_msi_enable);

/*
 * MSIX should be the default for best performance,
 * but this allows it to be forced off for testing.
 */
static int ixgbe_enable_msix = 1;
TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads,
 * but in others it actually hurts, so it is off
 * by default.
 */
static int ixgbe_header_split = FALSE;
TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);

/*
 * Number of queues; can be set to 0, in which
 * case it autoconfigures based on the number
 * of cpus with a maximum of 8.  It can also be
 * overridden manually here.
 */
static int ixgbe_num_queues = 0;
TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);

/*
** Number of TX descriptors per ring;
** set higher than RX as this seems to
** be the better performing choice.
*/
static int ixgbe_txd = PERFORM_TXD;
TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);

/* Number of RX descriptors per ring */
static int ixgbe_rxd = PERFORM_RXD;
TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
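/*
 * All of the tunables above are fetched with TUNABLE_INT() at module
 * load time, so they can be seeded from loader.conf.  A minimal sketch
 * with purely illustrative values (the names are the ones registered
 * above; pick values to suit the workload):
 *
 *   hw.ixgbe.enable_aim=1
 *   hw.ixgbe.max_interrupt_rate=31250
 *   hw.ixgbe.rx_process_limit=256
 *   hw.ixgbe.num_queues=4
 *   hw.ixgbe.txd=2048
 *   hw.ixgbe.rxd=2048
 */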

/* Keep a running tab of the total ports for sanity checks */
static int ixgbe_total_ports;

#ifdef IXGBE_FDIR
/*
** For Flow Director: this is the number
** of TX packets we sample for the filter
** pool; with the default of 20, every
** 20th packet will be probed.
**
** This feature can be disabled by
** setting this to 0.
*/
static int atr_sample_rate = 20;
/*
** Flow Director actually 'steals' part
** of the packet buffer as its filter
** pool; this variable controls how much
** it uses:
**  0 = 64K, 1 = 128K, 2 = 256K
*/
static int fdir_pballoc = 1;
#endif

#ifdef DEV_NETMAP
/*
 * The #ifdef DEV_NETMAP / #endif blocks in this file are meant to
 * be a reference on how to implement netmap support in a driver.
 * Additional comments are in ixgbe_netmap.h .
 *
 * <dev/netmap/ixgbe_netmap.h> contains functions for netmap support
 * that extend the standard driver.
 */
#include <dev/netmap/ixgbe_netmap.h>
#endif /* DEV_NETMAP */

/*********************************************************************
 *  Device identification routine
 *
 *  ixgbe_probe determines if the driver should be loaded on
 *  an adapter based on its PCI vendor/device id.
 *
 *  return BUS_PROBE_DEFAULT on success, positive on failure
 *********************************************************************/

static int
ixgbe_probe(device_t dev)
{
        ixgbe_vendor_info_t *ent;

        u16     pci_vendor_id = 0;
        u16     pci_device_id = 0;
        u16     pci_subvendor_id = 0;
        u16     pci_subdevice_id = 0;
        char    adapter_name[256];

        INIT_DEBUGOUT("ixgbe_probe: begin");

        pci_vendor_id = pci_get_vendor(dev);
        if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
                return (ENXIO);

        pci_device_id = pci_get_device(dev);
        pci_subvendor_id = pci_get_subvendor(dev);
        pci_subdevice_id = pci_get_subdevice(dev);

        ent = ixgbe_vendor_info_array;
        while (ent->vendor_id != 0) {
                if ((pci_vendor_id == ent->vendor_id) &&
                    (pci_device_id == ent->device_id) &&
                    ((pci_subvendor_id == ent->subvendor_id) ||
                     (ent->subvendor_id == 0)) &&
                    ((pci_subdevice_id == ent->subdevice_id) ||
                     (ent->subdevice_id == 0))) {
                        ksprintf(adapter_name, "%s, Version - %s",
                                ixgbe_strings[ent->index],
                                ixgbe_driver_version);
                        device_set_desc_copy(dev, adapter_name);
                        ++ixgbe_total_ports;
                        return (BUS_PROBE_DEFAULT);
                }
                ent++;
        }
        return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_attach(device_t dev)
{
        struct adapter *adapter;
        struct ixgbe_hw *hw;
        int             error = 0;
        u16             csum;
        u32             ctrl_ext;

        INIT_DEBUGOUT("ixgbe_attach: begin");

        if (resource_disabled("ixgbe", device_get_unit(dev))) {
                device_printf(dev, "Disabled by device hint\n");
                return (ENXIO);
        }

        /* Allocate, clear, and link in our adapter structure */
        adapter = device_get_softc(dev);
        adapter->dev = adapter->osdep.dev = dev;
        hw = &adapter->hw;

        /* Core Lock Init */
        IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

        /* SYSCTL APIs */

        sysctl_ctx_init(&adapter->sysctl_ctx);
        adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
            device_get_nameunit(adapter->dev), CTLFLAG_RD, 0, "");
        if (adapter->sysctl_tree == NULL) {
                device_printf(adapter->dev, "can't add sysctl node\n");
                return (EINVAL);
        }
        SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
                        SYSCTL_CHILDREN(adapter->sysctl_tree),
                        OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
                        adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");

        SYSCTL_ADD_INT(&adapter->sysctl_ctx,
                        SYSCTL_CHILDREN(adapter->sysctl_tree),
                        OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
                        &ixgbe_enable_aim, 1, "Interrupt Moderation");

        /*
        ** Allow a kind of speed control by forcing the autoneg
        ** advertised speed list to only a certain value; this
        ** supports 1G on 82599 devices and 100Mb on X540.
        */
        SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
                        SYSCTL_CHILDREN(adapter->sysctl_tree),
                        OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
                        adapter, 0, ixgbe_set_advertise, "I", "Link Speed");

        SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
                        SYSCTL_CHILDREN(adapter->sysctl_tree),
                        OID_AUTO, "ts", CTLTYPE_INT | CTLFLAG_RW, adapter,
                        0, ixgbe_set_thermal_test, "I", "Thermal Test");

        /* Set up the timer callout */
        callout_init_mp(&adapter->timer);

        /* Determine hardware revision */
        ixgbe_identify_hardware(adapter);

        /* Do base PCI setup - map BAR0 */
        if (ixgbe_allocate_pci_resources(adapter)) {
                device_printf(dev, "Allocation of PCI resources failed\n");
                error = ENXIO;
                goto err_out;
        }

        /* Do descriptor calc and sanity checks */
        if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
                device_printf(dev, "TXD config issue, using default!\n");
                adapter->num_tx_desc = DEFAULT_TXD;
        } else
                adapter->num_tx_desc = ixgbe_txd;

        /*
        ** With many RX rings it is easy to exceed the
        ** system mbuf allocation. Tuning nmbclusters
        ** can alleviate this.
        */
        if (nmbclusters > 0) {
                int s;
                s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
                if (s > nmbclusters) {
                        device_printf(dev, "RX Descriptors exceed "
                            "system mbuf max, using default instead!\n");
                        ixgbe_rxd = DEFAULT_RXD;
                }
        }
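        /*
         * Worked example of the check above, with illustrative numbers:
         * ixgbe_rxd = 2048, 8 queues and 2 probed ports would want
         * 2048 * 8 * 2 = 32768 clusters, so unless nmbclusters is at
         * least that large the RX ring size falls back to DEFAULT_RXD.
         */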

        if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
            ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
                device_printf(dev, "RXD config issue, using default!\n");
                adapter->num_rx_desc = DEFAULT_RXD;
        } else
                adapter->num_rx_desc = ixgbe_rxd;
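        /*
         * The DBA_ALIGN tests above keep the descriptor rings
         * base-address aligned: assuming the 16-byte advanced
         * descriptors and the DBA_ALIGN of 128 used by this driver
         * family, any descriptor count that is a multiple of 8 passes,
         * e.g. 2048 * 16 = 32768 bytes, which is 128-byte aligned.
         * (Sketch only; the authoritative constants live in ixgbe.h.)
         */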

        /* Allocate our TX/RX Queues */
        if (ixgbe_allocate_queues(adapter)) {
                error = ENOMEM;
                goto err_out;
        }

        /* Allocate multicast array memory. */
        adapter->mta = kmalloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
            MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
        if (adapter->mta == NULL) {
                device_printf(dev, "Cannot allocate multicast setup array\n");
                error = ENOMEM;
                goto err_late;
        }

        /* Initialize the shared code */
        error = ixgbe_init_shared_code(hw);
        if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
                /*
                ** No optics in this port; set up
                ** so the timer routine will probe
                ** for later insertion.
                */
                adapter->sfp_probe = TRUE;
                error = 0;
        } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                device_printf(dev, "Unsupported SFP+ module detected!\n");
                error = EIO;
                goto err_late;
        } else if (error) {
                device_printf(dev, "Unable to initialize the shared code\n");
                error = EIO;
                goto err_late;
        }

        /* Make sure we have a good EEPROM before we read from it */
        if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
                device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
                error = EIO;
                goto err_late;
        }

        error = ixgbe_init_hw(hw);
        switch (error) {
        case IXGBE_ERR_EEPROM_VERSION:
                device_printf(dev, "This device is a pre-production adapter/"
                    "LOM.  Please be aware there may be issues associated "
                    "with your hardware.\nIf you are experiencing problems "
                    "please contact your Intel or hardware representative "
                    "who provided you with this hardware.\n");
                break;
        case IXGBE_ERR_SFP_NOT_SUPPORTED:
                device_printf(dev, "Unsupported SFP+ Module\n");
                error = EIO;
                device_printf(dev, "Hardware Initialization Failure\n");
                goto err_late;
        case IXGBE_ERR_SFP_NOT_PRESENT:
                device_printf(dev, "No SFP+ Module found\n");
                /* falls thru */
        default:
                break;
        }

        /* Detect and set physical type */
        ixgbe_setup_optics(adapter);

        if ((adapter->msix > 1) && (ixgbe_enable_msix))
                error = ixgbe_allocate_msix(adapter);
        else
                error = ixgbe_allocate_legacy(adapter);
        if (error)
                goto err_late;

        /* Setup OS specific network interface */
        if (ixgbe_setup_interface(dev, adapter) != 0)
                goto err_late;

        /* Sysctl for limiting the amount of work done in the taskqueue */
        ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
            "max number of rx packets to process", &adapter->rx_process_limit,
            ixgbe_rx_process_limit);

        /* Initialize statistics */
        ixgbe_update_stats_counters(adapter);

        /* Register for VLAN events */
        adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
            ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
        adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
            ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

        /* Print PCIE bus type/speed/width info */
        ixgbe_get_bus_info(hw);
        device_printf(dev, "PCI Express Bus: Speed %s %s\n",
            ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s" :
            (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s" : "Unknown"),
            (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
            (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
            (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
            ("Unknown"));

        if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
            (hw->bus.speed == ixgbe_bus_speed_2500)) {
                device_printf(dev, "PCI-Express bandwidth available"
                    " for this card\n     is not sufficient for"
                    " optimal performance.\n");
                device_printf(dev, "For optimal performance a x8 "
                    "PCIE, or x4 PCIE 2 slot is required.\n");
        }

        /* let hardware know driver is loaded */
        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);

        ixgbe_add_hw_stats(adapter);

#ifdef DEV_NETMAP
        ixgbe_netmap_attach(adapter);
#endif /* DEV_NETMAP */
        INIT_DEBUGOUT("ixgbe_attach: end");
        return (0);
err_late:
        ixgbe_free_transmit_structures(adapter);
        ixgbe_free_receive_structures(adapter);
err_out:
        if (adapter->ifp != NULL)
                if_free(adapter->ifp);
        ixgbe_free_pci_resources(adapter);
        kfree(adapter->mta, M_DEVBUF);
        return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgbe_detach(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);
        struct ix_queue *que = adapter->queues;
        u32     ctrl_ext;

        INIT_DEBUGOUT("ixgbe_detach: begin");

        /* Make sure VLANs are not using the driver */
        if (adapter->ifp->if_vlantrunks != NULL) {
                device_printf(dev, "VLAN in use, detach first\n");
                return (EBUSY);
        }

        IXGBE_CORE_LOCK(adapter);
        ixgbe_stop(adapter);
        IXGBE_CORE_UNLOCK(adapter);

        for (int i = 0; i < adapter->num_queues; i++, que++) {
                if (que->tq) {
                        taskqueue_drain(que->tq, &que->que_task);
                        taskqueue_free(que->tq);
                }
        }

        /* Drain the Link queue */
        if (adapter->tq) {
                taskqueue_drain(adapter->tq, &adapter->link_task);
                taskqueue_drain(adapter->tq, &adapter->mod_task);
                taskqueue_drain(adapter->tq, &adapter->msf_task);
#ifdef IXGBE_FDIR
                taskqueue_drain(adapter->tq, &adapter->fdir_task);
#endif
                taskqueue_free(adapter->tq);
        }

        /* let hardware know driver is unloading */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);

        /* Unregister VLAN events */
        if (adapter->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
        if (adapter->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

        ether_ifdetach(adapter->ifp);
        callout_stop(&adapter->timer);
#ifdef DEV_NETMAP
        netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
        ixgbe_free_pci_resources(adapter);
        bus_generic_detach(dev);
        if_free(adapter->ifp);

        ixgbe_free_transmit_structures(adapter);
        ixgbe_free_receive_structures(adapter);
        kfree(adapter->mta, M_DEVBUF);
        sysctl_ctx_free(&adapter->sysctl_ctx);

        IXGBE_CORE_LOCK_DESTROY(adapter);
        return (0);
}

/*********************************************************************
 *
 *  Shutdown entry point
 *
 **********************************************************************/

static int
ixgbe_shutdown(device_t dev)
{
        struct adapter *adapter = device_get_softc(dev);

        IXGBE_CORE_LOCK(adapter);
        ixgbe_stop(adapter);
        IXGBE_CORE_UNLOCK(adapter);
        return (0);
}


/*********************************************************************
 *  Transmit entry point
 *
 *  ixgbe_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified
 *  and the packet is requeued.
 **********************************************************************/

static void
ixgbe_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
        struct mbuf    *m_head;
        struct adapter *adapter = txr->adapter;

        IXGBE_TX_LOCK_ASSERT(txr);

        if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
                return;
        if (!adapter->link_active)
                return;

        while (!ifq_is_empty(&ifp->if_snd)) {
                if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) {
                        txr->queue_status |= IXGBE_QUEUE_DEPLETED;
                        break;
                }

                m_head = ifq_dequeue(&ifp->if_snd, NULL);
                if (m_head == NULL)
                        break;

                if (ixgbe_xmit(txr, &m_head)) {
#if 0 /* XXX: prepend to an ALTQ queue ? */
                        if (m_head != NULL)
                                IF_PREPEND(&ifp->if_snd, m_head);
#endif
                        if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
                                txr->queue_status |= IXGBE_QUEUE_DEPLETED;
                        break;
                }
                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, m_head);

                /* Set watchdog on */
                txr->watchdog_time = ticks;
                txr->queue_status = IXGBE_QUEUE_WORKING;
        }
        return;
}

/*
 * Legacy TX start - called by the stack; this
 * always uses the first tx ring and should not
 * be used with multiqueue tx enabled.
 */
static void
ixgbe_start(struct ifnet *ifp)
{
        struct adapter *adapter = ifp->if_softc;
        struct tx_ring  *txr = adapter->tx_rings;

        if (ifp->if_flags & IFF_RUNNING) {
                IXGBE_TX_LOCK(txr);
                ixgbe_start_locked(txr, ifp);
                IXGBE_TX_UNLOCK(txr);
        }
        return;
}

#if 0 /* __FreeBSD_version >= 800000 */
/*
** Multiqueue Transmit driver
**
*/
static int
ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
{
        struct adapter  *adapter = ifp->if_softc;
        struct ix_queue *que;
        struct tx_ring  *txr;
        int             i = 0, err = 0;

        /* Which queue to use */
        if ((m->m_flags & M_FLOWID) != 0)
                i = m->m_pkthdr.flowid % adapter->num_queues;
        else
                i = curcpu % adapter->num_queues;

        txr = &adapter->tx_rings[i];
        que = &adapter->queues[i];

        if (((txr->queue_status & IXGBE_QUEUE_DEPLETED) == 0) &&
            IXGBE_TX_TRYLOCK(txr)) {
                err = ixgbe_mq_start_locked(ifp, txr, m);
                IXGBE_TX_UNLOCK(txr);
        } else {
                err = drbr_enqueue(ifp, txr->br, m);
                taskqueue_enqueue(que->tq, &que->que_task);
        }

        return (err);
}

static int
ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
        struct adapter  *adapter = txr->adapter;
        struct mbuf     *next;
        int             enqueued, err = 0;

        if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
            (txr->queue_status == IXGBE_QUEUE_DEPLETED) ||
            adapter->link_active == 0) {
                if (m != NULL)
                        err = drbr_enqueue(ifp, txr->br, m);
                return (err);
        }

        enqueued = 0;
        if (m == NULL) {
                next = drbr_dequeue(ifp, txr->br);
        } else if (drbr_needs_enqueue(ifp, txr->br)) {
                if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
                        return (err);
                next = drbr_dequeue(ifp, txr->br);
        } else
                next = m;

        /* Process the queue */
        while (next != NULL) {
                if ((err = ixgbe_xmit(txr, &next)) != 0) {
                        if (next != NULL)
                                err = drbr_enqueue(ifp, txr->br, next);
                        break;
                }
                enqueued++;
                drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
                /* Send a copy of the frame to the BPF listener */
                ETHER_BPF_MTAP(ifp, next);
                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                        break;
                if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
                        ixgbe_txeof(txr);
                if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
                        txr->queue_status |= IXGBE_QUEUE_DEPLETED;
                        break;
                }
                next = drbr_dequeue(ifp, txr->br);
        }

        if (enqueued > 0) {
                /* Set watchdog on */
                txr->queue_status |= IXGBE_QUEUE_WORKING;
                txr->watchdog_time = ticks;
        }

        if (txr->tx_avail < IXGBE_TX_CLEANUP_THRESHOLD)
                ixgbe_txeof(txr);

        return (err);
}

/*
** Flush all ring buffers
*/
static void
ixgbe_qflush(struct ifnet *ifp)
{
        struct adapter  *adapter = ifp->if_softc;
        struct tx_ring  *txr = adapter->tx_rings;
        struct mbuf     *m;

        for (int i = 0; i < adapter->num_queues; i++, txr++) {
                IXGBE_TX_LOCK(txr);
                while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
                        m_freem(m);
                IXGBE_TX_UNLOCK(txr);
        }
        if_qflush(ifp);
}
#endif /* __FreeBSD_version >= 800000 */

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgbe_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgbe_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
        struct adapter  *adapter = ifp->if_softc;
        struct ifreq    *ifr = (struct ifreq *) data;
#if defined(INET) || defined(INET6)
        struct ifaddr *ifa = (struct ifaddr *)data;
        bool            avoid_reset = FALSE;
#endif
        int             error = 0;

        switch (command) {
        case SIOCSIFADDR:
#ifdef INET
                if (ifa->ifa_addr->sa_family == AF_INET)
                        avoid_reset = TRUE;
#endif
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6)
                        avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
                /*
                ** Calling init results in link renegotiation,
                ** so we avoid doing it when possible.
                */
                if (avoid_reset) {
                        ifp->if_flags |= IFF_UP;
                        if (!(ifp->if_flags & IFF_RUNNING))
                                ixgbe_init(adapter);
                        if (!(ifp->if_flags & IFF_NOARP))
                                arp_ifinit(ifp, ifa);
                } else
                        error = ether_ioctl(ifp, command, data);
#endif
                break;
        case SIOCSIFMTU:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
                if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
                        error = EINVAL;
                } else {
                        IXGBE_CORE_LOCK(adapter);
                        ifp->if_mtu = ifr->ifr_mtu;
                        adapter->max_frame_size =
                                ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
                        ixgbe_init_locked(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFFLAGS:
                IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
                IXGBE_CORE_LOCK(adapter);
                if (ifp->if_flags & IFF_UP) {
                        if ((ifp->if_flags & IFF_RUNNING)) {
                                if ((ifp->if_flags ^ adapter->if_flags) &
                                    (IFF_PROMISC | IFF_ALLMULTI)) {
                                        ixgbe_set_promisc(adapter);
                                }
                        } else
                                ixgbe_init_locked(adapter);
                } else
                        if (ifp->if_flags & IFF_RUNNING)
                                ixgbe_stop(adapter);
                adapter->if_flags = ifp->if_flags;
                IXGBE_CORE_UNLOCK(adapter);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
                if (ifp->if_flags & IFF_RUNNING) {
                        IXGBE_CORE_LOCK(adapter);
                        ixgbe_disable_intr(adapter);
                        ixgbe_set_multi(adapter);
                        ixgbe_enable_intr(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
                break;
        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
                error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
                break;
        case SIOCSIFCAP:
        {
                int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

                IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
                if (mask & IFCAP_HWCSUM)
                        ifp->if_capenable ^= IFCAP_HWCSUM;
#if 0 /* NET_TSO */
                if (mask & IFCAP_TSO4)
                        ifp->if_capenable ^= IFCAP_TSO4;
                if (mask & IFCAP_TSO6)
                        ifp->if_capenable ^= IFCAP_TSO6;
#endif
#if 0 /* NET_LRO */
                if (mask & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
#endif
                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                if (mask & IFCAP_VLAN_HWFILTER)
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
#if 0 /* NET_TSO */
                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
#endif
                if (ifp->if_flags & IFF_RUNNING) {
                        IXGBE_CORE_LOCK(adapter);
                        ixgbe_init_locked(adapter);
                        IXGBE_CORE_UNLOCK(adapter);
                }
#if 0
                VLAN_CAPABILITIES(ifp);
#endif
                break;
        }

        default:
                IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  the init entry point in the network interface structure. It is
 *  also used by the driver as a hw/sw initialization routine to get
 *  to a consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
#define IXGBE_MHADD_MFS_SHIFT 16

static void
ixgbe_init_locked(struct adapter *adapter)
{
        struct ifnet   *ifp = adapter->ifp;
        device_t        dev = adapter->dev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32             k, txdctl, mhadd, gpie;
        u32             rxdctl, rxctrl;

        KKASSERT(lockstatus(&adapter->core_lock, curthread) != 0);
        INIT_DEBUGOUT("ixgbe_init: begin");
        hw->adapter_stopped = FALSE;
        ixgbe_stop_adapter(hw);
        callout_stop(&adapter->timer);

        /* reprogram the RAR[0] in case user changed it. */
        ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

        /* Get the latest mac address; the user can use a LAA */
        bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
              IXGBE_ETH_LENGTH_OF_ADDRESS);
        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
        hw->addr_ctrl.rar_used_count = 1;

        /* Set the various hardware offload abilities */
        ifp->if_hwassist = 0;
#if 0 /* NET_TSO */
        if (ifp->if_capenable & IFCAP_TSO)
                ifp->if_hwassist |= CSUM_TSO;
#endif
        if (ifp->if_capenable & IFCAP_TXCSUM) {
                ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if 0
                if (hw->mac.type != ixgbe_mac_82598EB)
                        ifp->if_hwassist |= CSUM_SCTP;
#endif
        }

        /* Prepare transmit descriptors and buffers */
        if (ixgbe_setup_transmit_structures(adapter)) {
                device_printf(dev, "Could not setup transmit structures\n");
                ixgbe_stop(adapter);
                return;
        }

        ixgbe_init_hw(hw);
        ixgbe_initialize_transmit_units(adapter);

        /* Setup Multicast table */
        ixgbe_set_multi(adapter);

        /*
        ** Determine the correct mbuf pool
        ** for doing jumbo/headersplit
        */
        if (adapter->max_frame_size <= 2048)
                adapter->rx_mbuf_sz = MCLBYTES;
        else if (adapter->max_frame_size <= 4096)
                adapter->rx_mbuf_sz = MJUMPAGESIZE;
        else if (adapter->max_frame_size <= 9216)
                adapter->rx_mbuf_sz = MJUM9BYTES;
        else
                adapter->rx_mbuf_sz = MJUM16BYTES;
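        /*
         * Illustration of the sizing above: a 9000-byte MTU gives
         * max_frame_size = 9000 + ETHER_HDR_LEN + ETHER_CRC_LEN = 9018,
         * which lands in the MJUM9BYTES (9k cluster) bucket, while the
         * default 1500-byte MTU (1518-byte frames) uses ordinary 2k
         * MCLBYTES clusters.  (Example numbers, not additional limits.)
         */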

        /* Prepare receive descriptors and buffers */
        if (ixgbe_setup_receive_structures(adapter)) {
                device_printf(dev, "Could not setup receive structures\n");
                ixgbe_stop(adapter);
                return;
        }

        /* Configure RX settings */
        ixgbe_initialize_receive_units(adapter);

        gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);

        /* Enable Fan Failure Interrupt */
        gpie |= IXGBE_SDP1_GPIEN;

        /* Add for Module detection */
        if (hw->mac.type == ixgbe_mac_82599EB)
                gpie |= IXGBE_SDP2_GPIEN;

        /* Thermal Failure Detection */
        if (hw->mac.type == ixgbe_mac_X540)
                gpie |= IXGBE_SDP0_GPIEN;

        if (adapter->msix > 1) {
                /* Enable Enhanced MSIX mode */
                gpie |= IXGBE_GPIE_MSIX_MODE;
                gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
                    IXGBE_GPIE_OCD;
        }
        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

        /* Set MTU size */
        if (ifp->if_mtu > ETHERMTU) {
                mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
                mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
                IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
        }
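        /*
         * Note on the write above: the MFS (max frame size) field lives
         * in the upper 16 bits of MHADD, hence the shift by
         * IXGBE_MHADD_MFS_SHIFT (16); e.g. a 9018-byte max frame is
         * written as 9018 << 16 while the mask preserves the rest of
         * the register.
         */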

        /* Now enable all the queues */
        for (int i = 0; i < adapter->num_queues; i++) {
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                /* Set WTHRESH to 8, burst writeback */
                txdctl |= (8 << 16);
                /*
                 * When the internal queue falls below PTHRESH (32),
                 * start prefetching as long as there are at least
                 * HTHRESH (1) buffers ready. The values are taken
                 * from the Intel linux driver 3.8.21.
                 * Prefetching enables tx line rate even with 1 queue.
                 */
                txdctl |= (32 << 0) | (1 << 8);
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
        }
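        /*
         * For reference, the TXDCTL fields programmed above (per the
         * 82599-family register layout this code assumes): PTHRESH in
         * bits 6:0, HTHRESH in bits 14:8, WTHRESH in bits 22:16, so the
         * ORs yield PTHRESH=32, HTHRESH=1, WTHRESH=8 plus the enable
         * bit.
         */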

        for (int i = 0; i < adapter->num_queues; i++) {
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                if (hw->mac.type == ixgbe_mac_82598EB) {
                        /*
                        ** PTHRESH = 21
                        ** HTHRESH = 4
                        ** WTHRESH = 8
                        */
                        rxdctl &= ~0x3FFFFF;
                        rxdctl |= 0x080420;
                }
                rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
                for (k = 0; k < 10; k++) {
                        if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
                            IXGBE_RXDCTL_ENABLE)
                                break;
                        else
                                msec_delay(1);
                }
                wmb();
#ifdef DEV_NETMAP
                /*
                 * In netmap mode, we must preserve the buffers made
                 * available to userspace before the if_init()
                 * (this is true by default on the TX side, because
                 * init makes all buffers available to userspace).
                 *
                 * netmap_reset() and the device specific routines
                 * (e.g. ixgbe_setup_receive_rings()) map these
                 * buffers at the end of the NIC ring, so here we
                 * must set the RDT (tail) register to make sure
                 * they are not overwritten.
                 *
                 * In this driver the NIC ring starts at RDH = 0,
                 * RDT points to the last slot available for reception (?),
                 * so RDT = num_rx_desc - 1 means the whole ring is available.
                 */
                if (ifp->if_capenable & IFCAP_NETMAP) {
                        struct netmap_adapter *na = NA(adapter->ifp);
                        struct netmap_kring *kring = &na->rx_rings[i];
                        int t = na->num_rx_desc - 1 - kring->nr_hwavail;

                        IXGBE_WRITE_REG(hw, IXGBE_RDT(i), t);
                } else
#endif /* DEV_NETMAP */
                IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
        }

        /* Set up VLAN support and filter */
        ixgbe_setup_vlan_hw_support(adapter);

        /* Enable Receive engine */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        if (hw->mac.type == ixgbe_mac_82598EB)
                rxctrl |= IXGBE_RXCTRL_DMBYPS;
        rxctrl |= IXGBE_RXCTRL_RXEN;
        ixgbe_enable_rx_dma(hw, rxctrl);

        callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);

        /* Set up MSI/X routing */
        if (ixgbe_enable_msix) {
                ixgbe_configure_ivars(adapter);
                /* Set up auto-mask */
                if (hw->mac.type == ixgbe_mac_82598EB)
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
                else {
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
                }
        } else {  /* Simple settings for Legacy/MSI */
                ixgbe_set_ivar(adapter, 0, 0, 0);
                ixgbe_set_ivar(adapter, 0, 0, 1);
                IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
        }
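        /*
         * For the legacy/MSI path above, the two ixgbe_set_ivar() calls
         * route queue 0 onto vector 0: judging from the prototype's s8
         * "type" argument, the final 0 selects the RX entry and the 1
         * the TX entry of the IVAR table (-1 is used elsewhere for
         * "other" causes such as link).  A sketch of the mapping only;
         * ixgbe_set_ivar() itself holds the authoritative encoding.
         */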

#ifdef IXGBE_FDIR
        /* Init Flow director */
        if (hw->mac.type != ixgbe_mac_82598EB) {
                u32 hdrm = 32 << fdir_pballoc;

                hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
                ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
        }
#endif

        /*
        ** Check on any SFP devices that
        ** need to be kick-started
        */
        if (hw->phy.type == ixgbe_phy_none) {
                int err = hw->phy.ops.identify(hw);
                if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                        device_printf(dev,
                            "Unsupported SFP+ module type was detected.\n");
                        return;
                }
        }

        /* Set moderation on the Link interrupt */
        IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);

        /* Config/Enable Link */
        ixgbe_config_link(adapter);

        /* Hardware Packet Buffer & Flow Control setup */
        {
                u32 rxpb, frame, size, tmp;

                frame = adapter->max_frame_size;

                /* Calculate High Water */
                if (hw->mac.type == ixgbe_mac_X540)
                        tmp = IXGBE_DV_X540(frame, frame);
                else
                        tmp = IXGBE_DV(frame, frame);
                size = IXGBE_BT2KB(tmp);
                rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
                hw->fc.high_water[0] = rxpb - size;

                /* Now calculate Low Water */
                if (hw->mac.type == ixgbe_mac_X540)
                        tmp = IXGBE_LOW_DV_X540(frame);
                else
                        tmp = IXGBE_LOW_DV(frame);
                hw->fc.low_water[0] = IXGBE_BT2KB(tmp);

                adapter->fc = hw->fc.requested_mode = ixgbe_fc_full;
                hw->fc.pause_time = IXGBE_FC_PAUSE;
                hw->fc.send_xon = TRUE;
        }
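        /*
         * Units sanity-check for the block above (assumed semantics):
         * the IXGBE_DV*() macros compute a delay allowance (in bits,
         * judging by the BT2KB name), IXGBE_BT2KB() converts that to
         * kilobytes, and RXPBSIZE >> 10 likewise yields the RX packet
         * buffer size in KB, so the high/low water marks are both
         * expressed in KB of packet buffer.
         */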
        /* Initialize the FC settings */
        ixgbe_start_hw(hw);

        /* And now turn on interrupts */
        ixgbe_enable_intr(adapter);

        /* Now inform the stack we're ready */
        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        return;
}

static void
ixgbe_init(void *arg)
{
        struct adapter *adapter = arg;

        IXGBE_CORE_LOCK(adapter);
        ixgbe_init_locked(adapter);
        IXGBE_CORE_UNLOCK(adapter);
        return;
}

/*
**
** MSIX Interrupt Handlers and Tasklets
**
*/

static inline void
ixgbe_enable_queue(struct adapter *adapter, u32 vector)
{
        struct ixgbe_hw *hw = &adapter->hw;
        /* shift as u64 so vectors >= 32 do not overflow the int constant */
        u64     queue = (u64)1 << vector;
        u32     mask;

        if (hw->mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
        } else {
                mask = (queue & 0xFFFFFFFF);
                if (mask)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
                mask = (queue >> 32);
                if (mask)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
        }
}
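/*
 * Note: on 82599/X540 the extended mask registers EIMS_EX(0)/EIMS_EX(1)
 * cover interrupt causes 0-31 and 32-63 respectively, which is why the
 * u64 queue bit is split into low and high 32-bit halves above (and in
 * ixgbe_disable_queue()/ixgbe_rearm_queues() below); the 82598 only has
 * the single EIMS/EIMC/EICS registers.
 */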

static inline void
ixgbe_disable_queue(struct adapter *adapter, u32 vector)
{
        struct ixgbe_hw *hw = &adapter->hw;
        /* shift as u64 so vectors >= 32 do not overflow the int constant */
        u64     queue = (u64)1 << vector;
        u32     mask;

        if (hw->mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
        } else {
                mask = (queue & 0xFFFFFFFF);
                if (mask)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
                mask = (queue >> 32);
                if (mask)
                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
        }
}

static inline void
ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
{
        u32 mask;

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & queues);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
        } else {
                mask = (queues & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (queues >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
        }
}
1399
1400
1401 static void
1402 ixgbe_handle_que(void *context, int pending)
1403 {
1404         struct ix_queue *que = context;
1405         struct adapter  *adapter = que->adapter;
1406         struct tx_ring  *txr = que->txr;
1407         struct ifnet    *ifp = adapter->ifp;
1408         bool            more;
1409
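        /*
        ** Clean RX then TX; if the RX limit was reached there may be
        ** more work, so re-queue the task rather than re-enabling the
        ** interrupt, and let the queue be polled again from the
        ** taskqueue.
        */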
1410         if (ifp->if_flags & IFF_RUNNING) {
1411                 more = ixgbe_rxeof(que, adapter->rx_process_limit);
1412                 IXGBE_TX_LOCK(txr);
1413                 ixgbe_txeof(txr);
1414 #if 0 /*__FreeBSD_version >= 800000*/
1415                 if (!drbr_empty(ifp, txr->br))
1416                         ixgbe_mq_start_locked(ifp, txr, NULL);
1417 #else
1418                 if (!ifq_is_empty(&ifp->if_snd))
1419                         ixgbe_start_locked(txr, ifp);
1420 #endif
1421                 IXGBE_TX_UNLOCK(txr);
1422                 if (more) {
1423                         taskqueue_enqueue(que->tq, &que->que_task);
1424                         return;
1425                 }
1426         }
1427
1428         /* Reenable this interrupt */
1429         ixgbe_enable_queue(adapter, que->msix);
1430         return;
1431 }
1432
1433
1434 /*********************************************************************
1435  *
1436  *  Legacy Interrupt Service routine
1437  *
1438  **********************************************************************/
1439
1440 static void
1441 ixgbe_legacy_irq(void *arg)
1442 {
1443         struct ix_queue *que = arg;
1444         struct adapter  *adapter = que->adapter;
1445         struct ixgbe_hw *hw = &adapter->hw;
1446         struct tx_ring  *txr = adapter->tx_rings;
1447         bool            more_tx, more_rx;
1448         u32             reg_eicr, loop = MAX_LOOP;
1449
1450
1451         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1452
1453         ++que->irqs;
1454         if (reg_eicr == 0) {
1455                 ixgbe_enable_intr(adapter);
1456                 return;
1457         }
1458
1459         more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
1460
1461         IXGBE_TX_LOCK(txr);
1462         do {
1463                 more_tx = ixgbe_txeof(txr);
1464         } while (loop-- && more_tx);
1465         IXGBE_TX_UNLOCK(txr);
1466
1467         if (more_rx || more_tx)
1468                 taskqueue_enqueue(que->tq, &que->que_task);
1469
1470         /* Check for fan failure */
1471         if ((hw->phy.media_type == ixgbe_media_type_copper) &&
1472             (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1473                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1474                     "REPLACE IMMEDIATELY!!\n");
1475                 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
1476         }
1477
1478         /* Link status change */
1479         if (reg_eicr & IXGBE_EICR_LSC)
1480                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1481
1482         ixgbe_enable_intr(adapter);
1483         return;
1484 }
1485
1486
1487 /*********************************************************************
1488  *
1489  *  MSIX Queue Interrupt Service routine
1490  *
1491  **********************************************************************/
1492 void
1493 ixgbe_msix_que(void *arg)
1494 {
1495         struct ix_queue *que = arg;
1496         struct adapter  *adapter = que->adapter;
1497         struct tx_ring  *txr = que->txr;
1498         struct rx_ring  *rxr = que->rxr;
1499         bool            more_tx, more_rx;
1500         u32             newitr = 0;
1501
1502         ixgbe_disable_queue(adapter, que->msix);
1503         ++que->irqs;
1504
1505         more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
1506
1507         IXGBE_TX_LOCK(txr);
1508         more_tx = ixgbe_txeof(txr);
1509         /*
1510         ** Make certain that if the stack 
1511         ** has anything queued, the task gets
1512         ** scheduled to handle it.
1513         */
1514 #if 0
1515 #if __FreeBSD_version < 800000
1516         if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
1517 #else
1518         if (!drbr_empty(adapter->ifp, txr->br))
1519 #endif
1520 #endif
1521         if (!ifq_is_empty(&adapter->ifp->if_snd))
1522                 more_tx = 1;
1523         IXGBE_TX_UNLOCK(txr);
1524
1525         /* Do AIM now? */
1526
1527         if (ixgbe_enable_aim == FALSE)
1528                 goto no_calc;
1529         /*
1530         ** Do Adaptive Interrupt Moderation:
1531         **  - Write out last calculated setting
1532         **  - Calculate based on average size over
1533         **    the last interval.
1534         */
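        /*
        ** The average bytes-per-packet below serves as the workload
        ** proxy: large frames (bulk transfers) tolerate a longer
        ** throttle interval than small latency-sensitive packets.
        ** Note the 82598 wants the interval mirrored into the upper
        ** 16 bits of EITR, while newer MACs set CNT_WDIS so the write
        ** does not disturb the running interval counter.
        */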
1535         if (que->eitr_setting)
1536                 IXGBE_WRITE_REG(&adapter->hw,
1537                     IXGBE_EITR(que->msix), que->eitr_setting);
1538  
1539         que->eitr_setting = 0;
1540
1541         /* Idle, do nothing */
1542         if ((txr->bytes == 0) && (rxr->bytes == 0))
1543                 goto no_calc;
1544                                 
1545         if ((txr->bytes) && (txr->packets))
1546                 newitr = txr->bytes/txr->packets;
1547         if ((rxr->bytes) && (rxr->packets))
1548                 newitr = max(newitr,
1549                     (rxr->bytes / rxr->packets));
1550         newitr += 24; /* account for hardware frame, crc */
1551
1552         /* set an upper boundary */
1553         newitr = min(newitr, 3000);
1554
1555         /* Be nice to the mid range */
1556         if ((newitr > 300) && (newitr < 1200))
1557                 newitr = (newitr / 3);
1558         else
1559                 newitr = (newitr / 2);
1560
1561         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
1562                 newitr |= newitr << 16;
1563         else
1564                 newitr |= IXGBE_EITR_CNT_WDIS;
1565                  
1566         /* save for next interrupt */
1567         que->eitr_setting = newitr;
1568
1569         /* Reset state */
1570         txr->bytes = 0;
1571         txr->packets = 0;
1572         rxr->bytes = 0;
1573         rxr->packets = 0;
1574
1575 no_calc:
1576         if (more_tx || more_rx)
1577                 taskqueue_enqueue(que->tq, &que->que_task);
1578         else /* Reenable this interrupt */
1579                 ixgbe_enable_queue(adapter, que->msix);
1580         return;
1581 }
1582
1583
1584 static void
1585 ixgbe_msix_link(void *arg)
1586 {
1587         struct adapter  *adapter = arg;
1588         struct ixgbe_hw *hw = &adapter->hw;
1589         u32             reg_eicr;
1590
1591         ++adapter->link_irq;
1592
1593         /* First get the cause */
1594         reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1595         /* Clear interrupt with write */
1596         IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
1597
1598         /* Link status change */
1599         if (reg_eicr & IXGBE_EICR_LSC)
1600                 taskqueue_enqueue(adapter->tq, &adapter->link_task);
1601
1602         if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
1603 #ifdef IXGBE_FDIR
1604                 if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
1605                         /* This is probably overkill :) */
1606                         if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
1607                                 return;
1608                         /* Disable the interrupt */
1609                         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR);
1610                         taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
1611                 } else
1612 #endif
1613                 if (reg_eicr & IXGBE_EICR_ECC) {
1614                         device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
1615                             "Please Reboot!!\n");
1616                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1617                 } else
1618
1619                 if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
1620                         /* Clear the interrupt */
1621                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1622                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
1623                 } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
1624                         /* Clear the interrupt */
1625                         IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1626                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
1627                 }
1628         } 
1629
1630         /* Check for fan failure */
1631         if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1632             (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1633                 device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
1634                     "REPLACE IMMEDIATELY!!\n");
1635                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1636         }
1637
1638         /* Check for over temp condition */
1639         if ((hw->mac.type == ixgbe_mac_X540) &&
1640             (reg_eicr & IXGBE_EICR_GPI_SDP0)) {
1641                 device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
1642                     "PHY IS SHUT DOWN!!\n");
1643                 device_printf(adapter->dev, "System shutdown required\n");
1644                 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
1645         }
1646
1647         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1648         return;
1649 }
1650
1651 /*********************************************************************
1652  *
1653  *  Media Ioctl callback
1654  *
1655  *  This routine is called whenever the user queries the status of
1656  *  the interface using ifconfig.
1657  *
1658  **********************************************************************/
1659 static void
1660 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
1661 {
1662         struct adapter *adapter = ifp->if_softc;
1663
1664         INIT_DEBUGOUT("ixgbe_media_status: begin");
1665         IXGBE_CORE_LOCK(adapter);
1666         ixgbe_update_link_status(adapter);
1667
1668         ifmr->ifm_status = IFM_AVALID;
1669         ifmr->ifm_active = IFM_ETHER;
1670
1671         if (!adapter->link_active) {
1672                 IXGBE_CORE_UNLOCK(adapter);
1673                 return;
1674         }
1675
1676         ifmr->ifm_status |= IFM_ACTIVE;
1677
1678         switch (adapter->link_speed) {
1679                 case IXGBE_LINK_SPEED_100_FULL:
1680                         ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1681                         break;
1682                 case IXGBE_LINK_SPEED_1GB_FULL:
1683                         ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1684                         break;
1685                 case IXGBE_LINK_SPEED_10GB_FULL:
1686                         ifmr->ifm_active |= adapter->optics | IFM_FDX;
1687                         break;
1688         }
1689
1690         IXGBE_CORE_UNLOCK(adapter);
1691
1692         return;
1693 }
1694
1695 /*********************************************************************
1696  *
1697  *  Media Ioctl callback
1698  *
1699  *  This routine is called when the user changes speed/duplex using
1700  *  media/mediaopt options with ifconfig.
1701  *
1702  **********************************************************************/
1703 static int
1704 ixgbe_media_change(struct ifnet * ifp)
1705 {
1706         struct adapter *adapter = ifp->if_softc;
1707         struct ifmedia *ifm = &adapter->media;
1708
1709         INIT_DEBUGOUT("ixgbe_media_change: begin");
1710
1711         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1712                 return (EINVAL);
1713
1714         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1715         case IFM_AUTO:
1716                 adapter->hw.phy.autoneg_advertised =
1717                     IXGBE_LINK_SPEED_100_FULL |
1718                     IXGBE_LINK_SPEED_1GB_FULL |
1719                     IXGBE_LINK_SPEED_10GB_FULL;
1720                 break;
1721         default:
1722                 device_printf(adapter->dev, "Only auto media type\n");
1723                 return (EINVAL);
1724         }
1725
1726         return (0);
1727 }
1728
1729 /*********************************************************************
1730  *
1731  *  This routine maps the mbufs to tx descriptors, allowing the
1732  *  TX engine to transmit the packets. 
1733  *      - return 0 on success, positive on failure
1734  *
1735  **********************************************************************/
1736
1737 static int
1738 ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
1739 {
1740         struct adapter  *adapter = txr->adapter;
1741         u32             olinfo_status = 0, cmd_type_len;
1742         u32             paylen = 0;
1743         int             i, j, error, nsegs, maxsegs;
1744         int             first, last = 0;
1745         struct mbuf     *m_head;
1746         bus_dma_segment_t segs[adapter->num_segs];
1747         bus_dmamap_t    map;
1748         struct ixgbe_tx_buf *txbuf;
1749         union ixgbe_adv_tx_desc *txd = NULL;
1750
1751         m_head = *m_headp;
1752
1753         /* Basic descriptor defines */
1754         cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1755             IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1756
1757         if (m_head->m_flags & M_VLANTAG)
1758                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1759
1760         /*
1761          * Important to capture the first descriptor
1762          * used because it will contain the index of
1763          * the one we tell the hardware to report back
1764          */
1765         first = txr->next_avail_desc;
1766         txbuf = &txr->tx_buffers[first];
1767         map = txbuf->map;
1768
1769         /*
1770          * Map the packet for DMA.
1771          */
1772         maxsegs = txr->tx_avail - IXGBE_TX_RESERVED;
1773         if (maxsegs > adapter->num_segs)
1774                 maxsegs = adapter->num_segs;
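        /*
        ** (Cap the segment count: leave IXGBE_TX_RESERVED descriptors
        ** in reserve, presumably for an offload context descriptor and
        ** ring-full margin, and never exceed what the DMA tag allows.)
        */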
1775
1776         error = bus_dmamap_load_mbuf_defrag(txr->txtag, map, m_headp,
1777             segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1778         if (error) {
1779                 if (error == ENOBUFS)
1780                         adapter->mbuf_defrag_failed++;
1781                 else
1782                         adapter->no_tx_dma_setup++;
1783
1784                 m_freem(*m_headp);
1785                 *m_headp = NULL;
1786                 return (error);
1787         }
1788
1789         /* Make certain there are enough descriptors */
1790         if (nsegs > txr->tx_avail - 2) {
1791                 txr->no_desc_avail++;
1792                 error = ENOBUFS;
1793                 goto xmit_fail;
1794         }
1795         m_head = *m_headp;
1796
1797         /*
1798         ** Set up the appropriate offload context;
1799         ** this becomes the first descriptor of 
1800         ** a packet.
1801         */
1802 #if 0 /* NET_TSO */
1803         if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1804                 if (ixgbe_tso_setup(txr, m_head, &paylen, &olinfo_status)) {
1805                         cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1806                         olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1807                         olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1808                         ++adapter->tso_tx;
1809                 } else
1810                         return (ENXIO);
1811         } else if (ixgbe_tx_ctx_setup(txr, m_head))
1812 #endif
1813         if (ixgbe_tx_ctx_setup(txr, m_head))
1814                 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1815
1816 #ifdef IXGBE_IEEE1588
1817         /* This is changing soon to an mtag detection */
1818         if (we detect this mbuf has a TSTAMP mtag)
1819                 cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
1820 #endif
1821
1822 #ifdef IXGBE_FDIR
1823         /* Do the flow director magic */
1824         if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
1825                 ++txr->atr_count;
1826                 if (txr->atr_count >= atr_sample_rate) {
1827                         ixgbe_atr(txr, m_head);
1828                         txr->atr_count = 0;
1829                 }
1830         }
1831 #endif
1832         /* Record payload length */
1833         if (paylen == 0)
1834                 olinfo_status |= m_head->m_pkthdr.len <<
1835                     IXGBE_ADVTXD_PAYLEN_SHIFT;
1836
1837         i = txr->next_avail_desc;
1838         for (j = 0; j < nsegs; j++) {
1839                 bus_size_t seglen;
1840                 bus_addr_t segaddr;
1841
1842                 txbuf = &txr->tx_buffers[i];
1843                 txd = &txr->tx_base[i];
1844                 seglen = segs[j].ds_len;
1845                 segaddr = htole64(segs[j].ds_addr);
1846
1847                 txd->read.buffer_addr = segaddr;
1848                 txd->read.cmd_type_len = htole32(txr->txd_cmd |
1849                     cmd_type_len | seglen);
1850                 txd->read.olinfo_status = htole32(olinfo_status);
1851                 last = i; /* descriptor that will get completion IRQ */
1852
1853                 if (++i == adapter->num_tx_desc)
1854                         i = 0;
1855
1856                 txbuf->m_head = NULL;
1857                 txbuf->eop_index = -1;
1858         }
1859
1860         txd->read.cmd_type_len |=
1861             htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1862         txr->tx_avail -= nsegs;
1863         txr->next_avail_desc = i;
1864
1865         txbuf->m_head = m_head;
1866         /* Swap the dma map between the first and last descriptor */
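        /*
        ** (The loaded map belongs to the slot at 'first'; handing it to
        ** the last slot, where m_head is stored, keeps map and mbuf
        ** paired so txeof can unload the correct map on completion.)
        */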
1867         txr->tx_buffers[first].map = txbuf->map;
1868         txbuf->map = map;
1869         bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
1870
1871         /* Set the index of the descriptor that will be marked done */
1872         txbuf = &txr->tx_buffers[first];
1873         txbuf->eop_index = last;
1874
1875         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1876             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1877         /*
1878          * Advance the Transmit Descriptor Tail (TDT); this tells the
1879          * hardware that this frame is available to transmit.
1880          */
1881         ++txr->total_packets;
1882         IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
1883
1884         return (0);
1885
1886 xmit_fail:
1887         bus_dmamap_unload(txr->txtag, txbuf->map);
1888         return (error);
1889
1890 }
1891
1892 static void
1893 ixgbe_set_promisc(struct adapter *adapter)
1894 {
1895         u32             reg_rctl;
1896         struct ifnet   *ifp = adapter->ifp;
1897
1898         reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1899         reg_rctl &= (~IXGBE_FCTRL_UPE);
1900         reg_rctl &= (~IXGBE_FCTRL_MPE);
1901         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1902
1903         if (ifp->if_flags & IFF_PROMISC) {
1904                 reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1905                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1906         } else if (ifp->if_flags & IFF_ALLMULTI) {
1907                 reg_rctl |= IXGBE_FCTRL_MPE;
1908                 reg_rctl &= ~IXGBE_FCTRL_UPE;
1909                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
1910         }
1911         return;
1912 }
1913
1914
1915 /*********************************************************************
1916  *  Multicast Update
1917  *
1918  *  This routine is called whenever multicast address list is updated.
1919  *
1920  **********************************************************************/
1921 #define IXGBE_RAR_ENTRIES 16
1922
1923 static void
1924 ixgbe_set_multi(struct adapter *adapter)
1925 {
1926         u32     fctrl;
1927         u8      *mta;
1928         u8      *update_ptr;
1929         struct  ifmultiaddr *ifma;
1930         int     mcnt = 0;
1931         struct ifnet   *ifp = adapter->ifp;
1932
1933         IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1934
1935         mta = adapter->mta;
1936         bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1937             MAX_NUM_MULTICAST_ADDRESSES);
1938
1939         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1940         fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1941         if (ifp->if_flags & IFF_PROMISC)
1942                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1943         else if (ifp->if_flags & IFF_ALLMULTI) {
1944                 fctrl |= IXGBE_FCTRL_MPE;
1945                 fctrl &= ~IXGBE_FCTRL_UPE;
1946         } else
1947                 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1948         
1949         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1950
1951         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1952                 if (ifma->ifma_addr->sa_family != AF_LINK)
1953                         continue;
                /* Don't run past the mta array bzero'd above */
                if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
                        break;
1954                 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1955                     &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1956                     IXGBE_ETH_LENGTH_OF_ADDRESS);
1957                 mcnt++;
1958         }
1959
1960         update_ptr = mta;
1961         ixgbe_update_mc_addr_list(&adapter->hw,
1962             update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
1963
1964         return;
1965 }
1966
1967 /*
1968  * This is an iterator function needed by the multicast
1969  * shared code. It simply feeds the shared code routine the
1970  * addresses from the ixgbe_set_multi() array one by one.
1971  */
1972 static u8 *
1973 ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
1974 {
1975         u8 *addr = *update_ptr;
1976         u8 *newptr;
1977         *vmdq = 0;
1978
1979         newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1980         *update_ptr = newptr;
1981         return addr;
1982 }
1983
1984
1985 /*********************************************************************
1986  *  Timer routine
1987  *
1988  *  This routine checks for link status, updates statistics,
1989  *  and runs the watchdog check.
1990  *
1991  **********************************************************************/
1992
1993 static void
1994 ixgbe_local_timer(void *arg)
1995 {
1996         struct adapter  *adapter = arg;
1997         device_t        dev = adapter->dev;
1998         struct ifnet    *ifp = adapter->ifp;
1999         struct ix_queue *que = adapter->queues;
2000         struct tx_ring  *txr = adapter->tx_rings;
2001         int             hung, busy, paused;
2002
2003         IXGBE_CORE_LOCK(adapter);
2004         hung = busy = paused = 0;
2005
2006         /* Check for pluggable optics */
2007         if (adapter->sfp_probe)
2008                 if (!ixgbe_sfp_probe(adapter))
2009                         goto out; /* Nothing to do */
2010
2011         ixgbe_update_link_status(adapter);
2012         ixgbe_update_stats_counters(adapter);
2013
2014         /*
2015          * If the interface has been paused
2016          * then don't do the watchdog check
2017          */
2018         if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
2019                 paused = 1;
2020
2021         /*
2022         ** Check the TX queues status
2023         **      - central locked handling of OACTIVE
2024         **      - watchdog only if all queues show hung
2025         */          
2026         for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
2027                 if ((txr->queue_status & IXGBE_QUEUE_HUNG) &&
2028                     (paused == 0))
2029                         ++hung;
2030                 if (txr->queue_status & IXGBE_QUEUE_DEPLETED)
2031                         ++busy;
2032                 if ((txr->queue_status & IXGBE_QUEUE_IDLE) == 0)
2033                         taskqueue_enqueue(que->tq, &que->que_task);
2034         }
2035         /* Only truly watchdog if all queues show hung */
2036         if (hung == adapter->num_queues)
2037                 goto watchdog;
2038         /* Only turn off the stack flow when ALL are depleted */
2039         if (busy == adapter->num_queues)
2040                 ifp->if_flags |= IFF_OACTIVE;
2041         else if ((ifp->if_flags & IFF_OACTIVE) &&
2042             (busy < adapter->num_queues))
2043                 ifp->if_flags &= ~IFF_OACTIVE;
2044
2045 out:
2046         ixgbe_rearm_queues(adapter, adapter->que_mask);
2047         callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
2048         IXGBE_CORE_UNLOCK(adapter);
2049         return;
2050
2051 watchdog:
2052         device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2053         device_printf(dev, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
2054             IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
2055             IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
2056         device_printf(dev, "TX(%d) desc avail = %d, "
2057             "Next TX to Clean = %d\n",
2058             txr->me, txr->tx_avail, txr->next_to_clean);
2059         adapter->ifp->if_flags &= ~IFF_RUNNING;
2060         adapter->watchdog_events++;
2061         ixgbe_init_locked(adapter);
2062
2063         IXGBE_CORE_UNLOCK(adapter);
2064 }
2065
2066 /*
2067 ** Note: this routine updates the OS on the link state;
2068 **      the real check of the hardware only happens with
2069 **      a link interrupt.
2070 */
2071 static void
2072 ixgbe_update_link_status(struct adapter *adapter)
2073 {
2074         struct ifnet    *ifp = adapter->ifp;
2075         struct tx_ring *txr = adapter->tx_rings;
2076         device_t dev = adapter->dev;
2077
2078
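        /* A link_speed of 128 is IXGBE_LINK_SPEED_10GB_FULL (0x80) */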
2079         if (adapter->link_up) {
2080                 if (adapter->link_active == FALSE) {
2081                         if (bootverbose)
2082                                 device_printf(dev, "Link is up %d Gbps %s\n",
2083                                     ((adapter->link_speed == 128)? 10:1),
2084                                     "Full Duplex");
2085                         adapter->link_active = TRUE;
2086                         /* Update any Flow Control changes */
2087                         ixgbe_fc_enable(&adapter->hw);
2088                         ifp->if_link_state = LINK_STATE_UP;
2089                         if_link_state_change(ifp);
2090                 }
2091         } else { /* Link down */
2092                 if (adapter->link_active == TRUE) {
2093                         if (bootverbose)
2094                                 device_printf(dev,"Link is Down\n");
2095                         ifp->if_link_state = LINK_STATE_DOWN;
2096                         if_link_state_change(ifp);
2097                         adapter->link_active = FALSE;
2098                         for (int i = 0; i < adapter->num_queues;
2099                             i++, txr++)
2100                                 txr->queue_status = IXGBE_QUEUE_IDLE;
2101                 }
2102         }
2103
2104         return;
2105 }
2106
2107
2108 /*********************************************************************
2109  *
2110  *  This routine disables all traffic on the adapter by issuing a
2111  *  global reset on the MAC and deallocating TX/RX buffers.
2112  *
2113  **********************************************************************/
2114
2115 static void
2116 ixgbe_stop(void *arg)
2117 {
2118         struct ifnet   *ifp;
2119         struct adapter *adapter = arg;
2120         struct ixgbe_hw *hw = &adapter->hw;
2121         ifp = adapter->ifp;
2122
2123         KKASSERT(lockstatus(&adapter->core_lock, curthread) != 0);
2124
2125         INIT_DEBUGOUT("ixgbe_stop: begin\n");
2126         ixgbe_disable_intr(adapter);
2127         callout_stop(&adapter->timer);
2128
2129         /* Let the stack know...*/
2130         ifp->if_flags &= ~IFF_RUNNING;
2131         ifp->if_flags |= IFF_OACTIVE;
2132
2133         ixgbe_reset_hw(hw);
2134         hw->adapter_stopped = FALSE;
2135         ixgbe_stop_adapter(hw);
2136         /* Turn off the laser */
2137         if (hw->phy.multispeed_fiber)
2138                 ixgbe_disable_tx_laser(hw);
2139
2140         /* reprogram the RAR[0] in case user changed it. */
2141         ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2142
2143         return;
2144 }
2145
2146
2147 /*********************************************************************
2148  *
2149  *  Determine hardware revision.
2150  *
2151  **********************************************************************/
2152 static void
2153 ixgbe_identify_hardware(struct adapter *adapter)
2154 {
2155         device_t        dev = adapter->dev;
2156         struct ixgbe_hw *hw = &adapter->hw;
2157
2158         /* Save off the information about this board */
2159         hw->vendor_id = pci_get_vendor(dev);
2160         hw->device_id = pci_get_device(dev);
2161         hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
2162         hw->subsystem_vendor_id =
2163             pci_read_config(dev, PCIR_SUBVEND_0, 2);
2164         hw->subsystem_device_id =
2165             pci_read_config(dev, PCIR_SUBDEV_0, 2);
2166
2167         /* We need this here to set the num_segs below */
2168         ixgbe_set_mac_type(hw);
2169
2170         /* Pick up the 82599 and VF settings */
2171         if (hw->mac.type != ixgbe_mac_82598EB) {
2172                 hw->phy.smart_speed = ixgbe_smart_speed;
2173                 adapter->num_segs = IXGBE_82599_SCATTER;
2174         } else
2175                 adapter->num_segs = IXGBE_82598_SCATTER;
2176
2177         return;
2178 }
2179
2180 /*********************************************************************
2181  *
2182  *  Determine optic type
2183  *
2184  **********************************************************************/
2185 static void
2186 ixgbe_setup_optics(struct adapter *adapter)
2187 {
2188         struct ixgbe_hw *hw = &adapter->hw;
2189         int             layer;
2190         
2191         layer = ixgbe_get_supported_physical_layer(hw);
2192
2193         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
2194                 adapter->optics = IFM_10G_T;
2195                 return;
2196         }
2197
2198         if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) {
2199                 adapter->optics = IFM_1000_T;
2200                 return;
2201         }
2202
2203         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
2204             IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
2205                 adapter->optics = IFM_10G_LR;
2206                 return;
2207         }
2208
2209         if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
2210                 adapter->optics = IFM_10G_SR;
2211                 return;
2212         }
2213
2214         if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) {
2215                 adapter->optics = IFM_10G_TWINAX;
2216                 return;
2217         }
2218
2219         if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
2220             IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) {
2221                 adapter->optics = IFM_10G_CX4;
2222                 return;
2223         }
2224
2225         /* If we get here just set the default */
2226         adapter->optics = IFM_ETHER | IFM_AUTO;
2227         return;
2228 }
2229
2230 /*********************************************************************
2231  *
2232  *  Setup the Legacy or MSI Interrupt handler
2233  *
2234  **********************************************************************/
2235 static int
2236 ixgbe_allocate_legacy(struct adapter *adapter)
2237 {
2238         device_t dev = adapter->dev;
2239         struct          ix_queue *que = adapter->queues;
2240         int error, rid = 0;
2241         unsigned int intr_flags;
2242
2243         /* MSI RID at 1 */
2244         if (adapter->msix == 1)
2245                 rid = 1;
2246
2247         /* Try allocating a MSI interrupt first */
2248         adapter->intr_type = pci_alloc_1intr(dev, ixgbe_msi_enable,
2249                 &rid, &intr_flags);
2250
2251         /* We allocate a single interrupt resource */
2252         adapter->res = bus_alloc_resource_any(dev,
2253             SYS_RES_IRQ, &rid, intr_flags);
2254         if (adapter->res == NULL) {
2255                 device_printf(dev, "Unable to allocate bus resource: "
2256                     "interrupt\n");
2257                 return (ENXIO);
2258         }
2259
2260         /*
2261          * Try allocating a fast interrupt and the associated deferred
2262          * processing contexts.
2263          */
2264         TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2265         que->tq = taskqueue_create("ixgbe_que", M_NOWAIT,
2266             taskqueue_thread_enqueue, &que->tq);
2267         taskqueue_start_threads(&que->tq, 1, PI_NET, -1, "%s ixq",
2268             device_get_nameunit(adapter->dev));
2269
2270         /* Tasklets for Link, SFP and Multispeed Fiber */
2271         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2272         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2273         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2274 #ifdef IXGBE_FDIR
2275         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2276 #endif
2277         adapter->tq = taskqueue_create("ixgbe_link", M_NOWAIT,
2278             taskqueue_thread_enqueue, &adapter->tq);
2279         taskqueue_start_threads(&adapter->tq, 1, PI_NET, -1, "%s linkq",
2280             device_get_nameunit(adapter->dev));
2281
2282         if ((error = bus_setup_intr(dev, adapter->res, INTR_MPSAFE,
2283             ixgbe_legacy_irq, que, &adapter->tag, &adapter->serializer)) != 0) {
2284                 device_printf(dev, "Failed to register fast interrupt "
2285                     "handler: %d\n", error);
2286                 taskqueue_free(que->tq);
2287                 taskqueue_free(adapter->tq);
2288                 que->tq = NULL;
2289                 adapter->tq = NULL;
2290                 return (error);
2291         }
2292         /* For simplicity in the handlers */
2293         adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
2294
2295         return (0);
2296 }
2297
2298
2299 /*********************************************************************
2300  *
2301  *  Setup MSIX Interrupt resources and handlers 
2302  *
2303  **********************************************************************/
2304 static int
2305 ixgbe_allocate_msix(struct adapter *adapter)
2306 {
2307         device_t        dev = adapter->dev;
2308         struct          ix_queue *que = adapter->queues;
2309         int             error, rid, vector = 0;
2310
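        /*
        ** Allocate one vector per queue; SYS_RES_IRQ rids for MSI-X
        ** are 1-based (rid 0 is the legacy INTx resource), hence
        ** vector + 1.
        */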
2311         for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
2312                 rid = vector + 1;
2313                 que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2314                     RF_SHAREABLE | RF_ACTIVE);
2315                 if (que->res == NULL) {
2316                         device_printf(dev,"Unable to allocate"
2317                             " bus resource: que interrupt [%d]\n", vector);
2318                         return (ENXIO);
2319                 }
2320                 /* Set the handler function */
2321                 error = bus_setup_intr(dev, que->res, INTR_MPSAFE,
2322                     ixgbe_msix_que, que, &que->tag, &que->serializer);
2323                 if (error) {
2324                         que->res = NULL;
2325                         device_printf(dev, "Failed to register QUE handler");
2326                         return (error);
2327                 }
2328 #if 0 /* __FreeBSD_version >= 800504 */
2329                 bus_describe_intr(dev, que->res, que->tag, "que %d", i);
2330 #endif
2331                 que->msix = vector;
2332                 adapter->que_mask |= (u64)1 << que->msix;
2333                 /*
2334                 ** Bind the msix vector, and thus the
2335                 ** ring to the corresponding cpu.
2336                 */
2337 #if 0 /* XXX */
2338                 if (adapter->num_queues > 1)
2339                         bus_bind_intr(dev, que->res, i);
2340 #endif
2341
2342                 TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
2343                 que->tq = taskqueue_create("ixgbe_que", M_NOWAIT,
2344                     taskqueue_thread_enqueue, &que->tq);
2345                 taskqueue_start_threads(&que->tq, 1, PI_NET, -1, "%s que",
2346                     device_get_nameunit(adapter->dev));
2347         }
2348
2349         /* and Link */
2350         rid = vector + 1;
2351         adapter->res = bus_alloc_resource_any(dev,
2352             SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
2353         if (!adapter->res) {
2354                 device_printf(dev, "Unable to allocate"
2355                     " bus resource: Link interrupt [%d]\n", rid);
2356                 return (ENXIO);
2357         }
2358         /* Set the link handler function */
2359         error = bus_setup_intr(dev, adapter->res, INTR_MPSAFE,
2360             ixgbe_msix_link, adapter, &adapter->tag, &adapter->serializer);
2361         if (error) {
2362                 adapter->res = NULL;
2363                 device_printf(dev, "Failed to register LINK handler");
2364                 return (error);
2365         }
2366 #if 0 /* __FreeBSD_version >= 800504 */
2367         bus_describe_intr(dev, adapter->res, adapter->tag, "link");
2368 #endif
2369         adapter->linkvec = vector;
2370         /* Tasklets for Link, SFP and Multispeed Fiber */
2371         TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
2372         TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
2373         TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
2374 #ifdef IXGBE_FDIR
2375         TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
2376 #endif
2377         adapter->tq = taskqueue_create("ixgbe_link", M_NOWAIT,
2378             taskqueue_thread_enqueue, &adapter->tq);
2379         taskqueue_start_threads(&adapter->tq, 1, PI_NET, -1, "%s linkq",
2380             device_get_nameunit(adapter->dev));
2381
2382         return (0);
2383 }
2384
2385 #if 0   /* HAVE_MSIX */
2386 /*
2387  * Setup Either MSI/X or MSI
2388  */
2389 static int
2390 ixgbe_setup_msix(struct adapter *adapter)
2391 {
2392         device_t dev = adapter->dev;
2393         int rid, want, queues, msgs;
2394
2395         /* Override by tuneable */
2396         if (ixgbe_enable_msix == 0)
2397                 goto msi;
2398
2399         /* First try MSI/X */
2400         rid = PCIR_BAR(MSIX_82598_BAR);
2401         adapter->msix_mem = bus_alloc_resource_any(dev,
2402             SYS_RES_MEMORY, &rid, RF_ACTIVE);
2403         if (!adapter->msix_mem) {
2404                 rid += 4;       /* 82599 maps in higher BAR */
2405                 adapter->msix_mem = bus_alloc_resource_any(dev,
2406                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
2407         }
2408         if (!adapter->msix_mem) {
2409                 /* May not be enabled */
2410                 device_printf(adapter->dev,
2411                     "Unable to map MSIX table \n");
2412                 goto msi;
2413         }
2414
2415         msgs = pci_msix_count(dev); 
2416         if (msgs == 0) { /* system has msix disabled */
2417                 bus_release_resource(dev, SYS_RES_MEMORY,
2418                     rid, adapter->msix_mem);
2419                 adapter->msix_mem = NULL;
2420                 goto msi;
2421         }
2422
2423         /* Figure out a reasonable auto config value */
2424         queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
2425
2426         if (ixgbe_num_queues != 0)
2427                 queues = ixgbe_num_queues;
2428         /* Set max queues to 8 when autoconfiguring */
2429         else if ((ixgbe_num_queues == 0) && (queues > 8))
2430                 queues = 8;
2431
2432         /*
2433         ** Want one vector (RX/TX pair) per queue
2434         ** plus an additional for Link.
2435         */
2436         want = queues + 1;
2437         if (msgs >= want)
2438                 msgs = want;
2439         else {
2440                 device_printf(adapter->dev,
2441                     "MSIX Configuration Problem, "
2442                     "%d vectors but %d queues wanted!\n",
2443                     msgs, want);
2444                 return (0); /* Will go to Legacy setup */
2445         }
2446         if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
2447                 device_printf(adapter->dev,
2448                     "Using MSIX interrupts with %d vectors\n", msgs);
2449                 adapter->num_queues = queues;
2450                 return (msgs);
2451         }
2452 msi:
2453         msgs = pci_msi_count(dev);
2454         if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
2455                 device_printf(adapter->dev,"Using an MSI interrupt\n");
2456         else
2457                 device_printf(adapter->dev,"Using a Legacy interrupt\n");
2458         return (msgs);
2459 }
2460 #endif
2461
2462
2463 static int
2464 ixgbe_allocate_pci_resources(struct adapter *adapter)
2465 {
2466         int             rid;
2467         device_t        dev = adapter->dev;
2468
2469         rid = PCIR_BAR(0);
2470         adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2471             &rid, RF_ACTIVE);
2472
2473         if (!(adapter->pci_mem)) {
2474                 device_printf(dev,"Unable to allocate bus resource: memory\n");
2475                 return (ENXIO);
2476         }
2477
2478         adapter->osdep.mem_bus_space_tag =
2479                 rman_get_bustag(adapter->pci_mem);
2480         adapter->osdep.mem_bus_space_handle =
2481                 rman_get_bushandle(adapter->pci_mem);
2482         adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
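        /*
        ** The shared code reaches the registers through the osdep
        ** bus_space tag/handle (via hw->back), so hw_addr pointing at
        ** the handle rather than a mapped VA is intentional.
        */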
2483
2484         /* Legacy defaults */
2485         adapter->num_queues = 1;
2486         adapter->hw.back = &adapter->osdep;
2487
2488         /*
2489         ** Now setup MSI or MSI/X, should
2490         ** return us the number of supported
2491         ** vectors. (Will be 1 for MSI)
2492         */
2493 #if 0   /* HAVE_MSIX */
2494         adapter->msix = ixgbe_setup_msix(adapter);
2495 #endif
2496         return (0);
2497 }
2498
2499 static void
2500 ixgbe_free_pci_resources(struct adapter * adapter)
2501 {
2502         struct          ix_queue *que = adapter->queues;
2503         device_t        dev = adapter->dev;
2504         int             rid, memrid;
2505
2506         if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2507                 memrid = PCIR_BAR(MSIX_82598_BAR);
2508         else
2509                 memrid = PCIR_BAR(MSIX_82599_BAR);
2510
2511         /*
2512         ** There is a slight possibility of a failure mode
2513         ** in attach that will result in entering this function
2514         ** before interrupt resources have been initialized, and
2515         ** in that case we do not want to execute the loops below.
2516         ** We can detect this reliably by the state of the adapter's
2517         ** res pointer.
2518         */
2519         if (adapter->res == NULL)
2520                 goto mem;
2521
2522         /*
2523         **  Release all msix queue resources:
2524         */
2525         for (int i = 0; i < adapter->num_queues; i++, que++) {
2526                 rid = que->msix + 1;
2527                 if (que->tag != NULL) {
2528                         bus_teardown_intr(dev, que->res, que->tag);
2529                         que->tag = NULL;
2530                 }
2531                 if (que->res != NULL)
2532                         bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
2533         }
2534
2535
2536         /* Clean the Legacy or Link interrupt last */
2537         if (adapter->linkvec) /* we are doing MSIX */
2538                 rid = adapter->linkvec + 1;
2539         else
2540                 rid = (adapter->msix != 0) ? 1 : 0;
2541
2542         if (adapter->tag != NULL) {
2543                 bus_teardown_intr(dev, adapter->res, adapter->tag);
2544                 adapter->tag = NULL;
2545         }
2546         if (adapter->res != NULL)
2547                 bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
2548         if (adapter->intr_type == PCI_INTR_TYPE_MSI)
2549                 pci_release_msi(adapter->dev);
2550
2551 mem:
2552         if (adapter->msix)
2553                 pci_release_msi(dev);
2554
2555         if (adapter->msix_mem != NULL)
2556                 bus_release_resource(dev, SYS_RES_MEMORY,
2557                     memrid, adapter->msix_mem);
2558
2559         if (adapter->pci_mem != NULL)
2560                 bus_release_resource(dev, SYS_RES_MEMORY,
2561                     PCIR_BAR(0), adapter->pci_mem);
2562
2563         return;
2564 }
2565
2566 /*********************************************************************
2567  *
2568  *  Setup networking device structure and register an interface.
2569  *
2570  **********************************************************************/
2571 static int
2572 ixgbe_setup_interface(device_t dev, struct adapter *adapter)
2573 {
2574         struct ixgbe_hw *hw = &adapter->hw;
2575         struct ifnet   *ifp;
2576
2577         INIT_DEBUGOUT("ixgbe_setup_interface: begin");
2578
2579         ifp = adapter->ifp = if_alloc(IFT_ETHER);
2580         if (ifp == NULL) {
2581                 device_printf(dev, "can not allocate ifnet structure\n");
2582                 return (-1);
2583         }
2584         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2585         ifp->if_baudrate = 1000000000;
2586         ifp->if_init = ixgbe_init;
2587         ifp->if_softc = adapter;
2588         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2589         ifp->if_ioctl = ixgbe_ioctl;
2590         ifp->if_start = ixgbe_start;
2591 #if 0 /* __FreeBSD_version >= 800000 */
2592         ifp->if_transmit = ixgbe_mq_start;
2593         ifp->if_qflush = ixgbe_qflush;
2594 #endif
2595         ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
2596
2597         ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
2598
2599         adapter->max_frame_size =
2600             ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
2601
2602         /*
2603          * Tell the upper layer(s) we support long frames.
2604          */
2605         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2606
2607 #if 0 /* NET_TSO */
2608         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
2609 #endif
2610         ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
2611         ifp->if_capabilities |= IFCAP_JUMBO_MTU;
2612         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
2613 #if 0 /* NET_TSO */
2614                              |  IFCAP_VLAN_HWTSO
2615 #endif
2616                              |  IFCAP_VLAN_MTU;
2617         ifp->if_capenable = ifp->if_capabilities;
2618
2619         /* Don't enable LRO by default */
2620 #if 0 /* NET_LRO */
2621         ifp->if_capabilities |= IFCAP_LRO;
2622 #endif
2623
2624         /*
2625         ** Don't turn this on by default: if vlans are
2626         ** created on another pseudo device (e.g. lagg),
2627         ** vlan events are not passed through, breaking
2628         ** operation, though with HW FILTER off it works.
2629         ** If using vlans directly on the ixgbe driver you
2630         ** can enable this and get full hardware tag filtering.
2631         */
2632         ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
2633
2634         /*
2635          * Specify the media types supported by this adapter and register
2636          * callbacks to update media and link information
2637          */
2638         ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
2639                      ixgbe_media_status);
2640         ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
2641         ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
2642         if (hw->device_id == IXGBE_DEV_ID_82598AT) {
2643                 ifmedia_add(&adapter->media,
2644                     IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2645                 ifmedia_add(&adapter->media,
2646                     IFM_ETHER | IFM_1000_T, 0, NULL);
2647         }
2648         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2649         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2650
2651         return (0);
2652 }
2653
2654 static void
2655 ixgbe_config_link(struct adapter *adapter)
2656 {
2657         struct ixgbe_hw *hw = &adapter->hw;
2658         u32     autoneg, err = 0;
2659         bool    sfp, negotiate;
2660
2661         sfp = ixgbe_is_sfp(hw);
2662
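        /*
        ** SFP+ link bring-up is deferred to the mod/msf taskqueues
        ** (module identification, multispeed fiber negotiation); only
        ** non-SFP ports are configured synchronously here.
        */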
2663         if (sfp) { 
2664                 if (hw->phy.multispeed_fiber) {
2665                         hw->mac.ops.setup_sfp(hw);
2666                         ixgbe_enable_tx_laser(hw);
2667                         taskqueue_enqueue(adapter->tq, &adapter->msf_task);
2668                 } else
2669                         taskqueue_enqueue(adapter->tq, &adapter->mod_task);
2670         } else {
2671                 if (hw->mac.ops.check_link)
2672                         err = ixgbe_check_link(hw, &autoneg,
2673                             &adapter->link_up, FALSE);
2674                 if (err)
2675                         goto out;
2676                 autoneg = hw->phy.autoneg_advertised;
2677                 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
2678                         err  = hw->mac.ops.get_link_capabilities(hw,
2679                             &autoneg, &negotiate);
2680                 if (err)
2681                         goto out;
2682                 if (hw->mac.ops.setup_link)
2683                         err = hw->mac.ops.setup_link(hw, autoneg,
2684                             negotiate, adapter->link_up);
2685         }
2686 out:
2687         return;
2688 }
2689
2690 /********************************************************************
2691  * Manage DMA'able memory.
2692  *******************************************************************/
2693 static void
2694 ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
2695 {
2696         if (error)
2697                 return;
2698         *(bus_addr_t *) arg = segs->ds_addr;
2699         return;
2700 }
2701
2702 static int
2703 ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
2704                 struct ixgbe_dma_alloc *dma, int mapflags)
2705 {
2706         device_t dev = adapter->dev;
2707         int             r;
2708
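        /*
        ** Standard three-step busdma setup: create a tag describing
        ** the allocation, allocate the DMA-safe memory, then load the
        ** map to obtain the bus address; each failure label unwinds
        ** the steps completed so far.
        */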
2709         r = bus_dma_tag_create(NULL,    /* parent */
2710                                DBA_ALIGN, 0,    /* alignment, bounds */
2711                                BUS_SPACE_MAXADDR,       /* lowaddr */
2712                                BUS_SPACE_MAXADDR,       /* highaddr */
2713                                NULL, NULL,      /* filter, filterarg */
2714                                size,    /* maxsize */
2715                                1,       /* nsegments */
2716                                size,    /* maxsegsize */
2717                                BUS_DMA_ALLOCNOW,        /* flags */
2718                                &dma->dma_tag);
2719         if (r != 0) {
2720                 device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
2721                        "error %u\n", r);
2722                 goto fail_0;
2723         }
2724         r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
2725                              BUS_DMA_NOWAIT, &dma->dma_map);
2726         if (r != 0) {
2727                 device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2728                        "error %u\n", r);
2729                 goto fail_1;
2730         }
2731         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2732                             size,
2733                             ixgbe_dmamap_cb,
2734                             &dma->dma_paddr,
2735                             mapflags | BUS_DMA_NOWAIT);
2736         if (r != 0) {
2737                 device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
2738                        "error %u\n", r);
2739                 goto fail_2;
2740         }
2741         dma->dma_size = size;
2742         return (0);
2743 fail_2:
2744         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2745 fail_1:
2746         bus_dma_tag_destroy(dma->dma_tag);
2747 fail_0:
2748         dma->dma_map = NULL;
2749         dma->dma_tag = NULL;
2750         return (r);
2751 }
2752
2753 static void
2754 ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
2755 {
2756         bus_dmamap_sync(dma->dma_tag, dma->dma_map,
2757             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2758         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2759         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2760         bus_dma_tag_destroy(dma->dma_tag);
2761 }
2762
2763
2764 /*********************************************************************
2765  *
2766  *  Allocate memory for the transmit and receive rings, and then
2767  *  the descriptors associated with each, called only once at attach.
2768  *
2769  **********************************************************************/
2770 static int
2771 ixgbe_allocate_queues(struct adapter *adapter)
2772 {
2773         device_t        dev = adapter->dev;
2774         struct ix_queue *que;
2775         struct tx_ring  *txr;
2776         struct rx_ring  *rxr;
2777         int rsize, tsize, error = IXGBE_SUCCESS;
2778         int txconf = 0, rxconf = 0;
2779
2780         /* First allocate the top level queue structs */
2781         if (!(adapter->queues =
2782             (struct ix_queue *) kmalloc(sizeof(struct ix_queue) *
2783             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2784                 device_printf(dev, "Unable to allocate queue memory\n");
2785                 error = ENOMEM;
2786                 goto fail;
2787         }
2788
2789         /* Next allocate the TX ring struct memory */
2790         if (!(adapter->tx_rings =
2791             (struct tx_ring *) kmalloc(sizeof(struct tx_ring) *
2792             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2793                 device_printf(dev, "Unable to allocate TX ring memory\n");
2794                 error = ENOMEM;
2795                 goto tx_fail;
2796         }
2797
2798         /* Next allocate the RX */
2799         if (!(adapter->rx_rings =
2800             (struct rx_ring *) kmalloc(sizeof(struct rx_ring) *
2801             adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2802                 device_printf(dev, "Unable to allocate RX ring memory\n");
2803                 error = ENOMEM;
2804                 goto rx_fail;
2805         }
2806
2807         /* For the ring itself */
2808         tsize = roundup2(adapter->num_tx_desc *
2809             sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2810
2811         /*
2812          * Now set up the TX queues, txconf is needed to handle the
2813          * possibility that things fail midcourse and we need to
2814          * undo memory gracefully
2815          */ 
2816         for (int i = 0; i < adapter->num_queues; i++, txconf++) {
2817                 /* Set up some basics */
2818                 txr = &adapter->tx_rings[i];
2819                 txr->adapter = adapter;
2820                 txr->me = i;
2821
2822                 /* Initialize the TX side lock */
2823                 ksnprintf(txr->lock_name, sizeof(txr->lock_name), "%s:tx(%d)",
2824                     device_get_nameunit(dev), txr->me);
2825                 lockinit(&txr->tx_lock, txr->lock_name, 0, LK_CANRECURSE);
2826
2827                 if (ixgbe_dma_malloc(adapter, tsize,
2828                         &txr->txdma, BUS_DMA_NOWAIT)) {
2829                         device_printf(dev,
2830                             "Unable to allocate TX Descriptor memory\n");
2831                         error = ENOMEM;
2832                         goto err_tx_desc;
2833                 }
2834                 txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2835                 bzero((void *)txr->tx_base, tsize);
2836
2837                 /* Now allocate transmit buffers for the ring */
2838                 if (ixgbe_allocate_transmit_buffers(txr)) {
2839                         device_printf(dev,
2840                             "Critical Failure setting up transmit buffers\n");
2841                         error = ENOMEM;
2842                         goto err_tx_desc;
2843                 }
2844 #if 0 /* __FreeBSD_version >= 800000 */
2845                 /* Allocate a buf ring */
2846                 txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
2847                     M_WAITOK, &txr->tx_mtx);
2848                 if (txr->br == NULL) {
2849                         device_printf(dev,
2850                             "Critical Failure setting up buf ring\n");
2851                         error = ENOMEM;
2852                         goto err_tx_desc;
2853                 }
2854 #endif
2855         }
2856
2857         /*
2858          * Next the RX queues...
2859          */ 
2860         rsize = roundup2(adapter->num_rx_desc *
2861             sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
2862         for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
2863                 rxr = &adapter->rx_rings[i];
2864                 /* Set up some basics */
2865                 rxr->adapter = adapter;
2866                 rxr->me = i;
2867
2868                 /* Initialize the RX side lock */
2869                 ksnprintf(rxr->lock_name, sizeof(rxr->lock_name), "%s:rx(%d)",
2870                     device_get_nameunit(dev), rxr->me);
2871                 lockinit(&rxr->rx_lock, rxr->lock_name, 0, LK_CANRECURSE);
2872
2873                 if (ixgbe_dma_malloc(adapter, rsize,
2874                         &rxr->rxdma, BUS_DMA_NOWAIT)) {
2875                         device_printf(dev,
2876                             "Unable to allocate RX Descriptor memory\n");
2877                         error = ENOMEM;
2878                         goto err_rx_desc;
2879                 }
2880                 rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2881                 bzero((void *)rxr->rx_base, rsize);
2882
2883                 /* Allocate receive buffers for the ring */
2884                 if (ixgbe_allocate_receive_buffers(rxr)) {
2885                         device_printf(dev,
2886                             "Critical Failure setting up receive buffers\n");
2887                         error = ENOMEM;
2888                         goto err_rx_desc;
2889                 }
2890         }
2891
2892         /*
2893         ** Finally set up the queue holding structs
2894         */
2895         for (int i = 0; i < adapter->num_queues; i++) {
2896                 que = &adapter->queues[i];
2897                 que->adapter = adapter;
2898                 que->txr = &adapter->tx_rings[i];
2899                 que->rxr = &adapter->rx_rings[i];
2900         }
2901
2902         return (0);
2903
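/*
 * Error unwind: these labels run in reverse order of the allocations
 * above; rxconf/txconf count the fully initialized rings, so each
 * loop releases the descriptor areas of the rings completed before
 * the failure.
 */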
2904 err_rx_desc:
2905         for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
2906                 ixgbe_dma_free(adapter, &rxr->rxdma);
2907 err_tx_desc:
2908         for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
2909                 ixgbe_dma_free(adapter, &txr->txdma);
2910         kfree(adapter->rx_rings, M_DEVBUF);
2911 rx_fail:
2912         kfree(adapter->tx_rings, M_DEVBUF);
2913 tx_fail:
2914         kfree(adapter->queues, M_DEVBUF);
2915 fail:
2916         return (error);
2917 }
2918
2919 /*********************************************************************
2920  *
2921  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2922  *  the information needed to transmit a packet on the wire. This is
2923  *  called only once at attach; setup is done on every reset.
2924  *
2925  **********************************************************************/
2926 static int
2927 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2928 {
2929         struct adapter *adapter = txr->adapter;
2930         device_t dev = adapter->dev;
2931         struct ixgbe_tx_buf *txbuf;
2932         int error, i;
2933
2934         /*
2935          * Setup DMA descriptor areas.
2936          */
2937         if ((error = bus_dma_tag_create(
2938                                NULL,    /* parent */
2939                                1, 0,            /* alignment, bounds */
2940                                BUS_SPACE_MAXADDR,       /* lowaddr */
2941                                BUS_SPACE_MAXADDR,       /* highaddr */
2942                                NULL, NULL,              /* filter, filterarg */
2943                                IXGBE_TSO_SIZE,          /* maxsize */
2944                                adapter->num_segs,       /* nsegments */
2945                                PAGE_SIZE,               /* maxsegsize */
2946                                0,                       /* flags */
2947                                &txr->txtag))) {
2948                 device_printf(dev, "Unable to allocate TX DMA tag\n");
2949                 goto fail;
2950         }
2951
2952         if (!(txr->tx_buffers =
2953             (struct ixgbe_tx_buf *) kmalloc(sizeof(struct ixgbe_tx_buf) *
2954             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
2955                 device_printf(dev, "Unable to allocate tx_buffer memory\n");
2956                 error = ENOMEM;
2957                 goto fail;
2958         }
2959
2960         /* Create the descriptor buffer dma maps */
2961         txbuf = txr->tx_buffers;
2962         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
2963                 error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
2964                 if (error != 0) {
2965                         device_printf(dev, "Unable to create TX DMA map\n");
2966                         goto fail;
2967                 }
2968         }
2969
2970         return 0;
2971 fail:
2972         /* Free everything; this handles the case where we failed partway */
2973         ixgbe_free_transmit_structures(adapter);
2974         return (error);
2975 }
2976
2977 /*********************************************************************
2978  *
2979  *  Initialize a transmit ring.
2980  *
2981  **********************************************************************/
2982 static void
2983 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2984 {
2985         struct adapter *adapter = txr->adapter;
2986         struct ixgbe_tx_buf *txbuf;
2987         int i;
2988 #ifdef DEV_NETMAP
2989         struct netmap_adapter *na = NA(adapter->ifp);
2990         struct netmap_slot *slot;
2991 #endif /* DEV_NETMAP */
2992
2993         /* Clear the old ring contents */
2994         IXGBE_TX_LOCK(txr);
2995 #ifdef DEV_NETMAP
2996         /*
2997          * (under lock): if in netmap mode, do some consistency
2998          * checks and set slot to entry 0 of the netmap ring.
2999          */
3000         slot = netmap_reset(na, NR_TX, txr->me, 0);
3001 #endif /* DEV_NETMAP */
3002         bzero((void *)txr->tx_base,
3003               (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
3004         /* Reset indices */
3005         txr->next_avail_desc = 0;
3006         txr->next_to_clean = 0;
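        /*
         * next_avail_desc is where the next transmit will be queued,
         * next_to_clean is where ixgbe_txeof() resumes; with both at
         * zero the ring starts out empty, and tx_avail is reset to
         * the full descriptor count further down.
         */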
3007
3008         /* Free any existing tx buffers. */
3009         txbuf = txr->tx_buffers;
3010         for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
3011                 if (txbuf->m_head != NULL) {
3012                         bus_dmamap_sync(txr->txtag, txbuf->map,
3013                             BUS_DMASYNC_POSTWRITE);
3014                         bus_dmamap_unload(txr->txtag, txbuf->map);
3015                         m_freem(txbuf->m_head);
3016                         txbuf->m_head = NULL;
3017                 }
3018 #ifdef DEV_NETMAP
3019                 /*
3020                  * In netmap mode, set the map for the packet buffer.
3021                  * NOTE: Some drivers (not this one) also need to set
3022                  * the physical buffer address in the NIC ring.
3023                  * Slots in the netmap ring (indexed by "si") are
3024                  * kring->nkr_hwofs positions "ahead" wrt the
3025                  * corresponding slot in the NIC ring. In some drivers
3026                  * (not here) nkr_hwofs can be negative. Function
3027                  * netmap_idx_n2k() handles wraparounds properly.
3028                  */
3029                 if (slot) {
3030                         int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
3031                         netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
3032                 }
3033 #endif /* DEV_NETMAP */
3034                 /* Clear the EOP index */
3035                 txbuf->eop_index = -1;
3036         }
3037
3038 #ifdef IXGBE_FDIR
3039         /* Set the rate at which we sample packets */
3040         if (adapter->hw.mac.type != ixgbe_mac_82598EB)
3041                 txr->atr_sample = atr_sample_rate;
3042 #endif
3043
3044         /* Set number of descriptors available */
3045         txr->tx_avail = adapter->num_tx_desc;
3046
3047         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3048             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3049         IXGBE_TX_UNLOCK(txr);
3050 }
3051
3052 /*********************************************************************
3053  *
3054  *  Initialize all transmit rings.
3055  *
3056  **********************************************************************/
3057 static int
3058 ixgbe_setup_transmit_structures(struct adapter *adapter)
3059 {
3060         struct tx_ring *txr = adapter->tx_rings;
3061
3062         for (int i = 0; i < adapter->num_queues; i++, txr++)
3063                 ixgbe_setup_transmit_ring(txr);
3064
3065         return (0);
3066 }
3067
3068 /*********************************************************************
3069  *
3070  *  Enable transmit unit.
3071  *
3072  **********************************************************************/
3073 static void
3074 ixgbe_initialize_transmit_units(struct adapter *adapter)
3075 {
3076         struct tx_ring  *txr = adapter->tx_rings;
3077         struct ixgbe_hw *hw = &adapter->hw;
3078
3079         /* Setup the Base and Length of the Tx Descriptor Ring */
3080
3081         for (int i = 0; i < adapter->num_queues; i++, txr++) {
3082                 u64     tdba = txr->txdma.dma_paddr;
3083                 u32     txctrl;
3084
3085                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
3086                        (tdba & 0x00000000ffffffffULL));
3087                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
3088                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
3089                     adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
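                /*
                 * Note: legacy and advanced descriptors are the same
                 * size (16 bytes), so using the legacy struct here as
                 * the TDLEN unit is safe either way.
                 */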
3090
3091                 /* Setup the HW Tx Head and Tail descriptor pointers */
3092                 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
3093                 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
3094
3095                 /* Setup Transmit Descriptor Cmd Settings */
3096                 txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
3097                 txr->queue_status = IXGBE_QUEUE_IDLE;
3098
3099                 /* Disable Head Writeback */
3100                 switch (hw->mac.type) {
3101                 case ixgbe_mac_82598EB:
3102                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
3103                         break;
3104                 case ixgbe_mac_82599EB:
3105                 case ixgbe_mac_X540:
3106                 default:
3107                         txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
3108                         break;
3109                 }
3110                 txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
3111                 switch (hw->mac.type) {
3112                 case ixgbe_mac_82598EB:
3113                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
3114                         break;
3115                 case ixgbe_mac_82599EB:
3116                 case ixgbe_mac_X540:
3117                 default:
3118                         IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
3119                         break;
3120                 }
3121
3122         }
3123
3124         if (hw->mac.type != ixgbe_mac_82598EB) {
3125                 u32 dmatxctl, rttdcs;
3126                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
3127                 dmatxctl |= IXGBE_DMATXCTL_TE;
3128                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
3129                 /* Disable arbiter to set MTQC */
3130                 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
3131                 rttdcs |= IXGBE_RTTDCS_ARBDIS;
3132                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3133                 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
3134                 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
3135                 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
3136         }
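        /*
         * The ARBDIS dance above follows the 82599/X540 datasheets:
         * MTQC may only be written while the TX descriptor arbiter is
         * disabled.  IXGBE_MTQC_64Q_1PB selects the plain 64-queue,
         * single packet buffer layout (no DCB or virtualization).
         */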
3137
3138         return;
3139 }
3140
3141 /*********************************************************************
3142  *
3143  *  Free all transmit rings.
3144  *
3145  **********************************************************************/
3146 static void
3147 ixgbe_free_transmit_structures(struct adapter *adapter)
3148 {
3149         struct tx_ring *txr = adapter->tx_rings;
3150
3151         for (int i = 0; i < adapter->num_queues; i++, txr++) {
3152                 IXGBE_TX_LOCK(txr);
3153                 ixgbe_free_transmit_buffers(txr);
3154                 ixgbe_dma_free(adapter, &txr->txdma);
3155                 IXGBE_TX_UNLOCK(txr);
3156                 IXGBE_TX_LOCK_DESTROY(txr);
3157         }
3158         kfree(adapter->tx_rings, M_DEVBUF);
3159 }
3160
3161 /*********************************************************************
3162  *
3163  *  Free transmit ring related data structures.
3164  *
3165  **********************************************************************/
3166 static void
3167 ixgbe_free_transmit_buffers(struct tx_ring *txr)
3168 {
3169         struct adapter *adapter = txr->adapter;
3170         struct ixgbe_tx_buf *tx_buffer;
3171         int             i;
3172
3173         INIT_DEBUGOUT("free_transmit_ring: begin");
3174
3175         if (txr->tx_buffers == NULL)
3176                 return;
3177
3178         tx_buffer = txr->tx_buffers;
3179         for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3180                 if (tx_buffer->m_head != NULL) {
3181                         bus_dmamap_sync(txr->txtag, tx_buffer->map,
3182                             BUS_DMASYNC_POSTWRITE);
3183                         bus_dmamap_unload(txr->txtag,
3184                             tx_buffer->map);
3185                         m_freem(tx_buffer->m_head);
3186                         tx_buffer->m_head = NULL;
3187                         if (tx_buffer->map != NULL) {
3188                                 bus_dmamap_destroy(txr->txtag,
3189                                     tx_buffer->map);
3190                                 tx_buffer->map = NULL;
3191                         }
3192                 } else if (tx_buffer->map != NULL) {
3193                         bus_dmamap_unload(txr->txtag,
3194                             tx_buffer->map);
3195                         bus_dmamap_destroy(txr->txtag,
3196                             tx_buffer->map);
3197                         tx_buffer->map = NULL;
3198                 }
3199         }
3200 #if 0 /* __FreeBSD_version >= 800000 */
3201         if (txr->br != NULL)
3202                 buf_ring_free(txr->br, M_DEVBUF);
3203 #endif
3204         if (txr->tx_buffers != NULL) {
3205                 kfree(txr->tx_buffers, M_DEVBUF);
3206                 txr->tx_buffers = NULL;
3207         }
3208         if (txr->txtag != NULL) {
3209                 bus_dma_tag_destroy(txr->txtag);
3210                 txr->txtag = NULL;
3211         }
3212         return;
3213 }
3214
3215 /*********************************************************************
3216  *
3217  *  Advanced Context Descriptor setup for VLAN or CSUM
3218  *
3219  **********************************************************************/
3220
3221 static bool
3222 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
3223 {
3224         struct adapter *adapter = txr->adapter;
3225         struct ixgbe_adv_tx_context_desc *TXD;
3226         struct ixgbe_tx_buf        *tx_buffer;
3227         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3228         struct ether_vlan_header *eh;
3229         struct ip *ip;
3230         struct ip6_hdr *ip6;
3231         int  ehdrlen, ip_hlen = 0;
3232         u16     etype;
3233         u8      ipproto = 0;
3234         bool    offload = TRUE;
3235         int ctxd = txr->next_avail_desc;
3236         u16 vtag = 0;
3237
3238
3239         if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
3240                 offload = FALSE;
3241
3242         tx_buffer = &txr->tx_buffers[ctxd];
3243         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3244
3245         /*
3246         ** In advanced descriptors the vlan tag must 
3247         ** be placed into the descriptor itself.
3248         */
3249         if (mp->m_flags & M_VLANTAG) {
3250                 vtag = htole16(mp->m_pkthdr.ether_vlantag);
3251                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3252         } else if (offload == FALSE)
3253                 return FALSE;
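        /*
         * Past this point we have either a VLAN tag to insert or at
         * least one checksum offload request, so a context descriptor
         * is genuinely needed.
         */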
3254
3255         /*
3256          * Determine where frame payload starts.
3257          * Jump over vlan headers if already present,
3258          * helpful for QinQ too.
3259          */
3260         eh = mtod(mp, struct ether_vlan_header *);
3261         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3262                 etype = ntohs(eh->evl_proto);
3263                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3264         } else {
3265                 etype = ntohs(eh->evl_encap_proto);
3266                 ehdrlen = ETHER_HDR_LEN;
3267         }
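        /*
         * e.g. an untagged frame yields ehdrlen = 14, while a single
         * 802.1Q tag bumps it to 18 (ETHER_VLAN_ENCAP_LEN == 4); the
         * hardware uses this MACLEN to locate the IP header.
         */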
3268
3269         /* Set the ether header length */
3270         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3271
3272         switch (etype) {
3273                 case ETHERTYPE_IP:
3274                         ip = (struct ip *)(mp->m_data + ehdrlen);
3275                         ip_hlen = ip->ip_hl << 2;
3276                         ipproto = ip->ip_p;
3277                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3278                         break;
3279                 case ETHERTYPE_IPV6:
3280                         ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3281                         ip_hlen = sizeof(struct ip6_hdr);
3282                         /* XXX-BZ this will go badly in case of ext hdrs. */
3283                         ipproto = ip6->ip6_nxt;
3284                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3285                         break;
3286                 default:
3287                         offload = FALSE;
3288                         break;
3289         }
3290
3291         vlan_macip_lens |= ip_hlen;
3292         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3293
3294         switch (ipproto) {
3295                 case IPPROTO_TCP:
3296                         if (mp->m_pkthdr.csum_flags & CSUM_TCP)
3297                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3298                         break;
3299
3300                 case IPPROTO_UDP:
3301                         if (mp->m_pkthdr.csum_flags & CSUM_UDP)
3302                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
3303                         break;
3304
3305 #if 0
3306                 case IPPROTO_SCTP:
3307                         if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
3308                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3309                         break;
3310 #endif
3311                 default:
3312                         offload = FALSE;
3313                         break;
3314         }
3315
3316         /* Now copy bits into descriptor */
3317         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3318         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3319         TXD->seqnum_seed = htole32(0);
3320         TXD->mss_l4len_idx = htole32(0);
3321
3322         tx_buffer->m_head = NULL;
3323         tx_buffer->eop_index = -1;
3324
3325         /* We've consumed the first desc, adjust counters */
3326         if (++ctxd == adapter->num_tx_desc)
3327                 ctxd = 0;
3328         txr->next_avail_desc = ctxd;
3329         --txr->tx_avail;
3330
3331         return (offload);
3332 }
3333
3334 /**********************************************************************
3335  *
3336  *  Setup work for hardware segmentation offload (TSO) on
3337  *  adapters using advanced tx descriptors
3338  *
3339  **********************************************************************/
3340 #if 0   /* NET_TSO */
3341 static bool
3342 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen,
3343     u32 *olinfo_status)
3344 {
3345         struct adapter *adapter = txr->adapter;
3346         struct ixgbe_adv_tx_context_desc *TXD;
3347         struct ixgbe_tx_buf        *tx_buffer;
3348         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3349         u16 vtag = 0, eh_type;
3350         u32 mss_l4len_idx = 0, len;
3351         int ctxd, ehdrlen, ip_hlen, tcp_hlen;
3352         struct ether_vlan_header *eh;
3353 #ifdef INET6
3354         struct ip6_hdr *ip6;
3355 #endif
3356 #ifdef INET
3357         struct ip *ip;
3358 #endif
3359         struct tcphdr *th;
3360
3361
3362         /*
3363          * Determine where frame payload starts.
3364          * Jump over vlan headers if already present
3365          */
3366         eh = mtod(mp, struct ether_vlan_header *);
3367         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3368                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3369                 eh_type = eh->evl_proto;
3370         } else {
3371                 ehdrlen = ETHER_HDR_LEN;
3372                 eh_type = eh->evl_encap_proto;
3373         }
3374
3375         /* Ensure we have at least the IP+TCP header in the first mbuf. */
3376         len = ehdrlen + sizeof(struct tcphdr);
3377         switch (ntohs(eh_type)) {
3378 #ifdef INET6
3379         case ETHERTYPE_IPV6:
3380                 if (mp->m_len < len + sizeof(struct ip6_hdr))
3381                         return FALSE;
3382                 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3383                 /* XXX-BZ For now we do not pretend to support ext. hdrs. */
3384                 if (ip6->ip6_nxt != IPPROTO_TCP)
3385                         return FALSE;
3386                 ip_hlen = sizeof(struct ip6_hdr);
3387                 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
3388                 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
3389                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
3390                 break;
3391 #endif
3392 #ifdef INET
3393         case ETHERTYPE_IP:
3394                 if (mp->m_len < len + sizeof(struct ip))
3395                         return FALSE;
3396                 ip = (struct ip *)(mp->m_data + ehdrlen);
3397                 if (ip->ip_p != IPPROTO_TCP)
3398                         return FALSE;
3399                 ip->ip_sum = 0;
3400                 ip_hlen = ip->ip_hl << 2;
3401                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3402                 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3403                     ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3404                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3405                 /* Tell transmit desc to also do IPv4 checksum. */
3406                 *olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
3407                 break;
3408 #endif
3409         default:
3410                 panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
3411                     __func__, ntohs(eh_type));
3412                 break;
3413         }
3414
3415         ctxd = txr->next_avail_desc;
3416         tx_buffer = &txr->tx_buffers[ctxd];
3417         TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
3418
3419         tcp_hlen = th->th_off << 2;
3420
3421         /* This is used in the transmit desc in encap */
3422         *paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
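        /*
         * paylen is the TCP payload only: e.g. a 9014-byte TSO chain
         * with 14 + 20 + 20 bytes of headers yields 8960, which the
         * hardware then slices into MSS-sized segments, replicating
         * the headers for each one.
         */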
3423
3424         /* VLAN MACLEN IPLEN */
3425         if (mp->m_flags & M_VLANTAG) {
3426                 vtag = htole16(mp->m_pkthdr.ether_vlantag);
3427                 vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
3428         }
3429
3430         vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
3431         vlan_macip_lens |= ip_hlen;
3432         TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
3433
3434         /* ADV DTYPE TUCMD */
3435         type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3436         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3437         TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
3438
3439         /* MSS L4LEN IDX */
3440         mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
3441         mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
3442         TXD->mss_l4len_idx = htole32(mss_l4len_idx);
3443
3444         TXD->seqnum_seed = htole32(0);
3445         tx_buffer->m_head = NULL;
3446         tx_buffer->eop_index = -1;
3447
3448         if (++ctxd == adapter->num_tx_desc)
3449                 ctxd = 0;
3450
3451         txr->tx_avail--;
3452         txr->next_avail_desc = ctxd;
3453         return TRUE;
3454 }
3455 #endif
3456
3457 #ifdef IXGBE_FDIR
3458 /*
3459 ** This routine parses packet headers so that Flow
3460 ** Director can make a hashed filter table entry
3461 ** allowing traffic flows to be identified and kept
3462 ** on the same cpu.  Doing this for every packet
3463 ** would be a performance hit, so we only sample
3464 ** one of every IXGBE_FDIR_RATE packets.
3465 */
3466 static void
3467 ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
3468 {
3469         struct adapter                  *adapter = txr->adapter;
3470         struct ix_queue                 *que;
3471         struct ip                       *ip;
3472         struct tcphdr                   *th;
3473         struct udphdr                   *uh;
3474         struct ether_vlan_header        *eh;
3475         union ixgbe_atr_hash_dword      input = {.dword = 0}; 
3476         union ixgbe_atr_hash_dword      common = {.dword = 0}; 
3477         int                             ehdrlen, ip_hlen;
3478         u16                             etype;
3479
3480         eh = mtod(mp, struct ether_vlan_header *);
3481         if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3482                 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3483                 etype = eh->evl_proto;
3484         } else {
3485                 ehdrlen = ETHER_HDR_LEN;
3486                 etype = eh->evl_encap_proto;
3487         }
3488
3489         /* Only handling IPv4 */
3490         if (etype != htons(ETHERTYPE_IP))
3491                 return;
3492
3493         ip = (struct ip *)(mp->m_data + ehdrlen);
3494         ip_hlen = ip->ip_hl << 2;
3495
3496         /* check if we're UDP or TCP */
3497         switch (ip->ip_p) {
3498         case IPPROTO_TCP:
3499                 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3500                 /* src and dst are inverted */
3501                 common.port.dst ^= th->th_sport;
3502                 common.port.src ^= th->th_dport;
3503                 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
3504                 break;
3505         case IPPROTO_UDP:
3506                 uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
3507                 /* src and dst are inverted */
3508                 common.port.dst ^= uh->uh_sport;
3509                 common.port.src ^= uh->uh_dport;
3510                 input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
3511                 break;
3512         default:
3513                 return;
3514         }
3515
3516         input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vlantag);
3517         if (mp->m_pkthdr.ether_vlantag)
3518                 common.flex_bytes ^= htons(ETHERTYPE_VLAN);
3519         else
3520                 common.flex_bytes ^= etype;
3521         common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
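        /*
         * The src/dst swap above mirrors the direction in which the
         * hardware will hash the *received* half of the flow, so the
         * signature computed here on transmit matches the RX lookup.
         */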
3522
3523         que = &adapter->queues[txr->me];
3524         /*
3525         ** This assumes the Rx queue and Tx
3526         ** queue are bound to the same CPU
3527         */
3528         ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
3529             input, common, que->msix);
3530 }
3531 #endif /* IXGBE_FDIR */
3532
3533 /**********************************************************************
3534  *
3535  *  Examine each tx_buffer in the used queue. If the hardware is done
3536  *  processing the packet then free associated resources. The
3537  *  tx_buffer is put back on the free queue.
3538  *
3539  **********************************************************************/
3540 static bool
3541 ixgbe_txeof(struct tx_ring *txr)
3542 {
3543         struct adapter  *adapter = txr->adapter;
3544         struct ifnet    *ifp = adapter->ifp;
3545         u32     first, last, done, processed;
3546         struct ixgbe_tx_buf *tx_buffer;
3547         struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
3548
3549         KKASSERT(lockstatus(&txr->tx_lock, curthread) != 0);
3550
3551 #ifdef DEV_NETMAP
3552         if (ifp->if_capenable & IFCAP_NETMAP) {
3553                 struct netmap_adapter *na = NA(ifp);
3554                 struct netmap_kring *kring = &na->tx_rings[txr->me];
3555
3556                 tx_desc = (struct ixgbe_legacy_tx_desc *)txr->tx_base;
3557
3558                 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3559                     BUS_DMASYNC_POSTREAD);
3560                 /*
3561                  * In netmap mode, all the work is done in the context
3562                  * of the client thread. Interrupt handlers only wake up
3563                  * clients, which may be sleeping on individual rings
3564                  * or on a global resource for all rings.
3565                  * To implement tx interrupt mitigation, we wake up the client
3566                  * thread roughly every half ring, even if the NIC interrupts
3567                  * more frequently. This is implemented as follows:
3568                  * - ixgbe_txsync() sets kring->nr_kflags with the index of
3569                  *   the slot that should wake up the thread (nkr_num_slots
3570                  *   means the user thread should not be woken up);
3571                  * - the driver ignores tx interrupts unless netmap_mitigate=0
3572                  *   or the slot has the DD bit set.
3573                  *
3574                  * When the driver has separate locks, we need to
3575                  * release and re-acquire txlock to avoid deadlocks.
3576                  * XXX see if we can find a better way.
3577                  */
3578                 if (!netmap_mitigate ||
3579                     (kring->nr_kflags < kring->nkr_num_slots &&
3580                      tx_desc[kring->nr_kflags].upper.fields.status & IXGBE_TXD_STAT_DD)) {
3581                         kring->nr_kflags = kring->nkr_num_slots;
3582                         selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
3583                         IXGBE_TX_UNLOCK(txr);
3584                         IXGBE_CORE_LOCK(adapter);
3585                         selwakeuppri(&na->tx_si, PI_NET);
3586                         IXGBE_CORE_UNLOCK(adapter);
3587                         IXGBE_TX_LOCK(txr);
3588                 }
3589                 return FALSE;
3590         }
3591 #endif /* DEV_NETMAP */
3592
3593         if (txr->tx_avail == adapter->num_tx_desc) {
3594                 txr->queue_status = IXGBE_QUEUE_IDLE;
3595                 return FALSE;
3596         }
3597
3598         processed = 0;
3599         first = txr->next_to_clean;
3600         tx_buffer = &txr->tx_buffers[first];
3601         /* For cleanup we just use legacy struct */
3602         tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3603         last = tx_buffer->eop_index;
3604         if (last == -1)
3605                 return FALSE;
3606         eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3607
3608         /*
3609         ** Get the index of the first descriptor
3610         ** BEYOND the EOP and call that 'done'.
3611         ** I do this so the comparison in the
3612         ** inner while loop below can be simple
3613         */
3614         if (++last == adapter->num_tx_desc) last = 0;
3615         done = last;
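        /*
         * e.g. with a 1024-entry ring, an EOP in slot 1023 gives
         * done = 0, so the cleanup loop below still terminates
         * correctly across the wrap.
         */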
3616
3617         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3618             BUS_DMASYNC_POSTREAD);
3619         /*
3620         ** Only the EOP descriptor of a packet now has the DD
3621         ** bit set, this is what we look for...
3622         */
3623         while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
3624                 /* We clean the range of the packet */
3625                 while (first != done) {
3626                         tx_desc->upper.data = 0;
3627                         tx_desc->lower.data = 0;
3628                         tx_desc->buffer_addr = 0;
3629                         ++txr->tx_avail;
3630                         ++processed;
3631
3632                         if (tx_buffer->m_head) {
3633                                 txr->bytes +=
3634                                     tx_buffer->m_head->m_pkthdr.len;
3635                                 bus_dmamap_sync(txr->txtag,
3636                                     tx_buffer->map,
3637                                     BUS_DMASYNC_POSTWRITE);
3638                                 bus_dmamap_unload(txr->txtag,
3639                                     tx_buffer->map);
3640                                 m_freem(tx_buffer->m_head);
3641                                 tx_buffer->m_head = NULL;
3642                                 tx_buffer->map = NULL;
3643                         }
3644                         tx_buffer->eop_index = -1;
3645                         txr->watchdog_time = ticks;
3646
3647                         if (++first == adapter->num_tx_desc)
3648                                 first = 0;
3649
3650                         tx_buffer = &txr->tx_buffers[first];
3651                         tx_desc =
3652                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
3653                 }
3654                 ++txr->packets;
3655                 ++ifp->if_opackets;
3656                 /* See if there is more work now */
3657                 last = tx_buffer->eop_index;
3658                 if (last != -1) {
3659                         eop_desc =
3660                             (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
3661                         /* Get next done point */
3662                         if (++last == adapter->num_tx_desc) last = 0;
3663                         done = last;
3664                 } else
3665                         break;
3666         }
3667         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
3668             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3669
3670         txr->next_to_clean = first;
3671
3672         /*
3673         ** Watchdog calculation: we know there is work
3674         ** outstanding, or the early return above would
3675         ** have been taken, so nothing processed for too
3676         ** long indicates a hang.
3677         */
3678         if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
3679                 txr->queue_status = IXGBE_QUEUE_HUNG;
3680
3681         /* With a minimum number free, clear the depleted state bit. */
3682         if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD)
3683                 txr->queue_status &= ~IXGBE_QUEUE_DEPLETED;
3684
3685         if (txr->tx_avail == adapter->num_tx_desc) {
3686                 txr->queue_status = IXGBE_QUEUE_IDLE;
3687                 return (FALSE);
3688         }
3689
3690         return TRUE;
3691 }
3692
3693 /*********************************************************************
3694  *
3695  *  Refresh mbuf buffers for RX descriptor rings
3696  *   - now keeps its own state so discards due to resource
3697  *     exhaustion are unnecessary; if an mbuf cannot be obtained
3698  *     it just returns, keeping its placeholder, and can simply
3699  *     be called again to retry.
3700  *
3701  **********************************************************************/
3702 static void
3703 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
3704 {
3705         struct adapter          *adapter = rxr->adapter;
3706         bus_dma_segment_t       hseg[1];
3707         bus_dma_segment_t       pseg[1];
3708         struct ixgbe_rx_buf     *rxbuf;
3709         struct mbuf             *mh, *mp;
3710         int                     i, j, nsegs, error;
3711         bool                    refreshed = FALSE;
3712
3713         i = j = rxr->next_to_refresh;
3714         /* j runs one slot ahead of i and controls the loop */
3715         if (++j == adapter->num_rx_desc)
3716                 j = 0;
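        /*
         * Because j runs one ahead, refreshing stops one slot short
         * of 'limit' (the slot the caller is still processing), so a
         * gap always remains between software and hardware ownership.
         */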
3717
3718         while (j != limit) {
3719                 rxbuf = &rxr->rx_buffers[i];
3720                 if (rxr->hdr_split == FALSE)
3721                         goto no_split;
3722
3723                 if (rxbuf->m_head == NULL) {
3724                         mh = m_gethdr(MB_DONTWAIT, MT_DATA);
3725                         if (mh == NULL)
3726                                 goto update;
3727                 } else
3728                         mh = rxbuf->m_head;
3729
3730                 mh->m_pkthdr.len = mh->m_len = MHLEN;
3732                 mh->m_flags |= M_PKTHDR;
3733                 /* Get the memory mapping */
3734                 error = bus_dmamap_load_mbuf_segment(rxr->htag,
3735                     rxbuf->hmap, mh, hseg, 1, &nsegs, BUS_DMA_NOWAIT);
3736                 if (error != 0) {
3737                         kprintf("Refresh mbufs: hdr dmamap load"
3738                             " failure - %d\n", error);
3739                         m_free(mh);
3740                         rxbuf->m_head = NULL;
3741                         goto update;
3742                 }
3743                 rxbuf->m_head = mh;
3744                 bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3745                     BUS_DMASYNC_PREREAD);
3746                 rxr->rx_base[i].read.hdr_addr =
3747                     htole64(hseg[0].ds_addr);
3748
3749 no_split:
3750                 if (rxbuf->m_pack == NULL) {
3751                         mp = m_getjcl(MB_DONTWAIT, MT_DATA,
3752                             M_PKTHDR, adapter->rx_mbuf_sz);
3753                         if (mp == NULL)
3754                                 goto update;
3755                 } else
3756                         mp = rxbuf->m_pack;
3757
3758                 mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
3759                 /* Get the memory mapping */
3760                 error = bus_dmamap_load_mbuf_segment(rxr->ptag,
3761                     rxbuf->pmap, mp, pseg, 1, &nsegs, BUS_DMA_NOWAIT);
3762                 if (error != 0) {
3763                         kprintf("Refresh mbufs: payload dmamap load"
3764                             " failure - %d\n", error);
3765                         m_free(mp);
3766                         rxbuf->m_pack = NULL;
3767                         goto update;
3768                 }
3769                 rxbuf->m_pack = mp;
3770                 bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3771                     BUS_DMASYNC_PREREAD);
3772                 rxr->rx_base[i].read.pkt_addr =
3773                     htole64(pseg[0].ds_addr);
3774
3775                 refreshed = TRUE;
3776                 /* Next is precalculated */
3777                 i = j;
3778                 rxr->next_to_refresh = i;
3779                 if (++j == adapter->num_rx_desc)
3780                         j = 0;
3781         }
3782 update:
3783         if (refreshed) /* Update hardware tail index */
3784                 IXGBE_WRITE_REG(&adapter->hw,
3785                     IXGBE_RDT(rxr->me), rxr->next_to_refresh);
3786         return;
3787 }
3788
3789 /*********************************************************************
3790  *
3791  *  Allocate memory for rx_buffer structures. Since we use one
3792  *  rx_buffer per received packet, the maximum number of rx_buffer's
3793  *  that we'll need is equal to the number of receive descriptors
3794  *  that we've allocated.
3795  *
3796  **********************************************************************/
3797 static int
3798 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
3799 {
3800         struct  adapter         *adapter = rxr->adapter;
3801         device_t                dev = adapter->dev;
3802         struct ixgbe_rx_buf     *rxbuf;
3803         int                     i, bsize, error;
3804
3805         bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
3806         if (!(rxr->rx_buffers =
3807             (struct ixgbe_rx_buf *) kmalloc(bsize,
3808             M_DEVBUF, M_NOWAIT | M_ZERO))) {
3809                 device_printf(dev, "Unable to allocate rx_buffer memory\n");
3810                 error = ENOMEM;
3811                 goto fail;
3812         }
3813
3814         if ((error = bus_dma_tag_create(NULL,   /* parent */
3815                                    1, 0,        /* alignment, bounds */
3816                                    BUS_SPACE_MAXADDR,   /* lowaddr */
3817                                    BUS_SPACE_MAXADDR,   /* highaddr */
3818                                    NULL, NULL,          /* filter, filterarg */
3819                                    MSIZE,               /* maxsize */
3820                                    1,                   /* nsegments */
3821                                    MSIZE,               /* maxsegsize */
3822                                    0,                   /* flags */
3823                                    &rxr->htag))) {
3824                 device_printf(dev, "Unable to create RX header DMA tag\n");
3825                 goto fail;
3826         }
3827
3828         if ((error = bus_dma_tag_create(NULL,   /* parent */
3829                                    1, 0,        /* alignment, bounds */
3830                                    BUS_SPACE_MAXADDR,   /* lowaddr */
3831                                    BUS_SPACE_MAXADDR,   /* highaddr */
3832                                    NULL, NULL,          /* filter, filterarg */
3833                                    MJUM16BYTES,         /* maxsize */
3834                                    1,                   /* nsegments */
3835                                    MJUM16BYTES,         /* maxsegsize */
3836                                    0,                   /* flags */
3837                                    &rxr->ptag))) {
3838                 device_printf(dev, "Unable to create RX payload DMA tag\n");
3839                 goto fail;
3840         }
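        /*
         * Two tags: htag maps the small header mbufs (at most MSIZE
         * bytes) used when header split is enabled, while ptag maps
         * payload clusters and is sized for the largest (16KB) jumbo
         * cluster the driver may hand out.
         */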
3841
3842         for (i = 0; i < adapter->num_rx_desc; i++) {
3843                 rxbuf = &rxr->rx_buffers[i];
3844                 error = bus_dmamap_create(rxr->htag,
3845                     BUS_DMA_NOWAIT, &rxbuf->hmap);
3846                 if (error) {
3847                         device_printf(dev, "Unable to create RX head map\n");
3848                         goto fail;
3849                 }
3850                 error = bus_dmamap_create(rxr->ptag,
3851                     BUS_DMA_NOWAIT, &rxbuf->pmap);
3852                 if (error) {
3853                         device_printf(dev, "Unable to create RX pkt map\n");
3854                         goto fail;
3855                 }
3856         }
3857
3858         return (0);
3859
3860 fail:
3861         /* Frees all, but can handle partial completion */
3862         ixgbe_free_receive_structures(adapter);
3863         return (error);
3864 }
3865
3866 /*
3867 ** Used to detect a descriptor that has
3868 ** been merged by Hardware RSC.
3869 */
3870 static inline u32
3871 ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
3872 {
3873         return (le32toh(rx->wb.lower.lo_dword.data) &
3874             IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT;
3875 }
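/*
 * Usage sketch (hypothetical; the real consumer is the RX cleanup
 * path): a caller would typically do something like
 *
 *	if (ixgbe_rsc_count(cur) > 1)
 *		... follow the chain of coalesced buffers ...
 *
 * since a count above one means hardware RSC merged several
 * descriptors into a single logical receive.
 */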
3876
3877 /*********************************************************************
3878  *
3879  *  Initialize Hardware RSC (LRO) feature on 82599
3880  *  for an RX ring; this is toggled by the LRO capability
3881  *  even though it is transparent to the stack.
3882  *
3883  **********************************************************************/
3884 #if 0   /* NET_LRO */
3885 static void
3886 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
3887 {
3888         struct  adapter         *adapter = rxr->adapter;
3889         struct  ixgbe_hw        *hw = &adapter->hw;
3890         u32                     rscctrl, rdrxctl;
3891
3892         rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3893         rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3894 #ifdef DEV_NETMAP /* crcstrip is optional in netmap */
3895         if (adapter->ifp->if_capenable & IFCAP_NETMAP && !ix_crcstrip)
3896 #endif /* DEV_NETMAP */
3897         rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3898         rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
3899         IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3900
3901         rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
3902         rscctrl |= IXGBE_RSCCTL_RSCEN;
3903         /*
3904         ** Limit the total number of descriptors that
3905         ** can be combined, so it does not exceed 64K
3906         */
3907         if (adapter->rx_mbuf_sz == MCLBYTES)
3908                 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
3909         else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
3910                 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
3911         else if (adapter->rx_mbuf_sz == MJUM9BYTES)
3912                 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
3913         else  /* Using 16K cluster */
3914                 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
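        /*
         * e.g. assuming the usual 2KB MCLBYTES and 4KB MJUMPAGESIZE,
         * 16 x 2KB or 8 x 4KB both keep a coalesced receive at 32KB,
         * comfortably under the 64KB limit noted above.
         */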
3915
3916         IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
3917
3918         /* Enable TCP header recognition */
3919         IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
3920             (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
3921             IXGBE_PSRTYPE_TCPHDR));
3922
3923         /* Disable RSC for ACK packets */
3924         IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3925             (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3926
3927         rxr->hw_rsc = TRUE;
3928 }
3929 #endif
3930
3931 static void
3932 ixgbe_free_receive_ring(struct rx_ring *rxr)
3933 {
3934         struct  adapter         *adapter;
3935         struct ixgbe_rx_buf       *rxbuf;
3936         int i;
3937
3938         adapter = rxr->adapter;
3939         for (i = 0; i < adapter->num_rx_desc; i++) {
3940                 rxbuf = &rxr->rx_buffers[i];
3941                 if (rxbuf->m_head != NULL) {
3942                         bus_dmamap_sync(rxr->htag, rxbuf->hmap,
3943                             BUS_DMASYNC_POSTREAD);
3944                         bus_dmamap_unload(rxr->htag, rxbuf->hmap);
3945                         rxbuf->m_head->m_flags |= M_PKTHDR;
3946                         m_freem(rxbuf->m_head);
3947                 }
3948                 if (rxbuf->m_pack != NULL) {
3949                         bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
3950                             BUS_DMASYNC_POSTREAD);
3951                         bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
3952                         rxbuf->m_pack->m_flags |= M_PKTHDR;
3953                         m_freem(rxbuf->m_pack);
3954                 }
3955                 rxbuf->m_head = NULL;
3956                 rxbuf->m_pack = NULL;
3957         }
3958 }
3959
3960
3961 /*********************************************************************
3962  *
3963  *  Initialize a receive ring and its buffers.
3964  *
3965  **********************************************************************/
3966 static int
3967 ixgbe_setup_receive_ring(struct rx_ring *rxr)
3968 {
3969         struct  adapter         *adapter;
3970         struct ifnet            *ifp;
3971         device_t                dev;
3972         struct ixgbe_rx_buf     *rxbuf;
3973         bus_dma_segment_t       pseg[1], hseg[1];
3974 #if 0   /* NET_LRO */
3975         struct lro_ctrl         *lro = &rxr->lro;
3976 #endif
3977         int                     rsize, nsegs, error = 0;
3978 #ifdef DEV_NETMAP
3979         struct netmap_adapter *na = NA(rxr->adapter->ifp);
3980         struct netmap_slot *slot;
3981 #endif /* DEV_NETMAP */
3982
3983         adapter = rxr->adapter;
3984         ifp = adapter->ifp;
3985         dev = adapter->dev;
3986
3987         /* Clear the ring contents */
3988         IXGBE_RX_LOCK(rxr);
3989 #ifdef DEV_NETMAP
3990         /* same as in ixgbe_setup_transmit_ring() */
3991         slot = netmap_reset(na, NR_RX, rxr->me, 0);
3992 #endif /* DEV_NETMAP */
3993         rsize = roundup2(adapter->num_rx_desc *
3994             sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
3995         bzero((void *)rxr->rx_base, rsize);
3996
3997         /* Free current RX buffer structs and their mbufs */
3998         ixgbe_free_receive_ring(rxr);
3999
4000         /* Configure header split? */
4001         if (ixgbe_header_split)
4002                 rxr->hdr_split = TRUE;
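        /*
         * With header split enabled the hardware DMAs protocol
         * headers into the small hmap-backed mbuf and the payload
         * into the separate cluster; ixgbe_refresh_mbufs() maintains
         * the same split when replenishing.
         */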
4003
4004         /* Now replenish the mbufs */
4005         for (int j = 0; j != adapter->num_rx_desc; ++j) {
4006                 struct mbuf     *mh, *mp;
4007
4008                 rxbuf = &rxr->rx_buffers[j];
4009 #ifdef DEV_NETMAP
4010                 /*
4011                  * In netmap mode, fill the map and set the buffer
4012                  * address in the NIC ring, considering the offset
4013                  * between the netmap and NIC rings (see comment in
4014                  * ixgbe_setup_transmit_ring() ). No need to allocate
4015                  * an mbuf, so end the block with a continue;
4016                  */
4017                 if (slot) {
4018                         int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
4019                       &