1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
36 #include "opt_device_polling.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #if __FreeBSD_version >= 800000
44 #include <sys/buf_ring.h>
47 #include <sys/endian.h>
48 #include <sys/kernel.h>
49 #include <sys/kthread.h>
50 #include <sys/malloc.h>
52 #include <sys/module.h>
54 #include <sys/socket.h>
55 #include <sys/sockio.h>
56 #include <sys/sysctl.h>
57 #include <sys/taskqueue.h>
58 #if __FreeBSD_version >= 700029
59 #include <sys/eventhandler.h>
61 #include <machine/bus.h>
62 #include <machine/resource.h>
65 #include <net/ethernet.h>
67 #include <net/if_arp.h>
68 #include <net/if_dl.h>
69 #include <net/if_media.h>
71 #include <net/if_types.h>
72 #include <net/if_vlan_var.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
77 #include <netinet/ip.h>
78 #include <netinet/ip6.h>
79 #include <netinet/tcp.h>
80 #include <netinet/udp.h>
82 #include <machine/in_cksum.h>
83 #include <dev/pci/pcivar.h>
84 #include <dev/pci/pcireg.h>
86 #include "e1000_api.h"
87 #include "e1000_82571.h"
90 /*********************************************************************
91 * Set this to one to display debug statistics
92 *********************************************************************/
/* Non-static so it can be patched at runtime (e.g. via a debugger). */
93 int em_display_debug_stats = 0;
95 /*********************************************************************
 * Driver version string, reported to userland tools.
97 *********************************************************************/
98 char em_driver_version[] = "6.9.25";
101 /*********************************************************************
102 * PCI Device ID Table
104 * Used by probe to select devices to load on
105 * Last field stores an index into e1000_strings
106 * Last entry must be all 0s
108 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
109 *********************************************************************/
/*
 * NOTE(review): this listing elides lines (the internal line numbers skip);
 * the opening brace of the initializer and the required all-zero terminator
 * entry / closing "};" are not visible here -- confirm against the full file.
 * em_probe() walks this table until it hits a vendor_id of 0.
 */
111 static em_vendor_info_t em_vendor_info_array[] =
113 /* Intel(R) PRO/1000 Network Connection */
114 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
151 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
152 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
153 PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
157 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
159 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
160 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
161 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
162 { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
163 PCI_ANY_ID, PCI_ANY_ID, 0},
164 { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
165 PCI_ANY_ID, PCI_ANY_ID, 0},
166 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
167 PCI_ANY_ID, PCI_ANY_ID, 0},
168 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
169 PCI_ANY_ID, PCI_ANY_ID, 0},
170 { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
171 PCI_ANY_ID, PCI_ANY_ID, 0},
172 { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
173 PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
175 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
176 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
177 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
179 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
180 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
181 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
182 { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0},
183 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
184 PCI_ANY_ID, PCI_ANY_ID, 0},
185 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
186 PCI_ANY_ID, PCI_ANY_ID, 0},
187 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
188 PCI_ANY_ID, PCI_ANY_ID, 0},
189 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
190 PCI_ANY_ID, PCI_ANY_ID, 0},
191 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
192 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
193 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
194 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
195 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
196 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
197 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
198 { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0},
199 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
200 { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
201 { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
202 { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
203 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0},
204 { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
205 { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
206 { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
207 { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0},
208 { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0},
209 { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0},
210 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
211 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
212 { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
213 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
214 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
215 { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
216 { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0},
217 { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0},
218 { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0},
219 /* required last entry */
223 /*********************************************************************
224 * Table of branding strings for all supported NICs.
 * Indexed by the last field of em_vendor_info_t (all entries above use 0).
225 *********************************************************************/
227 static char *em_strings[] = {
228 "Intel(R) PRO/1000 Network Connection"
231 /*********************************************************************
232 * Function prototypes
 *
 * NOTE(review): intervening #else/#endif lines are elided from this
 * listing (internal line numbers skip). In particular the two
 * em_irq_fast declarations below (void vs int return) are the two arms
 * of the "#if __FreeBSD_version < 700000" conditional -- confirm against
 * the full file before editing.
233 *********************************************************************/
/* FreeBSD newbus device methods (probe/attach/detach/power). */
234 static int em_probe(device_t);
235 static int em_attach(device_t);
236 static int em_detach(device_t);
237 static int em_shutdown(device_t);
238 static int em_suspend(device_t);
239 static int em_resume(device_t);
/* Transmit entry points: classic if_start, plus multiqueue on 8.x+. */
240 static void em_start(struct ifnet *);
241 static void em_start_locked(struct ifnet *ifp);
242 #if __FreeBSD_version >= 800000
243 static int em_mq_start(struct ifnet *, struct mbuf *);
244 static int em_mq_start_locked(struct ifnet *, struct mbuf *);
245 static void em_qflush(struct ifnet *);
247 static int em_ioctl(struct ifnet *, u_long, caddr_t);
248 static void em_init(void *);
249 static void em_init_locked(struct adapter *);
250 static void em_stop(void *);
251 static void em_media_status(struct ifnet *, struct ifmediareq *);
252 static int em_media_change(struct ifnet *);
253 static void em_identify_hardware(struct adapter *);
254 static int em_allocate_pci_resources(struct adapter *);
255 static int em_allocate_legacy(struct adapter *adapter);
256 static int em_allocate_msix(struct adapter *adapter);
257 static int em_setup_msix(struct adapter *);
258 static void em_free_pci_resources(struct adapter *);
259 static void em_local_timer(void *);
260 static int em_hardware_init(struct adapter *);
261 static void em_setup_interface(device_t, struct adapter *);
262 static void em_setup_transmit_structures(struct adapter *);
263 static void em_initialize_transmit_unit(struct adapter *);
264 static int em_setup_receive_structures(struct adapter *);
265 static void em_initialize_receive_unit(struct adapter *);
266 static void em_enable_intr(struct adapter *);
267 static void em_disable_intr(struct adapter *);
268 static void em_free_transmit_structures(struct adapter *);
269 static void em_free_receive_structures(struct adapter *);
270 static void em_update_stats_counters(struct adapter *);
271 static void em_txeof(struct adapter *);
272 static void em_tx_purge(struct adapter *);
273 static int em_allocate_receive_structures(struct adapter *);
274 static int em_allocate_transmit_structures(struct adapter *);
275 static int em_rxeof(struct adapter *, int);
276 #ifndef __NO_STRICT_ALIGNMENT
277 static int em_fixup_rx(struct adapter *);
279 static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
281 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
283 #if __FreeBSD_version >= 700000
284 static bool em_tso_setup(struct adapter *, struct mbuf *,
286 #endif /* FreeBSD_version >= 700000 */
287 static void em_set_promisc(struct adapter *);
288 static void em_disable_promisc(struct adapter *);
289 static void em_set_multi(struct adapter *);
290 static void em_print_hw_stats(struct adapter *);
291 static void em_update_link_status(struct adapter *);
292 static int em_get_buf(struct adapter *, int);
293 #if __FreeBSD_version >= 700029
294 static void em_register_vlan(void *, struct ifnet *, u16);
295 static void em_unregister_vlan(void *, struct ifnet *, u16);
296 static void em_setup_vlan_hw_support(struct adapter *);
298 static int em_xmit(struct adapter *, struct mbuf **);
299 static void em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers (Cordova hardware erratum). */
300 static int em_82547_fifo_workaround(struct adapter *, int);
301 static void em_82547_update_fifo_head(struct adapter *, int);
302 static int em_82547_tx_fifo_reset(struct adapter *);
303 static void em_82547_move_tail(void *);
304 static int em_dma_malloc(struct adapter *, bus_size_t,
305 struct em_dma_alloc *, int);
306 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
307 static void em_print_debug_info(struct adapter *);
308 static void em_print_nvm_info(struct adapter *);
309 static int em_is_valid_ether_addr(u8 *);
310 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
311 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
312 static u32 em_fill_descriptors (bus_addr_t address, u32 length,
313 PDESC_ARRAY desc_array);
314 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
315 static void em_add_int_delay_sysctl(struct adapter *, const char *,
316 const char *, struct em_int_delay_info *, int, int);
317 /* Management and WOL Support */
318 static void em_init_manageability(struct adapter *);
319 static void em_release_manageability(struct adapter *);
320 static void em_get_hw_control(struct adapter *);
321 static void em_release_hw_control(struct adapter *);
322 static void em_get_wakeup(device_t);
323 static void em_enable_wakeup(device_t);
324 static int em_enable_phy_wakeup(struct adapter *);
327 static void em_intr(void *);
329 #if __FreeBSD_version < 700000
330 static void em_irq_fast(void *);
332 static int em_irq_fast(void *);
/* MSI-X per-vector handlers and their deferred taskqueue workers. */
336 static void em_msix_tx(void *);
337 static void em_msix_rx(void *);
338 static void em_msix_link(void *);
339 static void em_handle_rx(void *context, int pending);
340 static void em_handle_tx(void *context, int pending);
342 static void em_handle_rxtx(void *context, int pending);
343 static void em_handle_link(void *context, int pending);
344 static void em_add_rx_process_limit(struct adapter *, const char *,
345 const char *, int *, int);
346 #endif /* ~EM_LEGACY_IRQ */
348 #ifdef DEVICE_POLLING
349 static poll_handler_t em_poll;
352 /*********************************************************************
353 * FreeBSD Device Interface Entry Points
 *
 * NOTE(review): the DEVMETHOD terminator ({0, 0}) and the closing
 * braces of em_methods/em_driver are elided from this listing.
354 *********************************************************************/
356 static device_method_t em_methods[] = {
357 /* Device interface */
358 DEVMETHOD(device_probe, em_probe),
359 DEVMETHOD(device_attach, em_attach),
360 DEVMETHOD(device_detach, em_detach),
361 DEVMETHOD(device_shutdown, em_shutdown),
362 DEVMETHOD(device_suspend, em_suspend),
363 DEVMETHOD(device_resume, em_resume),
/* Driver descriptor: name "em", methods above, softc is struct adapter. */
367 static driver_t em_driver = {
368 "em", em_methods, sizeof(struct adapter),
371 static devclass_t em_devclass;
/* Register with the PCI bus and declare module dependencies. */
372 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
373 MODULE_DEPEND(em, pci, 1, 1, 1);
374 MODULE_DEPEND(em, ether, 1, 1, 1);
376 /*********************************************************************
377 * Tunable default values.
 *
 * Interrupt-delay defaults are stored in usecs; the hardware registers
 * use 1.024 usec ticks, hence the rounding conversions below.
378 *********************************************************************/
380 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
381 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
384 /* Allow common code without TSO */
389 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
390 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
391 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
392 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Descriptor ring sizes; validated against hardware limits in em_attach(). */
393 static int em_rxd = EM_DEFAULT_RXD;
394 static int em_txd = EM_DEFAULT_TXD;
395 static int em_smart_pwr_down = FALSE;
396 /* Controls whether promiscuous also shows bad packets */
397 static int em_debug_sbp = FALSE;
398 /* Local switch for MSI/MSIX */
399 static int em_enable_msi = TRUE;
/* Expose the above as loader.conf tunables under hw.em.* */
401 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
402 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
403 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
404 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
405 TUNABLE_INT("hw.em.rxd", &em_rxd);
406 TUNABLE_INT("hw.em.txd", &em_txd);
407 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
408 TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
409 TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);
411 #ifndef EM_LEGACY_IRQ
412 /* How many packets rxeof tries to clean at a time */
413 static int em_rx_process_limit = 100;
414 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
417 /* Flow control setting - default to FULL */
418 static int em_fc_setting = e1000_fc_full;
419 TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);
422 ** Shadow VFTA table, this is needed because
423 ** the real vlan filter table gets cleared during
424 ** a soft reset and the driver needs to be able
 ** to repopulate it after the reset.
427 static u32 em_shadow_vfta[EM_VFTA_SIZE];
429 /* Global used in WOL setup with multiport cards */
430 static int global_quad_port_a = 0;
432 /*********************************************************************
433 * Device identification routine
435 * em_probe determines if the driver should be loaded on
436 * adapter based on PCI vendor/device id of the adapter.
438 * return BUS_PROBE_DEFAULT on success, positive on failure
 *
 * NOTE(review): this listing elides lines -- the "static int" return
 * type, the early ENXIO return after the vendor check, and the
 * fall-through ENXIO return / closing brace are not visible here.
439 *********************************************************************/
442 em_probe(device_t dev)
444 char adapter_name[60];
445 u16 pci_vendor_id = 0;
446 u16 pci_device_id = 0;
447 u16 pci_subvendor_id = 0;
448 u16 pci_subdevice_id = 0;
449 em_vendor_info_t *ent;
451 INIT_DEBUGOUT("em_probe: begin");
453 pci_vendor_id = pci_get_vendor(dev);
/* Fast reject: not an Intel (0x8086) device. */
454 if (pci_vendor_id != EM_VENDOR_ID)
457 pci_device_id = pci_get_device(dev);
458 pci_subvendor_id = pci_get_subvendor(dev);
459 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the ID table until the all-zero terminator entry. */
461 ent = em_vendor_info_array;
462 while (ent->vendor_id != 0) {
463 if ((pci_vendor_id == ent->vendor_id) &&
464 (pci_device_id == ent->device_id) &&
466 ((pci_subvendor_id == ent->subvendor_id) ||
467 (ent->subvendor_id == PCI_ANY_ID)) &&
469 ((pci_subdevice_id == ent->subdevice_id) ||
470 (ent->subdevice_id == PCI_ANY_ID))) {
/* Matched: build "<brand string> <version>" as the device description. */
471 sprintf(adapter_name, "%s %s",
472 em_strings[ent->index],
474 device_set_desc_copy(dev, adapter_name);
475 return (BUS_PROBE_DEFAULT);
483 /*********************************************************************
484 * Device initialization routine
486 * The attach entry point is called when the driver is being loaded.
487 * This routine identifies the type of hardware, allocates all resources
488 * and initializes the hardware.
490 * return 0 on success, positive on failure
 *
 * NOTE(review): this listing elides many lines (internal numbers skip):
 * local declarations (error/tsize/rsize), most error-goto statements,
 * the err_* unwind labels, return statements and closing braces. The
 * trailing calls from line 783 onward are the error-unwind path, in
 * reverse order of acquisition -- confirm label placement in the full file.
491 *********************************************************************/
494 em_attach(device_t dev)
496 struct adapter *adapter;
500 INIT_DEBUGOUT("em_attach: begin");
502 adapter = device_get_softc(dev);
503 adapter->dev = adapter->osdep.dev = dev;
/* Three-lock scheme: core (state), TX ring, RX ring. */
504 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
505 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
506 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl handlers for debug info and statistics dumps. */
509 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
510 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
511 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
512 em_sysctl_debug_info, "I", "Debug Information");
514 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
515 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
516 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
517 em_sysctl_stats, "I", "Statistics");
/* Callouts are tied to their protecting mutexes. */
519 callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
520 callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0);
522 /* Determine hardware and mac info */
523 em_identify_hardware(adapter);
525 /* Setup PCI resources */
526 if (em_allocate_pci_resources(adapter)) {
527 device_printf(dev, "Allocation of PCI resources failed\n");
533 ** For ICH8 and family we need to
534 ** map the flash memory, and this
535 ** must happen after the MAC is
538 if ((adapter->hw.mac.type == e1000_ich8lan) ||
539 (adapter->hw.mac.type == e1000_pchlan) ||
540 (adapter->hw.mac.type == e1000_ich9lan) ||
541 (adapter->hw.mac.type == e1000_ich10lan)) {
542 int rid = EM_BAR_TYPE_FLASH;
543 adapter->flash = bus_alloc_resource_any(dev,
544 SYS_RES_MEMORY, &rid, RF_ACTIVE);
545 if (adapter->flash == NULL) {
546 device_printf(dev, "Mapping of Flash failed\n");
550 /* This is used in the shared code */
551 adapter->hw.flash_address = (u8 *)adapter->flash;
552 adapter->osdep.flash_bus_space_tag =
553 rman_get_bustag(adapter->flash);
554 adapter->osdep.flash_bus_space_handle =
555 rman_get_bushandle(adapter->flash);
558 /* Do Shared Code initialization */
559 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
560 device_printf(dev, "Setup of Shared code failed\n");
565 e1000_get_bus_info(&adapter->hw);
567 /* Set up some sysctls for the tunable interrupt delays */
568 em_add_int_delay_sysctl(adapter, "rx_int_delay",
569 "receive interrupt delay in usecs", &adapter->rx_int_delay,
570 E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
571 em_add_int_delay_sysctl(adapter, "tx_int_delay",
572 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
573 E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
/* RADV/TADV absolute-delay registers only exist on 82540 and newer. */
574 if (adapter->hw.mac.type >= e1000_82540) {
575 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
576 "receive interrupt delay limit in usecs",
577 &adapter->rx_abs_int_delay,
578 E1000_REGISTER(&adapter->hw, E1000_RADV),
579 em_rx_abs_int_delay_dflt);
580 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
581 "transmit interrupt delay limit in usecs",
582 &adapter->tx_abs_int_delay,
583 E1000_REGISTER(&adapter->hw, E1000_TADV),
584 em_tx_abs_int_delay_dflt);
587 #ifndef EM_LEGACY_IRQ
588 /* Sysctls for limiting the amount of work done in the taskqueue */
589 em_add_rx_process_limit(adapter, "rx_processing_limit",
590 "max number of rx packets to process", &adapter->rx_process_limit,
591 em_rx_process_limit);
595 * Validate number of transmit and receive descriptors. It
596 * must not exceed hardware maximum, and must be multiple
597 * of E1000_DBA_ALIGN.
599 if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
600 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
601 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
602 (em_txd < EM_MIN_TXD)) {
603 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
604 EM_DEFAULT_TXD, em_txd);
605 adapter->num_tx_desc = EM_DEFAULT_TXD;
607 adapter->num_tx_desc = em_txd;
608 if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
609 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
610 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
611 (em_rxd < EM_MIN_RXD)) {
612 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
613 EM_DEFAULT_RXD, em_rxd);
614 adapter->num_rx_desc = EM_DEFAULT_RXD;
616 adapter->num_rx_desc = em_rxd;
/* Default link/PHY behavior: autonegotiate, don't busy-wait for it. */
618 adapter->hw.mac.autoneg = DO_AUTO_NEG;
619 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
620 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
621 adapter->rx_buffer_len = 2048;
623 e1000_init_script_state_82541(&adapter->hw, TRUE);
624 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
627 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
628 adapter->hw.phy.mdix = AUTO_ALL_MODES;
629 adapter->hw.phy.disable_polarity_correction = FALSE;
630 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
634 * Set the frame limits assuming
635 * standard ethernet sized frames.
637 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
638 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
641 * This controls when hardware reports transmit completion
644 adapter->hw.mac.report_tx_early = 1;
646 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
649 /* Allocate Transmit Descriptor ring */
650 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
651 device_printf(dev, "Unable to allocate tx_desc memory\n");
655 adapter->tx_desc_base =
656 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
658 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
661 /* Allocate Receive Descriptor ring */
662 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
663 device_printf(dev, "Unable to allocate rx_desc memory\n");
667 adapter->rx_desc_base =
668 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
671 ** Start from a known state, this is
672 ** important in reading the nvm and
675 e1000_reset_hw(&adapter->hw);
677 /* Make sure we have a good EEPROM before we read from it */
678 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
680 ** Some PCI-E parts fail the first check due to
681 ** the link being in sleep state, call it again,
682 ** if it fails a second time its a real issue.
684 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
686 "The EEPROM Checksum Is Not Valid\n");
692 /* Copy the permanent MAC address out of the EEPROM */
693 if (e1000_read_mac_addr(&adapter->hw) < 0) {
694 device_printf(dev, "EEPROM read error while reading MAC"
700 if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
701 device_printf(dev, "Invalid MAC address\n");
706 /* Initialize the hardware */
707 if (em_hardware_init(adapter)) {
708 device_printf(dev, "Unable to initialize the hardware\n");
713 /* Allocate transmit descriptors and buffers */
714 if (em_allocate_transmit_structures(adapter)) {
715 device_printf(dev, "Could not setup transmit structures\n");
720 /* Allocate receive descriptors and buffers */
721 if (em_allocate_receive_structures(adapter)) {
722 device_printf(dev, "Could not setup receive structures\n");
728 ** Do interrupt configuration
730 if (adapter->msi > 1) /* Do MSI/X */
731 error = em_allocate_msix(adapter);
732 else /* MSI or Legacy */
733 error = em_allocate_legacy(adapter);
738 * Get Wake-on-Lan and Management info for later use
742 /* Setup OS specific network interface */
743 em_setup_interface(dev, adapter);
745 /* Initialize statistics */
746 em_update_stats_counters(adapter);
748 adapter->hw.mac.get_link_status = 1;
749 em_update_link_status(adapter);
751 /* Indicate SOL/IDER usage */
752 if (e1000_check_reset_block(&adapter->hw))
754 "PHY reset is blocked due to SOL/IDER session.\n");
756 /* Do we need workaround for 82544 PCI-X adapter? */
757 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
758 adapter->hw.mac.type == e1000_82544)
759 adapter->pcix_82544 = TRUE;
761 adapter->pcix_82544 = FALSE;
763 #if __FreeBSD_version >= 700029
764 /* Register for VLAN events */
765 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
766 em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
767 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
768 em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
771 /* Non-AMT based hardware can now take control from firmware */
772 if (adapter->has_manage && !adapter->has_amt)
773 em_get_hw_control(adapter);
775 /* Tell the stack that the interface is not active */
776 adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
778 INIT_DEBUGOUT("em_attach: end");
/* --- error unwind path (labels elided in this listing) --- */
783 em_free_transmit_structures(adapter);
786 em_release_hw_control(adapter);
787 em_dma_free(adapter, &adapter->rxdma);
789 em_dma_free(adapter, &adapter->txdma);
792 em_free_pci_resources(adapter);
793 EM_TX_LOCK_DESTROY(adapter);
794 EM_RX_LOCK_DESTROY(adapter);
795 EM_CORE_LOCK_DESTROY(adapter);
800 /*********************************************************************
801 * Device removal routine
803 * The detach entry point is called when the driver is being removed.
804 * This routine stops the adapter and deallocates all the resources
805 * that were allocated for driver operation.
807 * return 0 on success, positive on failure
 *
 * NOTE(review): lines are elided from this listing -- the "static int"
 * return type, EBUSY returns for the VLAN-in-use checks, the em_stop()
 * call between CORE_LOCK and the unlocks, several #else/#endif lines,
 * and the final return/closing brace are not visible here.
808 *********************************************************************/
811 em_detach(device_t dev)
813 struct adapter *adapter = device_get_softc(dev);
814 struct ifnet *ifp = adapter->ifp;
816 INIT_DEBUGOUT("em_detach: begin");
818 /* Make sure VLANS are not using driver */
/* Two variants of the check: if_vlantrunk (7.x+) vs if_nvlans (older). */
819 #if __FreeBSD_version >= 700000
820 if (adapter->ifp->if_vlantrunk != NULL) {
822 if (adapter->ifp->if_nvlans != 0) {
824 device_printf(dev,"Vlan in use, detach first\n");
828 #ifdef DEVICE_POLLING
829 if (ifp->if_capenable & IFCAP_POLLING)
830 ether_poll_deregister(ifp);
833 EM_CORE_LOCK(adapter);
/* Flag checked by other paths (e.g. ioctl) to bail out during teardown. */
835 adapter->in_detach = 1;
837 e1000_phy_hw_reset(&adapter->hw);
839 em_release_manageability(adapter);
841 EM_TX_UNLOCK(adapter);
842 EM_CORE_UNLOCK(adapter);
844 #if __FreeBSD_version >= 700029
845 /* Unregister VLAN events */
846 if (adapter->vlan_attach != NULL)
847 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
848 if (adapter->vlan_detach != NULL)
849 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
852 ether_ifdetach(adapter->ifp);
/* callout_drain() waits for any in-flight timer callbacks to finish. */
853 callout_drain(&adapter->timer);
854 callout_drain(&adapter->tx_fifo_timer);
856 em_free_pci_resources(adapter);
857 bus_generic_detach(dev);
860 em_free_transmit_structures(adapter);
861 em_free_receive_structures(adapter);
863 /* Free Transmit Descriptor ring */
864 if (adapter->tx_desc_base) {
865 em_dma_free(adapter, &adapter->txdma);
866 adapter->tx_desc_base = NULL;
869 /* Free Receive Descriptor ring */
870 if (adapter->rx_desc_base) {
871 em_dma_free(adapter, &adapter->rxdma);
872 adapter->rx_desc_base = NULL;
875 em_release_hw_control(adapter);
876 EM_TX_LOCK_DESTROY(adapter);
877 EM_RX_LOCK_DESTROY(adapter);
878 EM_CORE_LOCK_DESTROY(adapter);
883 /*********************************************************************
885 * Shutdown entry point
 * Shutdown is treated exactly like a suspend (release management /
 * hw control and arm wakeup). Return type and braces elided in listing.
887 **********************************************************************/
890 em_shutdown(device_t dev)
892 return em_suspend(dev);
896 * Suspend/resume device methods.
 *
 * Suspend: under the core lock, hand manageability/hw control back to
 * firmware and arm Wake-on-LAN, then defer to the generic bus method.
 * (Return type line and braces elided in this listing.)
899 em_suspend(device_t dev)
901 struct adapter *adapter = device_get_softc(dev);
903 EM_CORE_LOCK(adapter);
905 em_release_manageability(adapter);
906 em_release_hw_control(adapter);
907 em_enable_wakeup(dev);
909 EM_CORE_UNLOCK(adapter);
911 return bus_generic_suspend(dev);
/*
 * Resume: re-run full init and manageability setup under the core lock,
 * then defer to the generic bus method. NOTE(review): the listing elides
 * lines here -- presumably an em_start(ifp) kick when the interface was
 * running (ifp is declared but unused in the visible lines); confirm.
 */
915 em_resume(device_t dev)
917 struct adapter *adapter = device_get_softc(dev);
918 struct ifnet *ifp = adapter->ifp;
920 EM_CORE_LOCK(adapter);
921 em_init_locked(adapter);
922 em_init_manageability(adapter);
923 EM_CORE_UNLOCK(adapter);
926 return bus_generic_resume(dev);
930 /*********************************************************************
931 * Transmit entry point
933 * em_start is called by the stack to initiate a transmit.
934 * The driver will remain in this routine as long as there are
935 * packets to transmit and transmit resources are available.
936 * In case resources are not available stack is notified and
937 * the packet is requeued.
938 **********************************************************************/
940 #if __FreeBSD_version >= 800000
/*
 * Multiqueue transmit, TX lock held. Fast path: if the buf_ring is empty
 * and descriptors are plentiful, transmit m directly and account for it
 * manually (the ring's own stats were bypassed); otherwise enqueue to
 * adapter->br and drain the ring. NOTE(review): the listing elides the
 * "m == NULL" tasklet-entry check, the drain-loop header, and the
 * return statements -- confirm against the full file.
 */
942 em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
944 struct adapter *adapter = ifp->if_softc;
946 int error = E1000_SUCCESS;
948 EM_TX_LOCK_ASSERT(adapter);
949 /* To allow being called from a tasklet */
/* Not running, output stalled, or no link: just queue the packet. */
953 if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
955 || (!adapter->link_active)) {
956 error = drbr_enqueue(ifp, adapter->br, m);
958 } else if (!drbr_needs_enqueue(ifp, adapter->br) &&
959 (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
/* Direct transmit; on failure fall back to enqueueing the mbuf. */
960 if ((error = em_xmit(adapter, &m)) != 0) {
962 error = drbr_enqueue(ifp, adapter->br, m);
966 * We've bypassed the buf ring so we need to update
 * the stats ourselves.
969 drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
971 ** Send a copy of the frame to the BPF
972 ** listener and set the watchdog on.
974 ETHER_BPF_MTAP(ifp, m);
975 adapter->watchdog_check = TRUE;
977 } else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
981 if (drbr_empty(ifp, adapter->br))
983 /* Process the queue */
985 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
987 next = drbr_dequeue(ifp, adapter->br);
990 if ((error = em_xmit(adapter, &next)) != 0) {
992 error = drbr_enqueue(ifp, adapter->br, next);
995 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
996 ETHER_BPF_MTAP(ifp, next);
997 /* Set the watchdog */
998 adapter->watchdog_check = TRUE;
/* Running low on descriptors: tell the stack to stop handing us work. */
1001 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1002 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1008 **  Multiqueue capable stack interface, this is not
1009 **  yet truely multiqueue, but that is coming...
/*
 * Unlocked mq entry point: try to take the TX lock and transmit inline;
 * if the lock is contended, enqueue to the buf ring and return — the
 * lock holder will drain it.
 */
1012 em_mq_start(struct ifnet *ifp, struct mbuf *m)
1015 	struct adapter *adapter = ifp->if_softc;
1018 	if (EM_TX_TRYLOCK(adapter)) {
1019 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1020 			error = em_mq_start_locked(ifp, m);
1021 		EM_TX_UNLOCK(adapter);
1023 		error = drbr_enqueue(ifp, adapter->br, m);
/*
 * Flush the software transmit ring: dequeue and discard every pending
 * mbuf under the TX lock. NOTE(review): the m_freem() call appears to
 * have been dropped from this listing — verify against full source.
 */
1029 em_qflush(struct ifnet *ifp)
1032 	struct adapter *adapter = (struct adapter *)ifp->if_softc;
1034 	EM_TX_LOCK(adapter);
1035 	while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1038 	EM_TX_UNLOCK(adapter);
1040 #endif /* FreeBSD_version */
/*
 * Legacy (if_snd queue) transmit path; caller holds the TX lock.
 * Dequeues packets while resources last, maps them via em_xmit(),
 * taps BPF, and arms the TX watchdog.
 */
1043 em_start_locked(struct ifnet *ifp)
1045 	struct adapter *adapter = ifp->if_softc;
1046 	struct mbuf	*m_head;
1048 	EM_TX_LOCK_ASSERT(adapter);
1050 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1053 	if (!adapter->link_active)
1056 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
1058 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1062 		 *  Encapsulation can modify our pointer, and or make it
1063 		 *  NULL on failure.  In that event, we can't requeue.
/* em_xmit failed: mark output active and put the packet back at the head. */
1065 		if (em_xmit(adapter, &m_head)) {
1068 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1069 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1073 		/* Send a copy of the frame to the BPF listener */
1074 		ETHER_BPF_MTAP(ifp, m_head);
1076 		/* Set timeout in case hardware has problems transmitting. */
1077 		adapter->watchdog_check = TRUE;
1079 	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1080 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/* Unlocked wrapper: take the TX lock and run the legacy start routine. */
1086 em_start(struct ifnet *ifp)
1088 	struct adapter *adapter = ifp->if_softc;
1090 	EM_TX_LOCK(adapter);
1091 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1092 		em_start_locked(ifp);
1093 	EM_TX_UNLOCK(adapter);
1096 /*********************************************************************
1099  *  em_ioctl is called when the user wants to configure the
1102  *  return 0 on success, positive on failure
1103  **********************************************************************/
/*
 * Interface ioctl handler (SIOCSIFADDR/MTU/FLAGS/MULTI/MEDIA/CAP...).
 * NOTE(review): numbered listing with dropped lines — case labels,
 * braces and break statements are missing here; code left byte-identical.
 */
1106 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1108 	struct adapter	*adapter = ifp->if_softc;
1109 	struct ifreq *ifr = (struct ifreq *)data;
1111 	struct ifaddr *ifa = (struct ifaddr *)data;
1115 	if (adapter->in_detach)
/* SIOCSIFADDR (AF_INET): bring the interface up without a full reset. */
1121 		if (ifa->ifa_addr->sa_family == AF_INET) {
1124 			 * Since resetting hardware takes a very long time
1125 			 * and results in link renegotiation we only
1126 			 * initialize the hardware only when it is absolutely
1129 			ifp->if_flags |= IFF_UP;
1130 			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1131 				EM_CORE_LOCK(adapter);
1132 				em_init_locked(adapter);
1133 				EM_CORE_UNLOCK(adapter);
1135 			arp_ifinit(ifp, ifa);
1138 			error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: per-MAC maximum frame size constraints. */
1143 		u16 eeprom_data = 0;
1145 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1147 		EM_CORE_LOCK(adapter);
1148 		switch (adapter->hw.mac.type) {
1151 			 * 82573 only supports jumbo frames
1152 			 * if ASPM is disabled.
1154 			e1000_read_nvm(&adapter->hw,
1155 			    NVM_INIT_3GIO_3, 1, &eeprom_data);
1156 			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1157 				max_frame_size = ETHER_MAX_LEN;
1160 			/* Allow Jumbo frames - fall thru */
1164 		case e1000_ich10lan:
1166 		case e1000_80003es2lan:	/* Limit Jumbo Frame size */
1167 			max_frame_size = 9234;
1170 			max_frame_size = 4096;
1172 			/* Adapters that do not support jumbo frames */
1176 			max_frame_size = ETHER_MAX_LEN;
1179 			max_frame_size = MAX_JUMBO_FRAME_SIZE;
1181 		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1183 			EM_CORE_UNLOCK(adapter);
1188 		ifp->if_mtu = ifr->ifr_mtu;
1189 		adapter->max_frame_size =
1190 		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1191 		em_init_locked(adapter);
1192 		EM_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: reconcile promisc/allmulti deltas without a full reinit. */
1196 		IOCTL_DEBUGOUT("ioctl rcv'd:\
1197 		    SIOCSIFFLAGS (Set Interface Flags)");
1198 		EM_CORE_LOCK(adapter);
1199 		if (ifp->if_flags & IFF_UP) {
1200 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1201 				if ((ifp->if_flags ^ adapter->if_flags) &
1202 				    (IFF_PROMISC | IFF_ALLMULTI)) {
1203 					em_disable_promisc(adapter);
1204 					em_set_promisc(adapter);
1207 				em_init_locked(adapter);
1209 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1210 				EM_TX_LOCK(adapter);
1212 				EM_TX_UNLOCK(adapter);
1214 		adapter->if_flags = ifp->if_flags;
1215 		EM_CORE_UNLOCK(adapter);
/* SIOC(ADD|DEL)MULTI: reprogram the multicast table with interrupts off. */
1219 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1220 		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1221 			EM_CORE_LOCK(adapter);
1222 			em_disable_intr(adapter);
1223 			em_set_multi(adapter);
/* 82542 rev2 requires an RX unit re-init after multicast reprogramming. */
1224 			if (adapter->hw.mac.type == e1000_82542 &&
1225 	    		    adapter->hw.revision_id == E1000_REVISION_2) {
1226 				em_initialize_receive_unit(adapter);
1228 #ifdef DEVICE_POLLING
1229 			if (!(ifp->if_capenable & IFCAP_POLLING))
1231 				em_enable_intr(adapter);
1232 			EM_CORE_UNLOCK(adapter);
/* SIOCxIFMEDIA: refuse media changes while a SOL/IDER session is active. */
1236 		/* Check SOL/IDER usage */
1237 		EM_CORE_LOCK(adapter);
1238 		if (e1000_check_reset_block(&adapter->hw)) {
1239 			EM_CORE_UNLOCK(adapter);
1240 			device_printf(adapter->dev, "Media change is"
1241 			    " blocked due to SOL/IDER session.\n");
1244 		EM_CORE_UNLOCK(adapter);
1246 		IOCTL_DEBUGOUT("ioctl rcv'd: \
1247 		    SIOCxIFMEDIA (Get/Set Interface Media)");
1248 		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle offload capabilities; 'mask' is the changed bits. */
1254 		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1256 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1257 #ifdef DEVICE_POLLING
1258 		if (mask & IFCAP_POLLING) {
1259 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1260 				error = ether_poll_register(em_poll, ifp);
1263 				EM_CORE_LOCK(adapter);
1264 				em_disable_intr(adapter);
1265 				ifp->if_capenable |= IFCAP_POLLING;
1266 				EM_CORE_UNLOCK(adapter);
1268 				error = ether_poll_deregister(ifp);
1269 				/* Enable interrupt even in error case */
1270 				EM_CORE_LOCK(adapter);
1271 				em_enable_intr(adapter);
1272 				ifp->if_capenable &= ~IFCAP_POLLING;
1273 				EM_CORE_UNLOCK(adapter);
1277 		if (mask & IFCAP_HWCSUM) {
1278 			ifp->if_capenable ^= IFCAP_HWCSUM;
1281 #if __FreeBSD_version >= 700000
1282 		if (mask & IFCAP_TSO4) {
1283 			ifp->if_capenable ^= IFCAP_TSO4;
1287 		if (mask & IFCAP_VLAN_HWTAGGING) {
1288 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1292 		if (mask & IFCAP_VLAN_HWFILTER) {
1293 			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1297 		if ((mask & IFCAP_WOL) &&
1298 		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
1299 			if (mask & IFCAP_WOL_MCAST)
1300 				ifp->if_capenable ^= IFCAP_WOL_MCAST;
1301 			if (mask & IFCAP_WOL_MAGIC)
1302 				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
/* Some capability toggles require reinitializing the interface. */
1305 		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1307 #if __FreeBSD_version >= 700000
1308 		VLAN_CAPABILITIES(ifp);
/* Unknown command: hand off to the generic ethernet ioctl handler. */
1314 		error = ether_ioctl(ifp, command, data);
1322 /*********************************************************************
1325  *  This routine is used in two ways. It is used by the stack as
1326  *  init entry point in network interface structure. It is also used
1327  *  by the driver as a hw/sw initialization routine to get to a
1330  *  return 0 on success, positive on failure
1331  **********************************************************************/
/*
 * Core (re)initialization, caller holds the core lock.
 * NOTE(review): numbered listing with dropped lines (case labels,
 * braces, a few statements missing); code left byte-identical.
 */
1334 em_init_locked(struct adapter *adapter)
1336 	struct ifnet	*ifp = adapter->ifp;
1337 	device_t	dev = adapter->dev;
1340 	INIT_DEBUGOUT("em_init: begin");
1342 	EM_CORE_LOCK_ASSERT(adapter);
1344 	EM_TX_LOCK(adapter);
1346 	EM_TX_UNLOCK(adapter);
1349 	 *  Packet Buffer Allocation (PBA)
1350 	 *  Writing PBA sets the receive portion of the buffer
1351 	 *  the remainder is used for the transmit buffer.
1353 	 *  Devices before the 82547 had a Packet Buffer of 64K.
1354 	 *    Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1355 	 *  After the 82547 the buffer was reduced to 40K.
1356 	 *    Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1357 	 *    Note: default does not leave enough room for Jumbo Frame >10k.
1359 	switch (adapter->hw.mac.type) {
1361 	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1362 		if (adapter->max_frame_size > 8192)
1363 			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1365 			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1366 		adapter->tx_fifo_head = 0;
1367 		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1368 		adapter->tx_fifo_size =
1369 		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1371 		/* Total Packet Buffer on these is 48K */
1374 	case e1000_80003es2lan:
1375 			pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1377 	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1378 			pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1382 			pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1385 	case e1000_ich10lan:
1387 		pba = E1000_PBA_10K;
1393 		/* Devices before 82547 had a Packet Buffer of 64K.   */
1394 		if (adapter->max_frame_size > 8192)
1395 			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1397 			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1400 	INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1401 	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1403 	/* Get the latest mac address, User can use a LAA */
1404         bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1407 	/* Put the address into the Receive Address Array */
1408 	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1411 	 * With the 82571 adapter, RAR[0] may be overwritten
1412 	 * when the other port is reset, we make a duplicate
1413 	 * in RAR[14] for that eventuality, this assures
1414 	 * the interface continues to function.
1416 	if (adapter->hw.mac.type == e1000_82571) {
1417 		e1000_set_laa_state_82571(&adapter->hw, TRUE);
1418 		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1419 		    E1000_RAR_ENTRIES - 1);
1422 	/* Initialize the hardware */
1423 	if (em_hardware_init(adapter)) {
1424 		device_printf(dev, "Unable to initialize the hardware\n");
1427 	em_update_link_status(adapter);
1429 	/* Setup VLAN support, basic and offload if available */
1430 	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1431 	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1432 		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1433 			/* Use real VLAN Filter support */
1434 			em_setup_vlan_hw_support(adapter);
/* No HW filtering: just enable VLAN tag stripping/insertion (VME). */
1437 			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1438 			ctrl |= E1000_CTRL_VME;
1439 			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1443 	/* Set hardware offload abilities */
1444 	ifp->if_hwassist = 0;
1445 	if (adapter->hw.mac.type >= e1000_82543) {
1446 		if (ifp->if_capenable & IFCAP_TXCSUM)
1447 			ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1448 #if __FreeBSD_version >= 700000
1449 		if (ifp->if_capenable & IFCAP_TSO4)
1450 			ifp->if_hwassist |= CSUM_TSO;
1454 	/* Configure for OS presence */
1455 	em_init_manageability(adapter);
1457 	/* Prepare transmit descriptors and buffers */
1458 	em_setup_transmit_structures(adapter);
1459 	em_initialize_transmit_unit(adapter);
1461 	/* Setup Multicast table */
1462 	em_set_multi(adapter);
1464 	/* Prepare receive descriptors and buffers */
1465 	if (em_setup_receive_structures(adapter)) {
1466 		device_printf(dev, "Could not setup receive structures\n");
1467 		EM_TX_LOCK(adapter);
1469 		EM_TX_UNLOCK(adapter);
1472 	em_initialize_receive_unit(adapter);
1474 	/* Don't lose promiscuous settings */
1475 	em_set_promisc(adapter);
1477 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1478 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1480 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1481 	e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1483 	/* MSI/X configuration for 82574 */
1484 	if (adapter->hw.mac.type == e1000_82574) {
1486 		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1487 		tmp |= E1000_CTRL_EXT_PBA_CLR;
1488 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1490 		** Set the IVAR - interrupt vector routing.
1491 		** Each nibble represents a vector, high bit
1492 		** is enable, other 3 bits are the MSIX table
1493 		** entry, we map RXQ0 to 0, TXQ0 to 1, and
1494 		** Link (other) to 2, hence the magic number.
1496 		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1499 #ifdef DEVICE_POLLING
1501 	 * Only enable interrupts if we are not polling, make sure
1502 	 * they are off otherwise.
1504 	if (ifp->if_capenable & IFCAP_POLLING)
1505 		em_disable_intr(adapter);
1507 #endif /* DEVICE_POLLING */
1508 		em_enable_intr(adapter);
1510 	/* AMT based hardware can now take control from firmware */
1511 	if (adapter->has_manage && adapter->has_amt)
1512 		em_get_hw_control(adapter);
1514 	/* Don't reset the phy next time init gets called */
1515 	adapter->hw.phy.reset_disable = TRUE;
/*
 * em_init() body fragment — the signature line was dropped from this
 * listing. Takes the core lock and delegates to em_init_locked().
 */
1521 	struct adapter *adapter = arg;
1523 	EM_CORE_LOCK(adapter);
1524 	em_init_locked(adapter);
1525 	EM_CORE_UNLOCK(adapter);
1529 #ifdef DEVICE_POLLING
1530 /*********************************************************************
1532  *  Legacy polling routine
1534  *********************************************************************/
/*
 * DEVICE_POLLING entry: on POLL_AND_CHECK_STATUS also read ICR to pick
 * up link changes and RX overruns, then run RX cleanup and kick TX.
 * NOTE(review): listing has dropped lines; code left byte-identical.
 */
1536 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1538 	struct adapter *adapter = ifp->if_softc;
1539 	u32		reg_icr, rx_done = 0;
1541 	EM_CORE_LOCK(adapter);
1542 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1543 		EM_CORE_UNLOCK(adapter);
1547 	if (cmd == POLL_AND_CHECK_STATUS) {
1548 		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1549 		/* Link status change */
1550 		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1551 			adapter->hw.mac.get_link_status = 1;
1552 			em_update_link_status(adapter);
1554 		if (reg_icr & E1000_ICR_RXO)
1555 			adapter->rx_overruns++;
1557 	EM_CORE_UNLOCK(adapter);
1559 	rx_done = em_rxeof(adapter, count);
1561 	EM_TX_LOCK(adapter);
1563 #if __FreeBSD_version >= 800000
1564 	if (!drbr_empty(ifp, adapter->br))
1565 		em_mq_start_locked(ifp, NULL);
1567 	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1568 		em_start_locked(ifp);
1570 	EM_TX_UNLOCK(adapter);
1573 #endif /* DEVICE_POLLING */
1575 #ifdef EM_LEGACY_IRQ
1576 /*********************************************************************
1578  *  Legacy Interrupt Service routine
1580  *********************************************************************/
/*
 * Legacy (non-fast) interrupt handler body fragment — the function
 * signature line was dropped from this listing. Reads ICR, filters out
 * stray/shared interrupts, handles link changes, then runs RX/TX cleanup.
 */
1585 	struct adapter	*adapter = arg;
1586 	struct ifnet	*ifp = adapter->ifp;
/* With polling enabled the interrupt path does no work. */
1590 	if (ifp->if_capenable & IFCAP_POLLING)
1593 	EM_CORE_LOCK(adapter);
1594 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1595 	if (reg_icr & E1000_ICR_RXO)
1596 		adapter->rx_overruns++;
/* 0xffffffff = device gone; INT_ASSERTED clear on 82571+ = not ours. */
1597 	if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
1598 	    (adapter->hw.mac.type >= e1000_82571 &&
1599 	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
1602 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1605 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1606 		callout_stop(&adapter->timer);
1607 		adapter->hw.mac.get_link_status = 1;
1608 		em_update_link_status(adapter);
1609 		/* Deal with TX cruft when link lost */
1610 		em_tx_purge(adapter);
1611 		callout_reset(&adapter->timer, hz,
1612 		    em_local_timer, adapter);
1616 	EM_TX_LOCK(adapter);
1618 	em_rxeof(adapter, -1);
1620 	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1621 	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1622 		em_start_locked(ifp);
1623 	EM_TX_UNLOCK(adapter);
1626 	EM_CORE_UNLOCK(adapter);
1630 #else /* EM_FAST_IRQ, then fast interrupt routines only */
/*
 * Deferred link-change task (fast-IRQ mode): update link state, purge
 * stale TX work, and restart the local timer, under the core lock.
 */
1633 em_handle_link(void *context, int pending)
1635 	struct adapter	*adapter = context;
1636 	struct ifnet *ifp = adapter->ifp;
1638 	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
1641 	EM_CORE_LOCK(adapter);
1642 	callout_stop(&adapter->timer);
1643 	em_update_link_status(adapter);
1644 	/* Deal with TX cruft when link lost */
1645 	em_tx_purge(adapter);
1646 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1647 	EM_CORE_UNLOCK(adapter);
1651 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Taskqueue handler: drain RX (re-enqueueing itself if more work
 * remains), kick the TX path, then re-enable interrupts that
 * em_irq_fast() masked.
 */
1653 em_handle_rxtx(void *context, int pending)
1655 	struct adapter	*adapter = context;
1656 	struct ifnet	*ifp = adapter->ifp;
1659 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1660 		if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
1661 			taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1662 		EM_TX_LOCK(adapter);
1665 #if __FreeBSD_version >= 800000
1666 		if (!drbr_empty(ifp, adapter->br))
1667 			em_mq_start_locked(ifp, NULL);
1669 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1670 			em_start_locked(ifp);
1672 		EM_TX_UNLOCK(adapter);
1675 	em_enable_intr(adapter);
1678 /*********************************************************************
1680  *  Fast Legacy/MSI Combined Interrupt Service routine
1682  *********************************************************************/
1683 #if __FreeBSD_version < 700000
/* Pre-7.0 has no filter return codes; define them away to nothing. */
1684 #define FILTER_STRAY
1685 #define FILTER_HANDLED
/*
 * Fast interrupt filter: classify the interrupt from ICR, mask further
 * interrupts, and defer all real work to the rxtx/link taskqueues.
 */
1690 em_irq_fast(void *arg)
1692 	struct adapter	*adapter = arg;
1698 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones read: hardware has disappeared (e.g. hot unplug). */
1701 	if (reg_icr == 0xffffffff)
1702 		return FILTER_STRAY;
1704 	/* Definitely not our interrupt.  */
1706 		return FILTER_STRAY;
1709 	 * Starting with the 82571 chip, bit 31 should be used to
1710 	 * determine whether the interrupt belongs to us.
1712 	if (adapter->hw.mac.type >= e1000_82571 &&
1713 	    (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1714 		return FILTER_STRAY;
1717 	 * Mask interrupts until the taskqueue is finished running.  This is
1718 	 * cheap, just assume that it is needed.  This also works around the
1719 	 * MSI message reordering errata on certain systems.
1721 	em_disable_intr(adapter);
1722 	taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1724 	/* Link status change */
1725 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1726 		adapter->hw.mac.get_link_status = 1;
1727 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1730 	if (reg_icr & E1000_ICR_RXO)
1731 		adapter->rx_overruns++;
1732 	return FILTER_HANDLED;
1735 /*********************************************************************
1737  *  MSIX Interrupt Service Routines
1739  **********************************************************************/
/* IMS bits used to re-arm each MSI-X vector after servicing it. */
1740 #define EM_MSIX_TX	0x00040000
1741 #define EM_MSIX_RX	0x00010000
1742 #define EM_MSIX_LINK	0x00100000
/*
 * MSI-X TX vector: clean the TX ring under the TX lock, queue the TX
 * task for follow-up work, then re-arm this vector via IMS.
 */
1745 em_msix_tx(void *arg)
1747 	struct adapter *adapter = arg;
1748 	struct ifnet	*ifp = adapter->ifp;
1751 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1752 		EM_TX_LOCK(adapter);
1754 		EM_TX_UNLOCK(adapter);
1755 		taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1757 	/* Reenable this interrupt */
1758 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1762 /*********************************************************************
1764  *  MSIX RX Interrupt Service routine
1766  **********************************************************************/
/*
 * MSI-X RX vector: run bounded RX cleanup; if work remains, queue the
 * RX task, then re-arm this vector via IMS.
 */
1769 em_msix_rx(void *arg)
1771 	struct adapter *adapter = arg;
1772 	struct ifnet	*ifp = adapter->ifp;
1775 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1776 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1777 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1778 	/* Reenable this interrupt */
1779 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1783 /*********************************************************************
1785  *  MSIX Link Fast Interrupt Service routine
1787  **********************************************************************/
/*
 * MSI-X link vector: count the interrupt, defer link handling to the
 * fast taskqueue, and re-arm the link vector plus LSC in IMS.
 */
1790 em_msix_link(void *arg)
1792 	struct adapter	*adapter = arg;
1795 	++adapter->link_irq;
1796 	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1798 	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1799 		adapter->hw.mac.get_link_status = 1;
1800 		taskqueue_enqueue(taskqueue_fast, &adapter->link_task);
1802 	E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1803 	    EM_MSIX_LINK | E1000_IMS_LSC);
/*
 * Deferred RX task: keep draining the RX ring; re-enqueue itself while
 * em_rxeof() reports more work within the process limit.
 */
1808 em_handle_rx(void *context, int pending)
1810 	struct adapter	*adapter = context;
1811 	struct ifnet	*ifp = adapter->ifp;
1813 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1814 	    (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1815 		taskqueue_enqueue(adapter->tq, &adapter->rx_task);
/*
 * Deferred TX task: opportunistically take the TX lock (bail if
 * contended — the lock holder will transmit) and kick the send path.
 */
1820 em_handle_tx(void *context, int pending)
1822 	struct adapter	*adapter = context;
1823 	struct ifnet	*ifp = adapter->ifp;
1825 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1826 		if (!EM_TX_TRYLOCK(adapter))
1829 #if __FreeBSD_version >= 800000
1830 		if (!drbr_empty(ifp, adapter->br))
1831 			em_mq_start_locked(ifp, NULL);
1833 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1834 			em_start_locked(ifp);
1836 		EM_TX_UNLOCK(adapter);
1839 #endif /* EM_FAST_IRQ */
1841 /*********************************************************************
1843  *  Media Ioctl callback
1845  *  This routine is called whenever the user queries the status of
1846  *  the interface using ifconfig.
1848  **********************************************************************/
/*
 * Report current link/media state into 'ifmr'. 82545 fiber is special:
 * it reports 1000LX instead of the default 1000SX.
 */
1850 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1852 	struct adapter *adapter = ifp->if_softc;
1853 	u_char fiber_type = IFM_1000_SX;
1855 	INIT_DEBUGOUT("em_media_status: begin");
1857 	EM_CORE_LOCK(adapter);
1858 	em_update_link_status(adapter);
1860 	ifmr->ifm_status = IFM_AVALID;
1861 	ifmr->ifm_active = IFM_ETHER;
1863 	if (!adapter->link_active) {
1864 		EM_CORE_UNLOCK(adapter);
1868 	ifmr->ifm_status |= IFM_ACTIVE;
1870 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1871 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1872 		if (adapter->hw.mac.type == e1000_82545)
1873 			fiber_type = IFM_1000_LX;
1874 		ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: map link_speed to the matching IFM subtype. */
1876 		switch (adapter->link_speed) {
1878 			ifmr->ifm_active |= IFM_10_T;
1881 			ifmr->ifm_active |= IFM_100_TX;
1884 			ifmr->ifm_active |= IFM_1000_T;
1887 		if (adapter->link_duplex == FULL_DUPLEX)
1888 			ifmr->ifm_active |= IFM_FDX;
1890 			ifmr->ifm_active |= IFM_HDX;
1892 	EM_CORE_UNLOCK(adapter);
1895 /*********************************************************************
1897  *  Media Ioctl callback
1899  *  This routine is called when the user changes speed/duplex using
1900  *  media/mediopt option with ifconfig.
1902  **********************************************************************/
/*
 * Apply a user-requested media/speed/duplex setting, then reinitialize
 * with PHY reset enabled so the change takes effect.
 * NOTE(review): case labels were dropped from this listing.
 */
1904 em_media_change(struct ifnet *ifp)
1906 	struct adapter *adapter = ifp->if_softc;
1907 	struct ifmedia  *ifm = &adapter->media;
1909 	INIT_DEBUGOUT("em_media_change: begin");
1911 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1914 	EM_CORE_LOCK(adapter);
1915 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1917 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1918 		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1923 		adapter->hw.mac.autoneg = DO_AUTO_NEG;
1924 		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1927 		adapter->hw.mac.autoneg = FALSE;
1928 		adapter->hw.phy.autoneg_advertised = 0;
1929 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1930 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1932 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1935 		adapter->hw.mac.autoneg = FALSE;
1936 		adapter->hw.phy.autoneg_advertised = 0;
1937 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1938 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1940 			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1943 		device_printf(adapter->dev, "Unsupported media type\n");
1946 	/* As the speed/duplex settings my have changed we need to
1949 	adapter->hw.phy.reset_disable = FALSE;
1951 	em_init_locked(adapter);
1952 	EM_CORE_UNLOCK(adapter);
1957 /*********************************************************************
1959  *  This routine maps the mbufs to tx descriptors.
1961  *  return 0 on success, positive on failure
1962  **********************************************************************/
/*
 * Map an mbuf chain onto TX descriptors. Handles DMA mapping retries
 * (EFBIG -> defrag), the 82544/PCI-X address-split workaround, a TSO
 * sentinel-descriptor workaround, VLAN tagging, and finally bumps TDT.
 * On em_xmit() failure *m_headp may be freed and NULLed — callers must
 * not requeue in that case.
 * NOTE(review): numbered listing with dropped lines (braces, a few
 * statements missing); the order-sensitive descriptor logic is left
 * byte-identical, comments only.
 */
1965 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
1967 	bus_dma_segment_t	segs[EM_MAX_SCATTER];
1969 	struct em_buffer	*tx_buffer, *tx_buffer_mapped;
1970 	struct e1000_tx_desc	*ctxd = NULL;
1971 	struct mbuf		*m_head;
1972 	u32			txd_upper, txd_lower, txd_used, txd_saved;
1973 	int			nsegs, i, j, first, last = 0;
1974 	int			error, do_tso, tso_desc = 0;
1975 #if __FreeBSD_version < 700000
1979 	txd_upper = txd_lower = txd_used = txd_saved = 0;
1981 #if __FreeBSD_version >= 700000
1982 	do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1988 	 * Force a cleanup if number of TX descriptors
1989 	 * available hits the threshold
1991 	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1993 		/* Now do we at least have a minimal? */
1994 		if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1995 			adapter->no_tx_desc_avail1++;
2003 	 * If an mbuf is only header we need
2004 	 * to pull 4 bytes of data into it.
2006 	if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
2007 		m_head = m_pullup(m_head, M_TSO_LEN + 4);
2014 	 * Map the packet for DMA
2016 	 * Capture the first descriptor index,
2017 	 * this descriptor will have the index
2018 	 * of the EOP which is the only one that
2019 	 * now gets a DONE bit writeback.
2021 	first = adapter->next_avail_tx_desc;
2022 	tx_buffer = &adapter->tx_buffer_area[first];
2023 	tx_buffer_mapped = tx_buffer;
2024 	map = tx_buffer->map;
2026 	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2027 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2030 	 * There are two types of errors we can (try) to handle:
2031 	 * - EFBIG means the mbuf chain was too long and bus_dma ran
2032 	 *   out of segments.  Defragment the mbuf chain and try again.
2033 	 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2034 	 *   at this point in time.  Defer sending and try again later.
2035 	 * All other errors, in particular EINVAL, are fatal and prevent the
2036 	 * mbuf chain from ever going through.  Drop it and report error.
2038 	if (error == EFBIG) {
2041 		m = m_defrag(*m_headp, M_DONTWAIT);
2043 			adapter->mbuf_alloc_failed++;
2051 		error = bus_dmamap_load_mbuf_sg(adapter->txtag, map,
2052 		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
2055 			adapter->no_tx_dma_setup++;
2060 	} else if (error != 0) {
2061 		adapter->no_tx_dma_setup++;
2066 	 * TSO Hardware workaround, if this packet is not
2067 	 * TSO, and is only a single descriptor long, and
2068 	 * it follows a TSO burst, then we need to add a
2069 	 * sentinel descriptor to prevent premature writeback.
2071 	if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2074 		adapter->tx_tso = FALSE;
/* Need headroom (+2 for sentinel/context); otherwise unload and bail. */
2077 	if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2078 		adapter->no_tx_desc_avail2++;
2079 		bus_dmamap_unload(adapter->txtag, map);
2084 	/* Do hardware assists */
2085 #if __FreeBSD_version >= 700000
2086 	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2087 		error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2089 			return (ENXIO); /* something foobar */
2090 		/* we need to make a final sentinel transmit desc */
2094 	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2095 		em_transmit_checksum_setup(adapter,  m_head,
2096 		    &txd_upper, &txd_lower);
2098 	i = adapter->next_avail_tx_desc;
2099 	if (adapter->pcix_82544)
2102 	/* Set up our transmit descriptors */
2103 	for (j = 0; j < nsegs; j++) {
2105 		bus_addr_t seg_addr;
2106 		/* If adapter is 82544 and on PCIX bus */
2107 		if(adapter->pcix_82544) {
2108 			DESC_ARRAY	desc_array;
2109 			u32		array_elements, counter;
2111 			 * Check the Address and Length combination and
2112 			 * split the data accordingly
2114 			array_elements = em_fill_descriptors(segs[j].ds_addr,
2115 			    segs[j].ds_len, &desc_array);
2116 			for (counter = 0; counter < array_elements; counter++) {
/* Ran out of descriptors mid-packet: roll back to the saved index. */
2117 				if (txd_used == adapter->num_tx_desc_avail) {
2118 					adapter->next_avail_tx_desc = txd_saved;
2119 					adapter->no_tx_desc_avail2++;
2120 					bus_dmamap_unload(adapter->txtag, map);
2123 				tx_buffer = &adapter->tx_buffer_area[i];
2124 				ctxd = &adapter->tx_desc_base[i];
2125 				ctxd->buffer_addr = htole64(
2126 				    desc_array.descriptor[counter].address);
2127 				ctxd->lower.data = htole32(
2128 				    (adapter->txd_cmd | txd_lower | (u16)
2129 				    desc_array.descriptor[counter].length));
2131 				    htole32((txd_upper));
2133 				if (++i == adapter->num_tx_desc)
2135 				tx_buffer->m_head = NULL;
2136 				tx_buffer->next_eop = -1;
2140 			tx_buffer = &adapter->tx_buffer_area[i];
2141 			ctxd = &adapter->tx_desc_base[i];
2142 			seg_addr = segs[j].ds_addr;
2143 			seg_len  = segs[j].ds_len;
2146 			** If this is the last descriptor, we want to
2147 			** split it so we have a small final sentinel
2149 			if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2151 				ctxd->buffer_addr = htole64(seg_addr);
2152 				ctxd->lower.data = htole32(
2153 				adapter->txd_cmd | txd_lower | seg_len);
2156 				if (++i == adapter->num_tx_desc)
2158 				/* Now make the sentinel */	
2159 				++txd_used; /* using an extra txd */
2160 				ctxd = &adapter->tx_desc_base[i];
2161 				tx_buffer = &adapter->tx_buffer_area[i];
2163 				    htole64(seg_addr + seg_len);
2164 				ctxd->lower.data = htole32(
2165 				adapter->txd_cmd | txd_lower | 4);
2169 				if (++i == adapter->num_tx_desc)
2172 				ctxd->buffer_addr = htole64(seg_addr);
2173 				ctxd->lower.data = htole32(
2174 				adapter->txd_cmd | txd_lower | seg_len);
2178 				if (++i == adapter->num_tx_desc)
2181 			tx_buffer->m_head = NULL;
2182 			tx_buffer->next_eop = -1;
2186 	adapter->next_avail_tx_desc = i;
2187 	if (adapter->pcix_82544)
2188 		adapter->num_tx_desc_avail -= txd_used;
2190 		adapter->num_tx_desc_avail -= nsegs;
2191 		if (tso_desc) /* TSO used an extra for sentinel */
2192 			adapter->num_tx_desc_avail -= txd_used;
2196 	** Handle VLAN tag, this is the
2197 	** biggest difference between 
2200 #if __FreeBSD_version < 700000
2201 	/* Find out if we are in vlan mode. */
2202 	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
2204 		ctxd->upper.fields.special =
2205 		    htole16(VLAN_TAG_VALUE(mtag));
2206 #else /* FreeBSD 7 */
2207 	if (m_head->m_flags & M_VLANTAG) {
2208 		/* Set the vlan id. */
2209 		ctxd->upper.fields.special =
2210 		    htole16(m_head->m_pkthdr.ether_vtag);
2212 		/* Tell hardware to add tag */
2213 		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2216 	tx_buffer->m_head = m_head;
2217 	tx_buffer_mapped->map = tx_buffer->map;
2218 	tx_buffer->map = map;
2219 	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2222 	 * Last Descriptor of Packet
2223 	 * needs End Of Packet (EOP)
2224 	 * and Report Status (RS)
2227 	    htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2229 	 * Keep track in the first buffer which
2230 	 * descriptor will be written back
2232 	tx_buffer = &adapter->tx_buffer_area[first];
2233 	tx_buffer->next_eop = last;
2234 	adapter->watchdog_time = ticks;
2237 	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2238 	 * that this frame is available to transmit.
2240 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2241 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 82547 half-duplex needs the FIFO-hang workaround path for TDT writes. */
2242 	if (adapter->hw.mac.type == e1000_82547 &&
2243 	    adapter->link_duplex == HALF_DUPLEX)
2244 		em_82547_move_tail(adapter);
2246 		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2247 		if (adapter->hw.mac.type == e1000_82547)
2248 			em_82547_update_fifo_head(adapter,
2249 			    m_head->m_pkthdr.len);
2255 /*********************************************************************
2257  * 82547 workaround to avoid controller hang in half-duplex environment.
2258  * The workaround is to avoid queuing a large packet that would span
2259  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2260  * in this case. We do that only when FIFO is quiescent.
2262  **********************************************************************/
/*
 * Walk descriptors between hardware TDT and software TDT summing their
 * lengths; if the pending data would trip the FIFO workaround, retry
 * via a 1-tick callout instead of advancing TDT now.
 */
2264 em_82547_move_tail(void *arg)
2266 	struct adapter *adapter = arg;
2267 	struct e1000_tx_desc *tx_desc;
2268 	u16	hw_tdt, sw_tdt, length = 0;
2271 	EM_TX_LOCK_ASSERT(adapter);
2273 	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
2274 	sw_tdt = adapter->next_avail_tx_desc;
2276 	while (hw_tdt != sw_tdt) {
2277 		tx_desc = &adapter->tx_desc_base[hw_tdt];
2278 		length += tx_desc->lower.flags.length;
2279 		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
2280 		if (++hw_tdt == adapter->num_tx_desc)
2284 			if (em_82547_fifo_workaround(adapter, length)) {
2285 				adapter->tx_fifo_wrk_cnt++;
2286 				callout_reset(&adapter->tx_fifo_timer, 1,
2287 		 		    em_82547_move_tail, adapter);
2290 			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
2291 			em_82547_update_fifo_head(adapter, length);
/*
 * em_82547_fifo_workaround - decide whether a packet of 'len' bytes may be
 * handed to the 82547 Tx FIFO now.  In half duplex, if the padded packet
 * length would cross the FIFO ring boundary (insufficient space before the
 * wrap point), attempt a FIFO reset first.  Return value is consumed by
 * em_82547_move_tail(); non-zero means "defer the TDT write".
 */
2298 em_82547_fifo_workaround(struct adapter *adapter, int len)
2300 int fifo_space, fifo_pkt_len;
/* Pad to the FIFO header granularity, matching what hardware consumes */
2302 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2304 if (adapter->link_duplex == HALF_DUPLEX) {
2305 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2307 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2308 if (em_82547_tx_fifo_reset(adapter))
/*
 * em_82547_update_fifo_head - advance the software shadow of the Tx FIFO
 * head by the padded length of a transmitted packet, wrapping at the FIFO
 * size.  Keeps the workaround's view of the FIFO in sync with hardware.
 */
2319 em_82547_update_fifo_head(struct adapter *adapter, int len)
2321 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2323 /* tx_fifo_head is always 16 byte aligned */
2324 adapter->tx_fifo_head += fifo_pkt_len;
2325 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2326 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * em_82547_tx_fifo_reset - reset the 82547 internal Tx FIFO pointers.
 *
 * Only performed when the transmitter is completely quiescent: descriptor
 * head == tail, FIFO head == tail, saved FIFO head == saved tail, and the
 * FIFO packet count is zero.  The TX unit is disabled around the pointer
 * rewrite and re-enabled afterwards.  Returns non-zero on a successful
 * reset (return statements are elided in this view — TODO confirm).
 */
2332 em_82547_tx_fifo_reset(struct adapter *adapter)
2336 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2337 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2338 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2339 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2340 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2341 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2342 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2343 /* Disable TX unit */
2344 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2345 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2346 tctl & ~E1000_TCTL_EN);
2348 /* Reset FIFO pointers */
2349 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2350 adapter->tx_head_addr);
2351 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2352 adapter->tx_head_addr);
2353 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2354 adapter->tx_head_addr);
2355 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2356 adapter->tx_head_addr);
2358 /* Re-enable TX unit */
2359 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2360 E1000_WRITE_FLUSH(&adapter->hw);
/* Software shadow restarts at 0; count resets for diagnostics */
2362 adapter->tx_fifo_head = 0;
2363 adapter->tx_fifo_reset_cnt++;
/*
 * em_set_promisc - program RCTL from the interface flags.
 * IFF_PROMISC enables unicast+multicast promiscuous (and SBP to accept
 * bad packets); IFF_ALLMULTI enables multicast promiscuous only.
 */
2373 em_set_promisc(struct adapter *adapter)
2375 struct ifnet *ifp = adapter->ifp;
2378 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2380 if (ifp->if_flags & IFF_PROMISC) {
2381 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2382 /* Turn this on if you want to see bad packets */
2384 reg_rctl |= E1000_RCTL_SBP;
2385 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2386 } else if (ifp->if_flags & IFF_ALLMULTI) {
2387 reg_rctl |= E1000_RCTL_MPE;
2388 reg_rctl &= ~E1000_RCTL_UPE;
2389 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * em_disable_promisc - clear all promiscuous-mode bits (unicast,
 * multicast, store-bad-packets) from RCTL.
 */
2394 em_disable_promisc(struct adapter *adapter)
2398 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2400 reg_rctl &= (~E1000_RCTL_UPE);
2401 reg_rctl &= (~E1000_RCTL_MPE);
2402 reg_rctl &= (~E1000_RCTL_SBP);
2403 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2407 /*********************************************************************
2410 * This routine is called whenever multicast address list is updated.
2412 **********************************************************************/
/*
 * em_set_multi - push the interface's multicast address list to hardware.
 *
 * Collects up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses into a
 * temporary array and hands them to e1000_update_mc_addr_list().  If the
 * list overflows, multicast promiscuous (MPE) is enabled instead.  The
 * 82542 rev 2 erratum requires MWI off and RCTL_RST around the update.
 */
2415 em_set_multi(struct adapter *adapter)
2417 struct ifnet *ifp = adapter->ifp;
2418 struct ifmultiaddr *ifma;
2420 u8 *mta; /* Multicast array memory */
2423 IOCTL_DEBUGOUT("em_set_multi: begin");
/* 82542 rev 2 workaround: disable MWI and reset the receiver first */
2425 if (adapter->hw.mac.type == e1000_82542 &&
2426 adapter->hw.revision_id == E1000_REVISION_2) {
2427 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2428 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2429 e1000_pci_clear_mwi(&adapter->hw);
2430 reg_rctl |= E1000_RCTL_RST;
2431 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2435 /* Allocate temporary memory to setup array */
2436 mta = malloc(sizeof(u8) *
2437 (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
2438 M_DEVBUF, M_NOWAIT | M_ZERO);
/* Deliberate panic on allocation failure (small, bounded allocation) */
2440 panic("em_set_multi memory failure\n");
2442 #if __FreeBSD_version < 800000
2445 if_maddr_rlock(ifp);
2447 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2448 if (ifma->ifma_addr->sa_family != AF_LINK)
2451 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2454 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2455 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2458 #if __FreeBSD_version < 800000
2459 IF_ADDR_UNLOCK(ifp);
2461 if_maddr_runlock(ifp);
/* Too many groups for perfect filtering: fall back to MPE */
2463 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2464 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2465 reg_rctl |= E1000_RCTL_MPE;
2466 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2468 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* Undo the 82542 rev 2 workaround */
2470 if (adapter->hw.mac.type == e1000_82542 &&
2471 adapter->hw.revision_id == E1000_REVISION_2) {
2472 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2473 reg_rctl &= ~E1000_RCTL_RST;
2474 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2476 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2477 e1000_pci_set_mwi(&adapter->hw);
2479 free(mta, M_DEVBUF);
2483 /*********************************************************************
2486 * This routine checks for link status and updates statistics.
2488 **********************************************************************/
/*
 * em_local_timer - once-per-second housekeeping callout.
 *
 * Refreshes link state and statistics, restores the LAA into RAR[0] on
 * 82571, runs the SmartSpeed workaround, and checks the TX watchdog.
 * If the watchdog expired, the interface is marked down and reinitialized;
 * otherwise the callout re-arms itself.  Runs with the CORE lock held.
 */
2491 em_local_timer(void *arg)
2493 struct adapter *adapter = arg;
2494 struct ifnet *ifp = adapter->ifp;
2496 EM_CORE_LOCK_ASSERT(adapter);
/* Without polling, kick the rx/tx task periodically as a safety net */
2498 #ifndef DEVICE_POLLING
2499 taskqueue_enqueue(adapter->tq,
2500 &adapter->rxtx_task);
2502 em_update_link_status(adapter);
2503 em_update_stats_counters(adapter);
2505 /* Reset LAA into RAR[0] on 82571 */
2506 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2507 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2509 if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING)
2510 em_print_hw_stats(adapter);
2512 em_smartspeed(adapter);
2515 * We check the watchdog: the time since
2516 * the last TX descriptor was cleaned.
2517 * This implies a functional TX engine.
2519 if ((adapter->watchdog_check == TRUE) &&
2520 (ticks - adapter->watchdog_time > EM_WATCHDOG))
2523 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
/* Watchdog fired: recover by full reinitialization */
2526 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2527 adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2528 adapter->watchdog_events++;
2529 em_init_locked(adapter);
/*
 * em_update_link_status - poll the PHY/MAC for link and propagate
 * transitions to the network stack.
 *
 * Determines link per media type (copper reads the PHY, fiber checks the
 * STATUS register, serdes uses the cached serdes_has_link), then on an
 * up-transition records speed/duplex, applies the 82571/82572 PCI-E
 * SPEED_MODE workaround for sub-gigabit links, and reports LINK_STATE_UP;
 * on a down-transition clears state and disables the TX watchdog.
 */
2533 em_update_link_status(struct adapter *adapter)
2535 struct e1000_hw *hw = &adapter->hw;
2536 struct ifnet *ifp = adapter->ifp;
2537 device_t dev = adapter->dev;
2540 /* Get the cached link value or read phy for real */
2541 switch (hw->phy.media_type) {
2542 case e1000_media_type_copper:
2543 if (hw->mac.get_link_status) {
2544 /* Do the work to read phy */
2545 e1000_check_for_link(hw);
2546 link_check = !hw->mac.get_link_status;
2547 if (link_check) /* ESB2 fix */
2548 e1000_cfg_on_link_up(hw);
2552 case e1000_media_type_fiber:
2553 e1000_check_for_link(hw);
2554 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2557 case e1000_media_type_internal_serdes:
2558 e1000_check_for_link(hw);
2559 link_check = adapter->hw.mac.serdes_has_link;
2562 case e1000_media_type_unknown:
2566 /* Now check for a transition */
2567 if (link_check && (adapter->link_active == 0)) {
2568 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2569 &adapter->link_duplex);
2570 /* Check if we must disable SPEED_MODE bit on PCI-E */
2571 if ((adapter->link_speed != SPEED_1000) &&
2572 ((hw->mac.type == e1000_82571) ||
2573 (hw->mac.type == e1000_82572))) {
2575 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2576 tarc0 &= ~SPEED_MODE_BIT;
2577 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
2580 device_printf(dev, "Link is up %d Mbps %s\n",
2581 adapter->link_speed,
2582 ((adapter->link_duplex == FULL_DUPLEX) ?
2583 "Full Duplex" : "Half Duplex"));
2584 adapter->link_active = 1;
2585 adapter->smartspeed = 0;
2586 ifp->if_baudrate = adapter->link_speed * 1000000;
2587 if_link_state_change(ifp, LINK_STATE_UP);
2588 } else if (!link_check && (adapter->link_active == 1)) {
2589 ifp->if_baudrate = adapter->link_speed = 0;
2590 adapter->link_duplex = 0;
2592 device_printf(dev, "Link is Down\n");
2593 adapter->link_active = 0;
2594 /* Link down, disable watchdog */
2595 adapter->watchdog_check = FALSE;
2596 if_link_state_change(ifp, LINK_STATE_DOWN);
2600 /*********************************************************************
2602 * This routine disables all traffic on the adapter by issuing a
2603 * global reset on the MAC and deallocates TX/RX buffers.
2605 * This routine should always be called with BOTH the CORE
2607 **********************************************************************/
/*
 * em_stop - quiesce the adapter: mask interrupts, stop callouts, mark the
 * interface down, and issue a global MAC reset.  WUC is cleared on 82544+
 * so wake-up state does not survive the stop.  Requires both the CORE and
 * TX locks (asserted below).
 */
2612 struct adapter *adapter = arg;
2613 struct ifnet *ifp = adapter->ifp;
2615 EM_CORE_LOCK_ASSERT(adapter);
2616 EM_TX_LOCK_ASSERT(adapter);
2618 INIT_DEBUGOUT("em_stop: begin");
2620 em_disable_intr(adapter);
2621 callout_stop(&adapter->timer);
2622 callout_stop(&adapter->tx_fifo_timer);
2624 /* Tell the stack that the interface is no longer active */
2625 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2627 e1000_reset_hw(&adapter->hw);
2628 if (adapter->hw.mac.type >= e1000_82544)
2629 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2633 /*********************************************************************
2635 * Determine hardware revision.
2637 **********************************************************************/
/*
 * em_identify_hardware - read PCI config identity into the shared-code
 * hw struct and let e1000_set_mac_type() classify the MAC.  Also forces
 * Bus Master / Memory Access enable bits on if the firmware left them off.
 */
2639 em_identify_hardware(struct adapter *adapter)
2641 device_t dev = adapter->dev;
2643 /* Make sure our PCI config space has the necessary stuff set */
2644 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2645 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2646 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2647 device_printf(dev, "Memory Access and/or Bus Master bits "
2649 adapter->hw.bus.pci_cmd_word |=
2650 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2651 pci_write_config(dev, PCIR_COMMAND,
2652 adapter->hw.bus.pci_cmd_word, 2);
2655 /* Save off the information about this board */
2656 adapter->hw.vendor_id = pci_get_vendor(dev);
2657 adapter->hw.device_id = pci_get_device(dev);
2658 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2659 adapter->hw.subsystem_vendor_id =
2660 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2661 adapter->hw.subsystem_device_id =
2662 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2664 /* Do Shared Code Init and Setup */
2665 if (e1000_set_mac_type(&adapter->hw)) {
2666 device_printf(dev, "Setup init failure\n");
/*
 * em_allocate_pci_resources - map the register BAR, locate and map the
 * IO BAR on older (82543..pre-82571) parts, initialize the MSI/X resource
 * arrays, and choose the interrupt mode via em_setup_msix().
 */
2672 em_allocate_pci_resources(struct adapter *adapter)
2674 device_t dev = adapter->dev;
2675 int val, rid, error = E1000_SUCCESS;
2678 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2680 if (adapter->memory == NULL) {
2681 device_printf(dev, "Unable to allocate bus resource: memory\n");
2684 adapter->osdep.mem_bus_space_tag =
2685 rman_get_bustag(adapter->memory);
2686 adapter->osdep.mem_bus_space_handle =
2687 rman_get_bushandle(adapter->memory);
2688 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2690 /* Only older adapters use IO mapping */
2691 if ((adapter->hw.mac.type > e1000_82543) &&
2692 (adapter->hw.mac.type < e1000_82571)) {
2693 /* Figure out where our IO BAR is */
2694 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2695 val = pci_read_config(dev, rid, 4);
2696 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2697 adapter->io_rid = rid;
2701 /* check for 64bit BAR */
2702 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2705 if (rid >= PCIR_CIS) {
2706 device_printf(dev, "Unable to locate IO BAR\n");
2709 adapter->ioport = bus_alloc_resource_any(dev,
2710 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2711 if (adapter->ioport == NULL) {
2712 device_printf(dev, "Unable to allocate bus resource: "
2716 adapter->hw.io_base = 0;
2717 adapter->osdep.io_bus_space_tag =
2718 rman_get_bustag(adapter->ioport);
2719 adapter->osdep.io_bus_space_handle =
2720 rman_get_bushandle(adapter->ioport);
2724 ** Init the resource arrays
2725 ** used by MSIX setup
2727 for (int i = 0; i < 3; i++) {
2728 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
2729 adapter->tag[i] = NULL;
2730 adapter->res[i] = NULL;
2734 * Setup MSI/X or MSI if PCI Express
2737 adapter->msi = em_setup_msix(adapter);
2739 adapter->hw.back = &adapter->osdep;
2744 /*********************************************************************
2746 * Setup the Legacy or MSI Interrupt handler
2748 **********************************************************************/
/*
 * em_allocate_legacy - set up the legacy/MSI (single-vector) interrupt.
 *
 * Allocates one IRQ resource (RID 0 for legacy, 1 for MSI) and registers
 * either a conventional MPSAFE handler (EM_LEGACY_IRQ) or a fast filter
 * (em_irq_fast) backed by the rxtx/link taskqueue for deferred work.
 * Interrupts are masked first via IMC.
 */
2750 em_allocate_legacy(struct adapter *adapter)
2752 device_t dev = adapter->dev;
2755 /* Manually turn off all interrupts */
2756 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2758 /* Legacy RID is 0 */
2759 if (adapter->msi == 0)
2760 adapter->rid[0] = 0;
2762 /* We allocate a single interrupt resource */
2763 adapter->res[0] = bus_alloc_resource_any(dev,
2764 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
2765 if (adapter->res[0] == NULL) {
2766 device_printf(dev, "Unable to allocate bus resource: "
2771 #ifdef EM_LEGACY_IRQ
2772 /* We do Legacy setup */
2773 if ((error = bus_setup_intr(dev, adapter->res[0],
2774 #if __FreeBSD_version > 700000
2775 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter,
2777 INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter,
2779 &adapter->tag[0])) != 0) {
2780 device_printf(dev, "Failed to register interrupt handler");
2784 #else /* FAST_IRQ */
2786 * Try allocating a fast interrupt and the associated deferred
2787 * processing contexts.
2789 TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2790 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2791 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2792 taskqueue_thread_enqueue, &adapter->tq);
2793 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2794 device_get_nameunit(adapter->dev));
2795 #if __FreeBSD_version < 700000
2796 if ((error = bus_setup_intr(dev, adapter->res[0],
2797 INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter,
2799 if ((error = bus_setup_intr(dev, adapter->res[0],
2800 INTR_TYPE_NET, em_irq_fast, NULL, adapter,
2802 &adapter->tag[0])) != 0) {
2803 device_printf(dev, "Failed to register fast interrupt "
2804 "handler: %d\n", error);
/* Clean up the taskqueue created above on failure */
2805 taskqueue_free(adapter->tq);
2809 #endif /* EM_LEGACY_IRQ */
2814 /*********************************************************************
2816 * Setup the MSIX Interrupt handlers
2817 * This is not really Multiqueue, rather
2818 * its just multiple interrupt vectors.
2820 **********************************************************************/
/*
 * em_allocate_msix - set up the MSI-X interrupt handlers.
 *
 * Not true multiqueue: three vectors are used for RX (slot 0), TX (slot 1)
 * and link (slot 2).  Allocates the IRQ resources, creates the deferred
 * processing tasks and taskqueue, then registers one handler per vector.
 * All interrupts are masked (IMC) before setup.
 *
 * Fix: the link-vector failure message said "TX handler" (copy-paste from
 * the TX vector); it now correctly reports the LINK handler.
 */
2822 em_allocate_msix(struct adapter *adapter)
2824 device_t dev = adapter->dev;
2827 /* Make sure all interrupts are disabled */
2828 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2830 /* First get the resources */
2831 for (int i = 0; i < adapter->msi; i++) {
2832 adapter->res[i] = bus_alloc_resource_any(dev,
2833 SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2834 if (adapter->res[i] == NULL) {
2836 "Unable to allocate bus resource: "
2837 "MSIX Interrupt\n");
2843 * Now allocate deferred processing contexts.
2845 TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2846 TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2848 * Handle compatibility for msi case for deferral due to
2851 TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
2852 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2853 adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT,
2854 taskqueue_thread_enqueue, &adapter->tq);
2855 taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq",
2856 device_get_nameunit(adapter->dev));
2859 * And setup the interrupt handlers
2862 /* First slot to RX */
2863 if ((error = bus_setup_intr(dev, adapter->res[0],
2864 #if __FreeBSD_version > 700000
2865 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter,
2867 INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter,
2869 &adapter->tag[0])) != 0) {
2870 device_printf(dev, "Failed to register RX handler");
2875 if ((error = bus_setup_intr(dev, adapter->res[1],
2876 #if __FreeBSD_version > 700000
2877 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter,
2879 INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter,
2881 &adapter->tag[1])) != 0) {
2882 device_printf(dev, "Failed to register TX handler");
2887 if ((error = bus_setup_intr(dev, adapter->res[2],
2888 #if __FreeBSD_version > 700000
2889 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter,
2891 INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter,
2893 &adapter->tag[2])) != 0) {
2894 device_printf(dev, "Failed to register LINK handler");
/*
 * em_free_pci_resources - release everything em_allocate_pci_resources
 * and the interrupt setup acquired: interrupt tags/resources (same array
 * logic for legacy, MSI and MSI-X), the MSI allocation, and the MSIX,
 * memory, flash and ioport BAR mappings.
 */
2903 em_free_pci_resources(struct adapter *adapter)
2905 device_t dev = adapter->dev;
2907 /* Make sure the for loop below runs once */
2908 if (adapter->msi == 0)
2912 * First release all the interrupt resources:
2913 * notice that since these are just kept
2914 * in an array we can do the same logic
2915 * whether its MSIX or just legacy.
2917 for (int i = 0; i < adapter->msi; i++) {
2918 if (adapter->tag[i] != NULL) {
2919 bus_teardown_intr(dev, adapter->res[i],
2921 adapter->tag[i] = NULL;
2923 if (adapter->res[i] != NULL) {
2924 bus_release_resource(dev, SYS_RES_IRQ,
2925 adapter->rid[i], adapter->res[i]);
2930 pci_release_msi(dev);
2932 if (adapter->msix != NULL)
2933 bus_release_resource(dev, SYS_RES_MEMORY,
2934 PCIR_BAR(EM_MSIX_BAR), adapter->msix);
2936 if (adapter->memory != NULL)
2937 bus_release_resource(dev, SYS_RES_MEMORY,
2938 PCIR_BAR(0), adapter->memory);
2940 if (adapter->flash != NULL)
2941 bus_release_resource(dev, SYS_RES_MEMORY,
2942 EM_FLASH, adapter->flash);
2944 if (adapter->ioport != NULL)
2945 bus_release_resource(dev, SYS_RES_IOPORT,
2946 adapter->io_rid, adapter->ioport);
2950 * Setup MSI or MSI/X
/*
 * em_setup_msix - pick the interrupt mode for this adapter.
 *
 * Pre-82571 parts get legacy interrupts.  82574 ("Hartwell") tries MSI-X
 * (capped at 3 vectors even though the part supports 5); otherwise a
 * single MSI vector is attempted.  Returns the vector count chosen
 * (return statements are elided in this view — TODO confirm).
 */
2953 em_setup_msix(struct adapter *adapter)
2955 device_t dev = adapter->dev;
2958 if (adapter->hw.mac.type < e1000_82571)
2961 /* Setup MSI/X for Hartwell */
2962 if (adapter->hw.mac.type == e1000_82574) {
2963 /* Map the MSIX BAR */
2964 int rid = PCIR_BAR(EM_MSIX_BAR);
2965 adapter->msix = bus_alloc_resource_any(dev,
2966 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2967 if (!adapter->msix) {
2968 /* May not be enabled */
2969 device_printf(adapter->dev,
2970 "Unable to map MSIX table \n");
2973 val = pci_msix_count(dev);
2975 ** 82574 can be configured for 5 but
2976 ** we limit use to 3.
2978 if (val > 3) val = 3;
2979 if ((val) && pci_alloc_msix(dev, &val) == 0) {
2980 device_printf(adapter->dev,"Using MSIX interrupts\n");
/* Fall back to plain MSI when MSI-X is unavailable */
2985 val = pci_msi_count(dev);
2986 if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
2988 device_printf(adapter->dev,"Using MSI interrupt\n");
2994 /*********************************************************************
2996 * Initialize the hardware to a configuration
2997 * as specified by the adapter structure.
2999 **********************************************************************/
/*
 * em_hardware_init - bring the MAC to the configuration described by the
 * adapter structure: global reset, smart-power-down disable on 82571/2,
 * flow-control watermark computation from the packet buffer size, and the
 * shared-code e1000_init_hw() call.
 *
 * Fix: the tunable range check used `||` — `(x >= 0) || (x < 4)` is always
 * true, so the e1000_fc_none fallback was unreachable and an out-of-range
 * em_fc_setting was written into fc.requested_mode.  Changed to `&&` so
 * only 0..3 are accepted.
 */
3001 em_hardware_init(struct adapter *adapter)
3003 device_t dev = adapter->dev;
3006 INIT_DEBUGOUT("em_hardware_init: begin");
3008 /* Issue a global reset */
3009 e1000_reset_hw(&adapter->hw);
3011 /* When hardware is reset, fifo_head is also reset */
3012 adapter->tx_fifo_head = 0;
3014 /* Set up smart power down as default off on newer adapters. */
3015 if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
3016 adapter->hw.mac.type == e1000_82572)) {
3019 /* Speed up time to link by disabling smart power down. */
3020 e1000_read_phy_reg(&adapter->hw,
3021 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
3022 phy_tmp &= ~IGP02E1000_PM_SPD;
3023 e1000_write_phy_reg(&adapter->hw,
3024 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
3028 * These parameters control the automatic generation (Tx) and
3029 * response (Rx) to Ethernet PAUSE frames.
3030 * - High water mark should allow for at least two frames to be
3031 * received after sending an XOFF.
3032 * - Low water mark works best when it is very near the high water mark.
3033 * This allows the receiver to restart by sending XON when it has
3034 * drained a bit. Here we use an arbitrary value of 1500 which will
3035 * restart after one full frame is pulled from the buffer. There
3036 * could be several smaller frames in the buffer and if so they will
3037 * not trigger the XON until their total number reduces the buffer
3039 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
3041 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
3044 adapter->hw.fc.high_water = rx_buffer_size -
3045 roundup2(adapter->max_frame_size, 1024);
3046 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
3048 if (adapter->hw.mac.type == e1000_80003es2lan)
3049 adapter->hw.fc.pause_time = 0xFFFF;
3051 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
3052 adapter->hw.fc.send_xon = TRUE;
3054 /* Set Flow control, use the tunable location if sane */
3055 if ((em_fc_setting >= 0) && (em_fc_setting < 4))
3056 adapter->hw.fc.requested_mode = em_fc_setting;
3058 adapter->hw.fc.requested_mode = e1000_fc_none;
3060 /* Override - workaround for PCHLAN issue */
3061 if (adapter->hw.mac.type == e1000_pchlan)
3062 adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
3064 if (e1000_init_hw(&adapter->hw) < 0) {
3065 device_printf(dev, "Hardware Initialization Failed\n");
3069 e1000_check_for_link(&adapter->hw);
3074 /*********************************************************************
3076 * Setup networking device structure and register an interface.
3078 **********************************************************************/
/*
 * em_setup_interface - allocate and register the ifnet.
 *
 * Fills in the ifnet callbacks and queue limits, attaches the Ethernet
 * address, advertises capabilities (checksum offload, TSO4 on capable
 * parts, VLAN tagging/MTU, optional polling and WOL magic), and builds
 * the ifmedia list appropriate for the PHY/media type.
 */
3080 em_setup_interface(device_t dev, struct adapter *adapter)
3084 INIT_DEBUGOUT("em_setup_interface: begin");
3086 ifp = adapter->ifp = if_alloc(IFT_ETHER);
3088 panic("%s: can not if_alloc()", device_get_nameunit(dev));
3089 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3090 ifp->if_mtu = ETHERMTU;
3091 ifp->if_init = em_init;
3092 ifp->if_softc = adapter;
3093 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3094 ifp->if_ioctl = em_ioctl;
3095 ifp->if_start = em_start;
3096 IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
3097 ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
3098 IFQ_SET_READY(&ifp->if_snd);
3100 ether_ifattach(ifp, adapter->hw.mac.addr);
3102 ifp->if_capabilities = ifp->if_capenable = 0;
3104 #if __FreeBSD_version >= 800000
3105 /* Multiqueue tx functions */
3106 ifp->if_transmit = em_mq_start;
3107 ifp->if_qflush = em_qflush;
3108 adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
/* 82543 and newer can checksum in hardware */
3110 if (adapter->hw.mac.type >= e1000_82543) {
3112 #if __FreeBSD_version < 700000
3113 version_cap = IFCAP_HWCSUM;
3115 version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
3117 ifp->if_capabilities |= version_cap;
3118 ifp->if_capenable |= version_cap;
3121 #if __FreeBSD_version >= 700000
3122 /* Identify TSO capable adapters */
3123 if ((adapter->hw.mac.type > e1000_82544) &&
3124 (adapter->hw.mac.type != e1000_82547))
3125 ifp->if_capabilities |= IFCAP_TSO4;
3127 * By default only enable on PCI-E, this
3128 * can be overriden by ifconfig.
3130 if (adapter->hw.mac.type >= e1000_82571)
3131 ifp->if_capenable |= IFCAP_TSO4;
3134 * Tell the upper layer(s) we
3135 * support full VLAN capability
3137 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3138 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
3139 ifp->if_capenable |= (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
3142 ** Dont turn this on by default, if vlans are
3143 ** created on another pseudo device (eg. lagg)
3144 ** then vlan events are not passed thru, breaking
3145 ** operation, but with HW FILTER off it works. If
3146 ** using vlans directly on the em driver you can
3147 ** enable this and get full hardware tag filtering.
3149 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
3151 #ifdef DEVICE_POLLING
3152 ifp->if_capabilities |= IFCAP_POLLING;
3155 /* Limit WOL to MAGIC, not clear others are used */
3157 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
3158 ifp->if_capenable |= IFCAP_WOL_MAGIC;
3162 * Specify the media types supported by this adapter and register
3163 * callbacks to update media and link information
3165 ifmedia_init(&adapter->media, IFM_IMASK,
3166 em_media_change, em_media_status);
3167 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3168 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
3169 u_char fiber_type = IFM_1000_SX; /* default type */
/* 82545 fiber parts are LX, not SX */
3171 if (adapter->hw.mac.type == e1000_82545)
3172 fiber_type = IFM_1000_LX;
3173 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
3175 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
3177 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3178 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
3180 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
3182 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
/* IFE PHYs cannot do 1000T */
3184 if (adapter->hw.phy.type != e1000_phy_ife) {
3185 ifmedia_add(&adapter->media,
3186 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3187 ifmedia_add(&adapter->media,
3188 IFM_ETHER | IFM_1000_T, 0, NULL);
3191 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3192 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3196 /*********************************************************************
3198 * Workaround for SmartSpeed on 82541 and 82547 controllers
3200 **********************************************************************/
/*
 * em_smartspeed - 82541/82547 SmartSpeed workaround, driven once per
 * second from em_local_timer().
 *
 * Only applies while link is down, autoneg is on, and 1000FD is being
 * advertised on an IGP PHY.  If the master/slave configuration fault is
 * asserted on two consecutive reads, manual master/slave is cleared and
 * autonegotiation restarted; after EM_SMARTSPEED_DOWNSHIFT iterations the
 * workaround instead forces MS_ENABLE (2/3-pair cable case); the counter
 * wraps at EM_SMARTSPEED_MAX.
 */
3202 em_smartspeed(struct adapter *adapter)
3206 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
3207 adapter->hw.mac.autoneg == 0 ||
3208 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
3211 if (adapter->smartspeed == 0) {
3212 /* If Master/Slave config fault is asserted twice,
3213 * we assume back-to-back */
3214 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3215 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
3217 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3218 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
3219 e1000_read_phy_reg(&adapter->hw,
3220 PHY_1000T_CTRL, &phy_tmp);
3221 if(phy_tmp & CR_1000T_MS_ENABLE) {
3222 phy_tmp &= ~CR_1000T_MS_ENABLE;
3223 e1000_write_phy_reg(&adapter->hw,
3224 PHY_1000T_CTRL, phy_tmp);
3225 adapter->smartspeed++;
/* Restart autonegotiation with the new M/S setting */
3226 if(adapter->hw.mac.autoneg &&
3227 !e1000_copper_link_autoneg(&adapter->hw) &&
3228 !e1000_read_phy_reg(&adapter->hw,
3229 PHY_CONTROL, &phy_tmp)) {
3230 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3231 MII_CR_RESTART_AUTO_NEG);
3232 e1000_write_phy_reg(&adapter->hw,
3233 PHY_CONTROL, phy_tmp);
3238 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
3239 /* If still no link, perhaps using 2/3 pair cable */
3240 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
3241 phy_tmp |= CR_1000T_MS_ENABLE;
3242 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
3243 if(adapter->hw.mac.autoneg &&
3244 !e1000_copper_link_autoneg(&adapter->hw) &&
3245 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
3246 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3247 MII_CR_RESTART_AUTO_NEG);
3248 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
3251 /* Restart process after EM_SMARTSPEED_MAX iterations */
3252 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
3253 adapter->smartspeed = 0;
3258 * Manage DMA'able memory.
/*
 * em_dmamap_cb - bus_dmamap_load callback: store the bus address of the
 * single segment into the caller-supplied bus_addr_t (arg).
 */
3261 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3265 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * em_dma_malloc - allocate a coherent DMA memory region of 'size' bytes.
 *
 * Creates a single-segment tag (EM_DBA_ALIGN alignment), allocates and
 * loads the map, and records tag/map/vaddr/paddr in *dma.  On any failure
 * the partially-created resources are torn down (unload/free/destroy —
 * intervening goto labels are elided in this view) and the struct fields
 * are nulled so em_dma_free() is safe to call.
 */
3269 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3270 struct em_dma_alloc *dma, int mapflags)
3274 #if __FreeBSD_version >= 700000
3275 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
3277 error = bus_dma_tag_create(NULL, /* parent */
3279 EM_DBA_ALIGN, 0, /* alignment, bounds */
3280 BUS_SPACE_MAXADDR, /* lowaddr */
3281 BUS_SPACE_MAXADDR, /* highaddr */
3282 NULL, NULL, /* filter, filterarg */
3285 size, /* maxsegsize */
3287 NULL, /* lockfunc */
3291 device_printf(adapter->dev,
3292 "%s: bus_dma_tag_create failed: %d\n",
3297 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3298 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3300 device_printf(adapter->dev,
3301 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3302 __func__, (uintmax_t)size, error);
3307 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3308 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3309 if (error || dma->dma_paddr == 0) {
3310 device_printf(adapter->dev,
3311 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: release in reverse order of acquisition */
3319 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3321 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3322 bus_dma_tag_destroy(dma->dma_tag);
3324 dma->dma_map = NULL;
3325 dma->dma_tag = NULL;
/*
 * em_dma_free - release a region allocated by em_dma_malloc().
 * Safe to call on an already-freed struct (tag == NULL is a no-op);
 * syncs with POSTREAD|POSTWRITE before unload and nulls the fields.
 */
3331 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3333 if (dma->dma_tag == NULL)
3335 if (dma->dma_map != NULL) {
3336 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3337 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3338 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3339 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3340 dma->dma_map = NULL;
3342 bus_dma_tag_destroy(dma->dma_tag);
3343 dma->dma_tag = NULL;
3347 /*********************************************************************
3349 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3350 * the information needed to transmit a packet on the wire.
3352 **********************************************************************/
/*
 * em_allocate_transmit_structures - create the TX DMA tag (sized for TSO:
 * EM_TSO_SIZE / EM_MAX_SCATTER / EM_TSO_SEG_SIZE), allocate the tx_buffer
 * array, and create one DMA map per descriptor.  On failure everything is
 * released via em_free_transmit_structures().
 */
3354 em_allocate_transmit_structures(struct adapter *adapter)
3356 device_t dev = adapter->dev;
3357 struct em_buffer *tx_buffer;
3361 * Create DMA tags for tx descriptors
3363 #if __FreeBSD_version >= 700000
3364 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
3366 if ((error = bus_dma_tag_create(NULL, /* parent */
3368 1, 0, /* alignment, bounds */
3369 BUS_SPACE_MAXADDR, /* lowaddr */
3370 BUS_SPACE_MAXADDR, /* highaddr */
3371 NULL, NULL, /* filter, filterarg */
3372 EM_TSO_SIZE, /* maxsize */
3373 EM_MAX_SCATTER, /* nsegments */
3374 EM_TSO_SEG_SIZE, /* maxsegsize */
3376 NULL, /* lockfunc */
3378 &adapter->txtag)) != 0) {
3379 device_printf(dev, "Unable to allocate TX DMA tag\n");
3383 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
3384 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3385 if (adapter->tx_buffer_area == NULL) {
3386 device_printf(dev, "Unable to allocate tx_buffer memory\n");
3391 /* Create the descriptor buffer dma maps */
3392 for (int i = 0; i < adapter->num_tx_desc; i++) {
3393 tx_buffer = &adapter->tx_buffer_area[i];
3394 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
3396 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no EOP descriptor outstanding" for this buffer */
3399 tx_buffer->next_eop = -1;
/* Error path: tear down whatever was created above */
3404 em_free_transmit_structures(adapter);
3408 /*********************************************************************
3410 * (Re)Initialize transmit structures.
3412 **********************************************************************/
/*
 * em_setup_transmit_structures - (re)initialize the TX ring: zero the
 * descriptor ring, unload/free any mbufs left in the tx_buffer array,
 * reset the ring indices and available count, and sync the descriptor
 * DMA memory for hardware use.
 */
3414 em_setup_transmit_structures(struct adapter *adapter)
3416 struct em_buffer *tx_buffer;
3418 /* Clear the old ring contents */
3419 bzero(adapter->tx_desc_base,
3420 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3422 /* Free any existing TX buffers */
/* tx_buffer is reassigned each iteration; the ++ in the header is moot */
3423 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3424 tx_buffer = &adapter->tx_buffer_area[i];
3425 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3426 BUS_DMASYNC_POSTWRITE);
3427 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3428 m_freem(tx_buffer->m_head);
3429 tx_buffer->m_head = NULL;
3430 tx_buffer->next_eop = -1;
3434 adapter->next_avail_tx_desc = 0;
3435 adapter->next_tx_to_clean = 0;
3436 adapter->num_tx_desc_avail = adapter->num_tx_desc;
3438 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3439 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3444 /*********************************************************************
3446 * Enable transmit unit.
3448 **********************************************************************/
3450 em_initialize_transmit_unit(struct adapter *adapter)
3452 u32 tctl, tarc, tipg = 0;
3455 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3456 /* Setup the Base and Length of the Tx Descriptor Ring */
3457 bus_addr = adapter->txdma.dma_paddr;
3458 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
3459 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
/* High 32 bits of the ring's physical base go in TDBAH. */
3460 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
3461 (u32)(bus_addr >> 32));
3462 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
3464 /* Setup the HW Tx Head and Tail descriptor pointers */
3465 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
3466 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
3468 HW_DEBUGOUT2("Base = %x, Length = %x\n",
3469 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
3470 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
/*
 * TIPG (inter-packet gap) defaults depend on MAC generation and,
 * for 8254x copper vs. fiber/serdes, on the media type.
 */
3472 /* Set the default values for the Tx Inter Packet Gap timer */
3473 switch (adapter->hw.mac.type) {
3475 tipg = DEFAULT_82542_TIPG_IPGT;
3476 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3477 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3479 case e1000_80003es2lan:
3480 tipg = DEFAULT_82543_TIPG_IPGR1;
3481 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3482 E1000_TIPG_IPGR2_SHIFT;
3485 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3486 (adapter->hw.phy.media_type ==
3487 e1000_media_type_internal_serdes))
3488 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3490 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3491 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3492 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3495 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
/* TX interrupt delay; absolute delay (TADV) exists on 82540+ only. */
3496 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
3497 if(adapter->hw.mac.type >= e1000_82540)
3498 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3499 adapter->tx_abs_int_delay.value);
/* 82571/82572: enable the TARC speed-mode workaround bit. */
3501 if ((adapter->hw.mac.type == e1000_82571) ||
3502 (adapter->hw.mac.type == e1000_82572)) {
3503 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3504 tarc |= SPEED_MODE_BIT;
3505 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3506 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
3507 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3509 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3510 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3512 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3515 /* Program the Transmit Control Register */
3516 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3517 tctl &= ~E1000_TCTL_CT;
3518 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3519 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
/* Multiple Request Support is available from 82571 onward. */
3521 if (adapter->hw.mac.type >= e1000_82571)
3522 tctl |= E1000_TCTL_MULR;
3524 /* This write will effectively turn on the transmit unit. */
3525 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3527 /* Setup Transmit Descriptor Base Settings */
3528 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Request delayed TX interrupts only when a delay is configured. */
3530 if (adapter->tx_int_delay.value > 0)
3531 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3534 /*********************************************************************
3536 * Free all transmit related data structures.
3538 **********************************************************************/
3540 em_free_transmit_structures(struct adapter *adapter)
3542 struct em_buffer *tx_buffer;
3544 INIT_DEBUGOUT("free_transmit_structures: begin");
/* Release per-descriptor mbufs and DMA maps, if any were created. */
3546 if (adapter->tx_buffer_area != NULL) {
3547 for (int i = 0; i < adapter->num_tx_desc; i++) {
3548 tx_buffer = &adapter->tx_buffer_area[i];
3549 if (tx_buffer->m_head != NULL) {
3550 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3551 BUS_DMASYNC_POSTWRITE);
3552 bus_dmamap_unload(adapter->txtag,
3554 m_freem(tx_buffer->m_head);
3555 tx_buffer->m_head = NULL;
3556 } else if (tx_buffer->map != NULL)
3557 bus_dmamap_unload(adapter->txtag,
/* Destroy the map whether or not an mbuf was attached. */
3559 if (tx_buffer->map != NULL) {
3560 bus_dmamap_destroy(adapter->txtag,
3562 tx_buffer->map = NULL;
/* Now release the buffer array and the DMA tag themselves. */
3566 if (adapter->tx_buffer_area != NULL) {
3567 free(adapter->tx_buffer_area, M_DEVBUF);
3568 adapter->tx_buffer_area = NULL;
3570 if (adapter->txtag != NULL) {
3571 bus_dma_tag_destroy(adapter->txtag);
3572 adapter->txtag = NULL;
/* The buf_ring only exists on FreeBSD 8 (multiqueue transmit). */
3574 #if __FreeBSD_version >= 800000
3575 if (adapter->br != NULL)
3576 buf_ring_free(adapter->br, M_DEVBUF);
3580 /*********************************************************************
3582 * The offload context needs to be set when we transfer the first
3583 * packet of a particular protocol (TCP/UDP). This routine has been
3584 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3586 * Added back the old method of keeping the current context type
3587 * and not setting if unnecessary, as this is reported to be a
3588 * big performance win. -jfv
3589 **********************************************************************/
3591 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3592 u32 *txd_upper, u32 *txd_lower)
3594 struct e1000_context_desc *TXD = NULL;
3595 struct em_buffer *tx_buffer;
3596 struct ether_vlan_header *eh;
3597 struct ip *ip = NULL;
3598 struct ip6_hdr *ip6;
3599 int curr_txd, ehdrlen;
3600 u32 cmd, hdr_len, ip_hlen;
3605 cmd = hdr_len = ipproto = 0;
3606 curr_txd = adapter->next_avail_tx_desc;
3609 * Determine where frame payload starts.
3610 * Jump over vlan headers if already present,
3611 * helpful for QinQ too.
3613 eh = mtod(mp, struct ether_vlan_header *);
3614 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3615 etype = ntohs(eh->evl_proto);
3616 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3618 etype = ntohs(eh->evl_encap_proto);
3619 ehdrlen = ETHER_HDR_LEN;
3623 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3624 * TODO: Support SCTP too when it hits the tree.
/* --- IPv4: optionally also offload the IP header checksum. --- */
3628 ip = (struct ip *)(mp->m_data + ehdrlen);
3629 ip_hlen = ip->ip_hl << 2;
3631 /* Setup of IP header checksum. */
3632 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3634 * Start offset for header checksum calculation.
3635 * End offset for header checksum calculation.
3636 * Offset of place to put the checksum.
3638 TXD = (struct e1000_context_desc *)
3639 &adapter->tx_desc_base[curr_txd];
3640 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3641 TXD->lower_setup.ip_fields.ipcse =
3642 htole16(ehdrlen + ip_hlen);
3643 TXD->lower_setup.ip_fields.ipcso =
3644 ehdrlen + offsetof(struct ip, ip_sum);
3645 cmd |= E1000_TXD_CMD_IP;
3646 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/* The L3 header must be contiguous in the first mbuf. */
3649 if (mp->m_len < ehdrlen + ip_hlen)
3650 return; /* failure */
3652 hdr_len = ehdrlen + ip_hlen;
3656 case ETHERTYPE_IPV6:
3657 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3658 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3660 if (mp->m_len < ehdrlen + ip_hlen)
3661 return; /* failure */
3663 /* IPv6 doesn't have a header checksum. */
3665 hdr_len = ehdrlen + ip_hlen;
3666 ipproto = ip6->ip6_nxt;
/* --- TCP payload checksum context. --- */
3677 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3678 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3679 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
/* Skip the context descriptor if the HW already holds a TCP one. */
3680 /* no need for context if already set */
3681 if (adapter->last_hw_offload == CSUM_TCP)
3683 adapter->last_hw_offload = CSUM_TCP;
3685 * Start offset for payload checksum calculation.
3686 * End offset for payload checksum calculation.
3687 * Offset of place to put the checksum.
3689 TXD = (struct e1000_context_desc *)
3690 &adapter->tx_desc_base[curr_txd];
3691 TXD->upper_setup.tcp_fields.tucss = hdr_len;
/* tucse == 0 means "checksum to end of packet". */
3692 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3693 TXD->upper_setup.tcp_fields.tucso =
3694 hdr_len + offsetof(struct tcphdr, th_sum);
3695 cmd |= E1000_TXD_CMD_TCP;
/* --- UDP payload checksum context (same layout, uh_sum offset). --- */
3700 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3701 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3702 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3703 /* no need for context if already set */
3704 if (adapter->last_hw_offload == CSUM_UDP)
3706 adapter->last_hw_offload = CSUM_UDP;
3708 * Start offset for header checksum calculation.
3709 * End offset for header checksum calculation.
3710 * Offset of place to put the checksum.
3712 TXD = (struct e1000_context_desc *)
3713 &adapter->tx_desc_base[curr_txd];
3714 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3715 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3716 TXD->upper_setup.tcp_fields.tucso =
3717 hdr_len + offsetof(struct udphdr, uh_sum);
/* Emit the context descriptor and consume one ring slot for it. */
3725 TXD->tcp_seg_setup.data = htole32(0);
3726 TXD->cmd_and_length =
3727 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3728 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3729 tx_buffer->m_head = NULL;
3730 tx_buffer->next_eop = -1;
3732 if (++curr_txd == adapter->num_tx_desc)
3735 adapter->num_tx_desc_avail--;
3736 adapter->next_avail_tx_desc = curr_txd;
3740 #if __FreeBSD_version >= 700000
3741 /**********************************************************************
3743 * Setup work for hardware segmentation offload (TSO)
3745 **********************************************************************/
3747 em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
3750 struct e1000_context_desc *TXD;
3751 struct em_buffer *tx_buffer;
3752 struct ether_vlan_header *eh;
3754 struct ip6_hdr *ip6;
3756 int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
3760 * This function could/should be extended to support IP/IPv6
3761 * fragmentation as well. But as they say, one step at a time.
3765 * Determine where frame payload starts.
3766 * Jump over vlan headers if already present,
3767 * helpful for QinQ too.
3769 eh = mtod(mp, struct ether_vlan_header *);
3770 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3771 etype = ntohs(eh->evl_proto);
3772 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3774 etype = ntohs(eh->evl_encap_proto);
3775 ehdrlen = ETHER_HDR_LEN;
3778 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3779 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3780 return FALSE; /* -1 */
3783 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
3784 * TODO: Support SCTP too when it hits the tree.
/* --- IPv4/TCP: prime th_sum with the pseudo-header checksum. --- */
3789 ip = (struct ip *)(mp->m_data + ehdrlen);
3790 if (ip->ip_p != IPPROTO_TCP)
3791 return FALSE; /* 0 */
3794 ip_hlen = ip->ip_hl << 2;
3795 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3796 return FALSE; /* -1 */
3797 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
3799 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3800 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3802 th->th_sum = mp->m_pkthdr.csum_data;
/*
 * --- IPv6/TCP: currently disabled — the early "return FALSE" at
 * line 3807 makes the remainder of this case unreachable.
 */
3805 case ETHERTYPE_IPV6:
3807 return FALSE; /* Not supported yet. */
3808 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3809 if (ip6->ip6_nxt != IPPROTO_TCP)
3810 return FALSE; /* 0 */
3812 ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
3813 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3814 return FALSE; /* -1 */
3815 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
/*
 * NOTE(review): "ip->ip6_dst" mixes the IPv4 pointer with an IPv6
 * field — almost certainly meant ip6->ip6_dst. Harmless today only
 * because this path is unreachable (see return above); fix before
 * enabling IPv6 TSO.
 */
3817 th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
3818 htons(IPPROTO_TCP)); /* XXX: function notyet. */
3820 th->th_sum = mp->m_pkthdr.csum_data;
/* Total header bytes the HW must replicate for every segment. */
3826 hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
3828 *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
3829 E1000_TXD_DTYP_D | /* Data descr type */
3830 E1000_TXD_CMD_TSE); /* Do TSE on this packet */
3832 /* IP and/or TCP header checksum calculation and insertion. */
3833 *txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
3834 E1000_TXD_POPTS_TXSM) << 8;
3836 curr_txd = adapter->next_avail_tx_desc;
3837 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3838 TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3840 /* IPv6 doesn't have a header checksum. */
3843 * Start offset for header checksum calculation.
3844 * End offset for header checksum calculation.
3845 * Offset of place to put the checksum.
3847 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3848 TXD->lower_setup.ip_fields.ipcse =
3849 htole16(ehdrlen + ip_hlen - 1);
3850 TXD->lower_setup.ip_fields.ipcso =
3851 ehdrlen + offsetof(struct ip, ip_sum);
3854 * Start offset for payload checksum calculation.
3855 * End offset for payload checksum calculation.
3856 * Offset of place to put the checksum.
3858 TXD->upper_setup.tcp_fields.tucss =
3860 TXD->upper_setup.tcp_fields.tucse = 0;
3861 TXD->upper_setup.tcp_fields.tucso =
3862 ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
3864 * Payload size per packet w/o any headers.
3865 * Length of all headers up to payload.
3867 TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
3868 TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
3870 TXD->cmd_and_length = htole32(adapter->txd_cmd |
3871 E1000_TXD_CMD_DEXT | /* Extended descr */
3872 E1000_TXD_CMD_TSE | /* TSE context */
3873 (isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
3874 E1000_TXD_CMD_TCP | /* Do TCP checksum */
3875 (mp->m_pkthdr.len - (hdr_len))); /* Total len */
/* The context descriptor itself consumes one ring slot. */
3877 tx_buffer->m_head = NULL;
3878 tx_buffer->next_eop = -1;
3880 if (++curr_txd == adapter->num_tx_desc)
3883 adapter->num_tx_desc_avail--;
3884 adapter->next_avail_tx_desc = curr_txd;
3885 adapter->tx_tso = TRUE;
3890 #endif /* __FreeBSD_version >= 700000 */
3892 /**********************************************************************
3894 * Examine each tx_buffer in the used queue. If the hardware is done
3895 * processing the packet then free associated resources. The
3896 * tx_buffer is put back on the free queue.
3898 **********************************************************************/
3900 em_txeof(struct adapter *adapter)
3902 int first, last, done, num_avail;
3903 struct em_buffer *tx_buffer;
3904 struct e1000_tx_desc *tx_desc, *eop_desc;
3905 struct ifnet *ifp = adapter->ifp;
/* Caller must hold the TX lock — this mutates ring indices. */
3907 EM_TX_LOCK_ASSERT(adapter);
/* Nothing to clean if the whole ring is already free. */
3909 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3912 num_avail = adapter->num_tx_desc_avail;
3913 first = adapter->next_tx_to_clean;
3914 tx_desc = &adapter->tx_desc_base[first];
3915 tx_buffer = &adapter->tx_buffer_area[first];
/* next_eop stores the index of this packet's EOP descriptor. */
3916 last = tx_buffer->next_eop;
3917 eop_desc = &adapter->tx_desc_base[last];
3920 * What this does is get the index of the
3921 * first descriptor AFTER the EOP of the
3922 * first packet, that way we can do the
3923 * simple comparison on the inner while loop.
3925 if (++last == adapter->num_tx_desc)
3929 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3930 BUS_DMASYNC_POSTREAD);
/* Outer loop: one iteration per completed (DD-set) packet. */
3932 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3933 /* We clean the range of the packet */
3934 while (first != done) {
3935 tx_desc->upper.data = 0;
3936 tx_desc->lower.data = 0;
3937 tx_desc->buffer_addr = 0;
3940 if (tx_buffer->m_head) {
3942 bus_dmamap_sync(adapter->txtag,
3944 BUS_DMASYNC_POSTWRITE);
3945 bus_dmamap_unload(adapter->txtag,
3948 m_freem(tx_buffer->m_head);
3949 tx_buffer->m_head = NULL;
3951 tx_buffer->next_eop = -1;
/* Progress was made; refresh the watchdog timestamp. */
3952 adapter->watchdog_time = ticks;
3954 if (++first == adapter->num_tx_desc)
3957 tx_buffer = &adapter->tx_buffer_area[first];
3958 tx_desc = &adapter->tx_desc_base[first];
3960 /* See if we can continue to the next packet */
3961 last = tx_buffer->next_eop;
3963 eop_desc = &adapter->tx_desc_base[last];
3964 /* Get new done point */
3965 if (++last == adapter->num_tx_desc) last = 0;
3970 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3971 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3973 adapter->next_tx_to_clean = first;
3976 * If we have enough room, clear IFF_DRV_OACTIVE to
3977 * tell the stack that it is OK to send packets.
3978 * If there are no pending descriptors, clear the watchdog.
3980 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
3981 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3982 if (num_avail == adapter->num_tx_desc) {
3983 adapter->watchdog_check = FALSE;
3984 adapter->num_tx_desc_avail = num_avail;
3989 adapter->num_tx_desc_avail = num_avail;
3993 /*********************************************************************
3995 * When Link is lost sometimes there is work still in the TX ring
3996 * which may result in a watchdog, rather than allow that we do an
3997 * attempted cleanup and then reinit here. Note that this has been
3998 * seen mostly with fiber adapters.
4000 **********************************************************************/
4002 em_tx_purge(struct adapter *adapter)
/* Only act when the link is down AND the watchdog is armed. */
4004 if ((!adapter->link_active) && (adapter->watchdog_check)) {
4005 EM_TX_LOCK(adapter);
4007 EM_TX_UNLOCK(adapter);
/* Cleanup didn't drain everything — fall back to a full reinit. */
4008 if (adapter->watchdog_check) /* Still outstanding? */
4009 em_init_locked(adapter);
4013 /*********************************************************************
4015 * Get a buffer from system mbuf buffer pool.
4017 **********************************************************************/
4019 em_get_buf(struct adapter *adapter, int i)
4022 bus_dma_segment_t segs[1];
4024 struct em_buffer *rx_buffer;
/* Allocate a single mbuf cluster for RX slot i. */
4027 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
4029 adapter->mbuf_cluster_failed++;
4032 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header on a 4-byte boundary when the frame fits. */
4034 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4035 m_adj(m, ETHER_ALIGN);
4038 * Using memory from the mbuf cluster pool, invoke the
4039 * bus_dma machinery to arrange the memory mapping.
/* Load into the spare map first so slot i's map stays valid on error. */
4041 error = bus_dmamap_load_mbuf_sg(adapter->rxtag,
4042 adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
4048 /* If nsegs is wrong then the stack is corrupt. */
4049 KASSERT(nsegs == 1, ("Too many segments returned!"));
4051 rx_buffer = &adapter->rx_buffer_area[i];
4052 if (rx_buffer->m_head != NULL)
4053 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap: slot takes the loaded spare map; old map becomes the spare. */
4055 map = rx_buffer->map;
4056 rx_buffer->map = adapter->rx_sparemap;
4057 adapter->rx_sparemap = map;
4058 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4059 rx_buffer->m_head = m;
/* Point the RX descriptor at the cluster's bus address. */
4061 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4065 /*********************************************************************
4067 * Allocate memory for rx_buffer structures. Since we use one
4068 * rx_buffer per received packet, the maximum number of rx_buffer's
4069 * that we'll need is equal to the number of receive descriptors
4070 * that we've allocated.
4072 **********************************************************************/
4074 em_allocate_receive_structures(struct adapter *adapter)
4076 device_t dev = adapter->dev;
4077 struct em_buffer *rx_buffer;
4080 adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) *
4081 adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
4082 if (adapter->rx_buffer_area == NULL) {
4083 device_printf(dev, "Unable to allocate rx_buffer memory\n");
/* FreeBSD 7+ inherits restrictions from the parent bus's DMA tag. */
4087 #if __FreeBSD_version >= 700000
4088 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
4090 error = bus_dma_tag_create(NULL, /* parent */
4092 1, 0, /* alignment, bounds */
4093 BUS_SPACE_MAXADDR, /* lowaddr */
4094 BUS_SPACE_MAXADDR, /* highaddr */
4095 NULL, NULL, /* filter, filterarg */
4096 MCLBYTES, /* maxsize */
4098 MCLBYTES, /* maxsegsize */
4100 NULL, /* lockfunc */
4104 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4109 /* Create the spare map (used by getbuf) */
4110 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4111 &adapter->rx_sparemap);
4113 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* One DMA map per RX descriptor slot. */
4118 rx_buffer = adapter->rx_buffer_area;
4119 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4120 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4123 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Error path: tear down everything allocated so far. */
4132 em_free_receive_structures(adapter);
4136 /*********************************************************************
4138 * (Re)initialize receive structures.
4140 **********************************************************************/
4142 em_setup_receive_structures(struct adapter *adapter)
4144 struct em_buffer *rx_buffer;
4147 /* Reset descriptor ring */
4148 bzero(adapter->rx_desc_base,
4149 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4151 /* Free current RX buffers. */
4152 rx_buffer = adapter->rx_buffer_area;
4153 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4154 if (rx_buffer->m_head != NULL) {
4155 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4156 BUS_DMASYNC_POSTREAD);
4157 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4158 m_freem(rx_buffer->m_head);
4159 rx_buffer->m_head = NULL;
4163 /* Allocate new ones. */
/* em_get_buf fills both the mbuf slot and the descriptor address. */
4164 for (i = 0; i < adapter->num_rx_desc; i++) {
4165 error = em_get_buf(adapter, i);
4170 /* Setup our descriptor pointers */
4171 adapter->next_rx_desc_to_check = 0;
/* Hand the initialized ring to the device. */
4172 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4173 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4178 /*********************************************************************
4180 * Enable receive unit.
4182 **********************************************************************/
4183 #define MAX_INTS_PER_SEC 8000
4184 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
4187 em_initialize_receive_unit(struct adapter *adapter)
4189 struct ifnet *ifp = adapter->ifp;
4193 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
4196 * Make sure receives are disabled while setting
4197 * up the descriptor ring
4199 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4200 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4202 if (adapter->hw.mac.type >= e1000_82540) {
4203 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4204 adapter->rx_abs_int_delay.value);
4206 * Set the interrupt throttling rate. Value is calculated
4207 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
4209 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
4213 ** When using MSIX interrupts we need to throttle
4214 ** using the EITR register (82574 only)
4217 for (int i = 0; i < 4; i++)
4218 E1000_WRITE_REG(&adapter->hw,
4219 E1000_EITR_82574(i), DEFAULT_ITR);
4221 /* Disable accelerated acknowledge */
4222 if (adapter->hw.mac.type == e1000_82574)
4223 E1000_WRITE_REG(&adapter->hw,
4224 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
4226 /* Setup the Base and Length of the Rx Descriptor Ring */
4227 bus_addr = adapter->rxdma.dma_paddr;
4228 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
4229 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
4230 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
4231 (u32)(bus_addr >> 32));
4232 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
4235 /* Setup the Receive Control Register */
4236 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4237 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
4238 E1000_RCTL_RDMTS_HALF |
4239 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4241 /* Make sure VLAN Filters are off */
4242 rctl &= ~E1000_RCTL_VFE;
/* 82543 TBI workaround may require accepting bad packets (SBP). */
4244 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
4245 rctl |= E1000_RCTL_SBP;
4247 rctl &= ~E1000_RCTL_SBP;
/* RX buffer size: >2048 requires the BSEX (extended size) bit. */
4249 switch (adapter->rx_buffer_len) {
4252 rctl |= E1000_RCTL_SZ_2048;
4255 rctl |= E1000_RCTL_SZ_4096 |
4256 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4259 rctl |= E1000_RCTL_SZ_8192 |
4260 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4263 rctl |= E1000_RCTL_SZ_16384 |
4264 E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long Packet Enable follows the configured MTU. */
4268 if (ifp->if_mtu > ETHERMTU)
4269 rctl |= E1000_RCTL_LPE;
4271 rctl &= ~E1000_RCTL_LPE;
4273 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
4274 if ((adapter->hw.mac.type >= e1000_82543) &&
4275 (ifp->if_capenable & IFCAP_RXCSUM)) {
4276 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
4277 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
4278 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
4282 ** XXX TEMPORARY WORKAROUND: on some systems with 82573
4283 ** long latencies are observed, like Lenovo X60. This
4284 ** change eliminates the problem, but since having positive
4285 ** values in RDTR is a known source of problems on other
4286 ** platforms another solution is being sought.
4288 if (adapter->hw.mac.type == e1000_82573)
4289 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
4291 /* Enable Receives */
4292 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4295 * Setup the HW Rx Head and
4296 * Tail Descriptor Pointers
/* Tail = last descriptor: the whole ring is available to the HW. */
4298 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
4299 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
4304 /*********************************************************************
4306 * Free receive related data structures.
4308 **********************************************************************/
4310 em_free_receive_structures(struct adapter *adapter)
4312 struct em_buffer *rx_buffer;
4315 INIT_DEBUGOUT("free_receive_structures: begin");
4317 if (adapter->rx_sparemap) {
4318 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4319 adapter->rx_sparemap = NULL;
4322 /* Cleanup any existing buffers */
4323 if (adapter->rx_buffer_area != NULL) {
4324 rx_buffer = adapter->rx_buffer_area;
4325 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4326 if (rx_buffer->m_head != NULL) {
4327 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4328 BUS_DMASYNC_POSTREAD);
4329 bus_dmamap_unload(adapter->rxtag,
4331 m_freem(rx_buffer->m_head);
4332 rx_buffer->m_head = NULL;
4333 } else if (rx_buffer->map != NULL)
4334 bus_dmamap_unload(adapter->rxtag,
/* Destroy the map whether or not an mbuf was attached. */
4336 if (rx_buffer->map != NULL) {
4337 bus_dmamap_destroy(adapter->rxtag,
4339 rx_buffer->map = NULL;
/* Release the buffer array and the RX DMA tag. */
4344 if (adapter->rx_buffer_area != NULL) {
4345 free(adapter->rx_buffer_area, M_DEVBUF);
4346 adapter->rx_buffer_area = NULL;
4349 if (adapter->rxtag != NULL) {
4350 bus_dma_tag_destroy(adapter->rxtag);
4351 adapter->rxtag = NULL;
4355 /*********************************************************************
4357 * This routine executes in interrupt context. It replenishes
4358 * the mbufs in the descriptor and sends data which has been
4359 * dma'ed into host memory to upper layer.
4361 * We loop at most count times if count is > 0, or until done if
4364 * For polling we also now return the number of cleaned packets
4365 *********************************************************************/
4367 em_rxeof(struct adapter *adapter, int count)
/* NOTE(review): stray double semicolon — harmless, clean up later. */
4369 struct ifnet *ifp = adapter->ifp;;
4371 u8 status, accept_frame = 0, eop = 0;
4372 u16 len, desc_len, prev_len_adj;
4374 struct e1000_rx_desc *current_desc;
4376 EM_RX_LOCK(adapter);
4377 i = adapter->next_rx_desc_to_check;
4378 current_desc = &adapter->rx_desc_base[i];
4379 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4380 BUS_DMASYNC_POSTREAD);
/* Fast exit when the next descriptor hasn't completed yet. */
4382 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4383 EM_RX_UNLOCK(adapter);
4387 while ((current_desc->status & E1000_RXD_STAT_DD) &&
4389 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4390 struct mbuf *m = NULL;
4392 mp = adapter->rx_buffer_area[i].m_head;
4394 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4395 * needs to access the last received byte in the mbuf.
4397 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4398 BUS_DMASYNC_POSTREAD);
4402 desc_len = le16toh(current_desc->length);
4403 status = current_desc->status;
/* EOP: strip the trailing CRC; short frames borrow from prev mbuf. */
4404 if (status & E1000_RXD_STAT_EOP) {
4407 if (desc_len < ETHER_CRC_LEN) {
4409 prev_len_adj = ETHER_CRC_LEN - desc_len;
4411 len = desc_len - ETHER_CRC_LEN;
/* Error path: 82543 TBI workaround may still accept the frame. */
4417 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4419 u32 pkt_len = desc_len;
4421 if (adapter->fmp != NULL)
4422 pkt_len += adapter->fmp->m_pkthdr.len;
4424 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4425 if (TBI_ACCEPT(&adapter->hw, status,
4426 current_desc->errors, pkt_len, last_byte,
4427 adapter->min_frame_size, adapter->max_frame_size)) {
4428 e1000_tbi_adjust_stats_82543(&adapter->hw,
4429 &adapter->stats, pkt_len,
4430 adapter->hw.mac.addr,
4431 adapter->max_frame_size);
/* Replenish the slot; on failure the old mbuf is recycled below. */
4439 if (em_get_buf(adapter, i) != 0) {
4444 /* Assign correct length to the current fragment */
/* fmp/lmp track the first/last mbuf of a multi-descriptor frame. */
4447 if (adapter->fmp == NULL) {
4448 mp->m_pkthdr.len = len;
4449 adapter->fmp = mp; /* Store the first mbuf */
4452 /* Chain mbuf's together */
4453 mp->m_flags &= ~M_PKTHDR;
4455 * Adjust length of previous mbuf in chain if
4456 * we received less than 4 bytes in the last
4459 if (prev_len_adj > 0) {
4460 adapter->lmp->m_len -= prev_len_adj;
4461 adapter->fmp->m_pkthdr.len -=
4464 adapter->lmp->m_next = mp;
4465 adapter->lmp = adapter->lmp->m_next;
4466 adapter->fmp->m_pkthdr.len += len;
4470 adapter->fmp->m_pkthdr.rcvif = ifp;
4472 em_receive_checksum(adapter, current_desc,
4474 #ifndef __NO_STRICT_ALIGNMENT
4475 if (adapter->max_frame_size >
4476 (MCLBYTES - ETHER_ALIGN) &&
4477 em_fixup_rx(adapter) != 0)
/* Propagate the hardware-stripped VLAN tag to the stack. */
4480 if (status & E1000_RXD_STAT_VP) {
4481 #if __FreeBSD_version < 700000
4482 VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4483 (le16toh(current_desc->special) &
4484 E1000_RXD_SPC_VLAN_MASK));
4486 adapter->fmp->m_pkthdr.ether_vtag =
4487 (le16toh(current_desc->special) &
4488 E1000_RXD_SPC_VLAN_MASK);
4489 adapter->fmp->m_flags |= M_VLANTAG;
4492 #ifndef __NO_STRICT_ALIGNMENT
4496 adapter->fmp = NULL;
4497 adapter->lmp = NULL;
4502 /* Reuse loaded DMA map and just update mbuf chain */
4503 mp = adapter->rx_buffer_area[i].m_head;
4504 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4505 mp->m_data = mp->m_ext.ext_buf;
4507 if (adapter->max_frame_size <=
4508 (MCLBYTES - ETHER_ALIGN))
4509 m_adj(mp, ETHER_ALIGN);
/* Drop any partial chain accumulated for the rejected frame. */
4510 if (adapter->fmp != NULL) {
4511 m_freem(adapter->fmp);
4512 adapter->fmp = NULL;
4513 adapter->lmp = NULL;
4518 /* Zero out the receive descriptors status. */
4519 current_desc->status = 0;
4520 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4521 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4523 /* Advance our pointers to the next descriptor. */
4524 if (++i == adapter->num_rx_desc)
4526 /* Call into the stack */
/*
 * The RX lock is dropped around if_input() so the stack can run
 * without holding our lock; the ring index is re-read afterwards.
 */
4528 adapter->next_rx_desc_to_check = i;
4529 EM_RX_UNLOCK(adapter);
4530 (*ifp->if_input)(ifp, m);
4531 EM_RX_LOCK(adapter);
4533 i = adapter->next_rx_desc_to_check;
4535 current_desc = &adapter->rx_desc_base[i];
4537 adapter->next_rx_desc_to_check = i;
4539 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
4541 i = adapter->num_rx_desc - 1;
4542 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4543 EM_RX_UNLOCK(adapter);
4547 #ifndef __NO_STRICT_ALIGNMENT
4549 * When jumbo frames are enabled we should realign entire payload on
4550 * architectures with strict alignment. This is serious design mistake of 8254x
4551 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4552 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4553 * payload. On architectures without strict alignment restrictions 8254x still
4554 * performs unaligned memory access which would reduce the performance too.
4555 * To avoid copying over an entire frame to align, we allocate a new mbuf and
4556 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4557 * existing mbuf chain.
4559 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
4560 * not used at all on architectures with strict alignment.
4563 em_fixup_rx(struct adapter *adapter)
/* Room in the cluster: shift the data up by ETHER_HDR_LEN in place. */
4570 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4571 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4572 m->m_data += ETHER_HDR_LEN;
/* Otherwise prepend a fresh mbuf carrying just the ethernet header. */
4574 MGETHDR(n, M_DONTWAIT, MT_DATA);
4576 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4577 m->m_data += ETHER_HDR_LEN;
4578 m->m_len -= ETHER_HDR_LEN;
4579 n->m_len = ETHER_HDR_LEN;
4580 M_MOVE_PKTHDR(n, m);
/* Allocation failed: drop the whole frame and count it. */
4584 adapter->dropped_pkts++;
4585 m_freem(adapter->fmp);
4586 adapter->fmp = NULL;
4595 /*********************************************************************
4597 * Verify that the hardware indicated that the checksum is valid.
4598 * Inform the stack about the status of checksum so that stack
4599 * doesn't spend time verifying the checksum.
4601 *********************************************************************/
4603 em_receive_checksum(struct adapter *adapter,
4604 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4606 /* 82543 or newer only */
4607 if ((adapter->hw.mac.type < e1000_82543) ||
4608 /* Ignore Checksum bit is set */
4609 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4610 mp->m_pkthdr.csum_flags = 0;
/* IP checksum was verified by the hardware. */
4614 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4616 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4617 /* IP Checksum Good */
4618 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4619 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4622 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum verified; pseudo-header included (0xffff). */
4626 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4628 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4629 mp->m_pkthdr.csum_flags |=
4630 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4631 mp->m_pkthdr.csum_data = htons(0xffff);
4636 #if __FreeBSD_version >= 700029
4638 * This routine is run via a vlan
4642 em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4644 struct adapter *adapter = ifp->if_softc;
4647 if (ifp->if_softc != arg) /* Not our event */
4650 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
/* Set the tag's bit in the shadow VLAN filter table (32 ids/word). */
4653 index = (vtag >> 5) & 0x7F;
4655 em_shadow_vfta[index] |= (1 << bit);
4656 ++adapter->num_vlans;
4657 /* Re-init to load the changes */
4662 * This routine is run via a vlan
4666 em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
/*
 * VLAN-unregistration event callback: mirror of em_register_vlan --
 * clear the tag's bit in the shadow VFTA and decrement num_vlans.
 */
4668 struct adapter *adapter = ifp->if_softc;
4671 if (ifp->if_softc != arg)
4674 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* Same word/bit addressing as em_register_vlan */
4677 index = (vtag >> 5) & 0x7F;
4679 em_shadow_vfta[index] &= ~(1 << bit);
4680 --adapter->num_vlans;
4681 /* Re-init to load the changes */
4686 em_setup_vlan_hw_support(struct adapter *adapter)
/*
 * Re-program VLAN hardware state after a soft reset: repopulate the VFTA
 * from the shadow copy, enable tag stripping (CTRL.VME), enable the VLAN
 * filter (RCTL.VFE), and widen the long-packet limit for the VLAN tag.
 */
4688 struct e1000_hw *hw = &adapter->hw;
4692 ** We get here thru init_locked, meaning
4693 ** a soft reset, this has already cleared
4694 ** the VFTA and other state, so if there
4695 ** have been no vlan's registered do nothing.
4697 if (adapter->num_vlans == 0)
4701 ** A soft reset zero's out the VFTA, so
4702 ** we need to repopulate it now.
4704 for (int i = 0; i < EM_VFTA_SIZE; i++)
4705 if (em_shadow_vfta[i] != 0)
4706 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4707 i, em_shadow_vfta[i]);
/* Enable VLAN tag stripping in the device control register */
4709 reg = E1000_READ_REG(hw, E1000_CTRL);
4710 reg |= E1000_CTRL_VME;
4711 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4713 /* Enable the Filter Table */
4714 reg = E1000_READ_REG(hw, E1000_RCTL);
4715 reg &= ~E1000_RCTL_CFIEN;
4716 reg |= E1000_RCTL_VFE;
4717 E1000_WRITE_REG(hw, E1000_RCTL, reg);
4719 /* Update the frame size */
4720 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4721 adapter->max_frame_size + VLAN_TAG_SIZE);
4726 em_enable_intr(struct adapter *adapter)
/*
 * Unmask device interrupts via IMS; in MSIX mode also program EIAC so
 * the MSIX causes auto-clear, and include them in the enable mask.
 */
4728 struct e1000_hw *hw = &adapter->hw;
4729 u32 ims_mask = IMS_ENABLE_MASK;
4731 if (adapter->msix) {
4732 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4733 ims_mask |= EM_MSIX_MASK;
4735 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
4739 em_disable_intr(struct adapter *adapter)
/* Mask all device interrupts: clear EIAC (MSIX) and write all-ones to IMC. */
4741 struct e1000_hw *hw = &adapter->hw;
4744 E1000_WRITE_REG(hw, EM_EIAC, 0);
4745 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4749 * Bit of a misnomer, what this really means is
4750 * to enable OS management of the system... aka
4751 * to disable special hardware management features
4754 em_init_manageability(struct adapter *adapter)
/*
 * When a management engine is present, hand ARP handling to the OS and
 * (on 82571+) route management packets on ports 623/664 to the host.
 */
4756 /* A shared code workaround */
4757 #define E1000_82542_MANC2H E1000_MANC2H
4758 if (adapter->has_manage) {
4759 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4760 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4762 /* disable hardware interception of ARP */
4763 manc &= ~(E1000_MANC_ARP_EN);
4765 /* enable receiving management packets to the host */
4766 if (adapter->hw.mac.type >= e1000_82571) {
4767 manc |= E1000_MANC_EN_MNG2HOST;
4768 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4769 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4770 manc2h |= E1000_MNG2HOST_PORT_623;
4771 manc2h |= E1000_MNG2HOST_PORT_664;
4772 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4775 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4780 * Give control back to hardware management
4781 * controller if there is one.
4784 em_release_manageability(struct adapter *adapter)
/*
 * Undo em_init_manageability: re-enable firmware ARP interception and
 * stop routing management packets to the host.
 */
4786 if (adapter->has_manage) {
4787 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4789 /* re-enable hardware interception of ARP */
4790 manc |= E1000_MANC_ARP_EN;
4792 if (adapter->hw.mac.type >= e1000_82571)
4793 manc &= ~E1000_MANC_EN_MNG2HOST;
4795 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4800 * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4801 * For ASF and Pass Through versions of f/w this means
4802 * that the driver is loaded. For AMT version type f/w
4803 * this means that the network i/f is open.
4806 em_get_hw_control(struct adapter *adapter)
/*
 * Set the DRV_LOAD bit -- in SWSM on 82573, in CTRL_EXT otherwise -- to
 * tell the firmware the driver has taken control of the hardware.
 */
4810 if (adapter->hw.mac.type == e1000_82573) {
4811 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4812 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4813 swsm | E1000_SWSM_DRV_LOAD);
4817 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4818 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4819 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4824 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4825 * For ASF and Pass Through versions of f/w this means that
4826 * the driver is no longer loaded. For AMT versions of the
4827 * f/w this means that the network i/f is closed.
4830 em_release_hw_control(struct adapter *adapter)
/*
 * Clear the DRV_LOAD bit so firmware regains hardware control; a no-op
 * when no management engine is present.
 */
4834 if (!adapter->has_manage)
4837 if (adapter->hw.mac.type == e1000_82573) {
4838 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4839 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4840 swsm & ~E1000_SWSM_DRV_LOAD);
4844 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4845 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4846 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4851 em_is_valid_ether_addr(u8 *addr)
/*
 * Reject multicast/broadcast addresses (low bit of the first octet set)
 * and the all-zero address. Fragmentary here; the return statements are
 * elided from this listing.
 */
4853 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4855 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4863 ** Parse the interface capabilities with regard
4864 ** to both system management and wake-on-lan for
4868 em_get_wakeup(device_t dev)
/*
 * Determine wake-on-lan / manageability capabilities: read the APM-enable
 * bit from NVM (or from the WUC register on ICH parts), default wol to
 * magic+multicast wake when enabled, then apply per-device quirks where
 * the EEPROM is wrong or a particular port cannot support WoL.
 */
4870 struct adapter *adapter = device_get_softc(dev);
4871 u16 eeprom_data = 0, device_id, apme_mask;
4873 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4874 apme_mask = EM_EEPROM_APME;
4876 switch (adapter->hw.mac.type) {
4881 e1000_read_nvm(&adapter->hw,
4882 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4883 apme_mask = EM_82544_APME;
4887 adapter->has_amt = TRUE;
4890 case e1000_82546_rev_3:
4893 case e1000_80003es2lan:
/* Dual-port parts keep port B's wake config in a separate NVM word */
4894 if (adapter->hw.bus.func == 1) {
4895 e1000_read_nvm(&adapter->hw,
4896 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4899 e1000_read_nvm(&adapter->hw,
4900 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4904 case e1000_ich10lan:
/* ICH-family parts report APME through the WUC register, not NVM */
4906 apme_mask = E1000_WUC_APME;
4907 adapter->has_amt = TRUE;
4908 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
4911 e1000_read_nvm(&adapter->hw,
4912 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
/* APM enabled: default to waking on magic packet and multicast */
4915 if (eeprom_data & apme_mask)
4916 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4918 * We have the eeprom settings, now apply the special cases
4919 * where the eeprom may be wrong or the board won't support
4920 * wake on lan on a particular port
4922 device_id = pci_get_device(dev);
4923 switch (device_id) {
4924 case E1000_DEV_ID_82546GB_PCIE:
4927 case E1000_DEV_ID_82546EB_FIBER:
4928 case E1000_DEV_ID_82546GB_FIBER:
4929 case E1000_DEV_ID_82571EB_FIBER:
4930 /* Wake events only supported on port A for dual fiber
4931 * regardless of eeprom setting */
4932 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4933 E1000_STATUS_FUNC_1)
4936 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4937 case E1000_DEV_ID_82571EB_QUAD_COPPER:
4938 case E1000_DEV_ID_82571EB_QUAD_FIBER:
4939 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
4940 /* if quad port adapter, disable WoL on all but port A */
4941 if (global_quad_port_a != 0)
4943 /* Reset for multiple quad port adapters */
4944 if (++global_quad_port_a == 4)
4945 global_quad_port_a = 0;
4953 * Enable PCI Wake On Lan capability
4956 em_enable_wakeup(device_t dev)
/*
 * Arm the adapter for wake-on-lan at suspend: program WUC/WUFC (or the
 * PHY on pchlan parts), apply ICH/fiber workarounds, and finally enable
 * PME in the PCI power-management status register. Bails out early if
 * the device exposes no PCI power-management capability.
 */
4958 struct adapter *adapter = device_get_softc(dev);
4959 struct ifnet *ifp = adapter->ifp;
4960 u32 pmc, ctrl, ctrl_ext, rctl;
/* No PM capability: nothing to arm */
4963 if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
4966 /* Advertise the wakeup capability */
4967 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4968 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4969 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4970 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4972 /* ICH workaround code */
4973 if ((adapter->hw.mac.type == e1000_ich8lan) ||
4974 (adapter->hw.mac.type == e1000_pchlan) ||
4975 (adapter->hw.mac.type == e1000_ich9lan) ||
4976 (adapter->hw.mac.type == e1000_ich10lan)) {
4977 e1000_disable_gig_wol_ich8lan(&adapter->hw);
4978 e1000_hv_phy_powerdown_workaround_ich8lan(&adapter->hw);
4981 /* Keep the laser running on Fiber adapters */
4982 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4983 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4984 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4985 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4986 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4990 ** Determine type of Wakeup: note that wol
4991 ** is set with all bits on by default.
4993 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4994 adapter->wol &= ~E1000_WUFC_MAG;
4996 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4997 adapter->wol &= ~E1000_WUFC_MC;
/* Multicast wake needs multicast-promiscuous reception (RCTL.MPE) */
4999 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
5000 rctl |= E1000_RCTL_MPE;
5001 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
/* pchlan parts implement wake-up in the PHY rather than the MAC */
5004 if (adapter->hw.mac.type == e1000_pchlan) {
5005 if (em_enable_phy_wakeup(adapter))
5008 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
5009 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
5012 if (adapter->hw.phy.type == e1000_phy_igp_3)
5013 e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
/* Enable PME in PCI PM status only when WoL is administratively on */
5016 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
5017 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
5018 if (ifp->if_capenable & IFCAP_WOL)
5019 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
5020 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
5026 ** WOL in the newer chipset interfaces (pchlan)
5027 ** require things to be copied into the phy
5030 em_enable_phy_wakeup(struct adapter *adapter)
/*
 * Mirror the MAC's wake-up configuration into the PHY (for pchlan parts):
 * copy the receive-address (RAR) and multicast (MTA) tables, translate
 * the relevant RCTL/CTRL bits into BM_RCTL, program WUC/WUFC on both MAC
 * and PHY, then set the host-wakeup enable bits via raw MDIC accesses on
 * the BM_WUC_ENABLE_PAGE register page.
 */
5032 struct e1000_hw *hw = &adapter->hw;
5036 /* copy MAC RARs to PHY RARs */
5037 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
5038 mreg = E1000_READ_REG(hw, E1000_RAL(i));
5039 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
5040 e1000_write_phy_reg(hw, BM_RAR_M(i),
5041 (u16)((mreg >> 16) & 0xFFFF));
5042 mreg = E1000_READ_REG(hw, E1000_RAH(i));
5043 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
5044 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
5045 (u16)((mreg >> 16) & 0xFFFF));
5048 /* copy MAC MTA to PHY MTA */
5049 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5050 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5051 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
5052 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
5053 (u16)((mreg >> 16) & 0xFFFF));
5056 /* configure PHY Rx Control register */
5057 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
5058 mreg = E1000_READ_REG(hw, E1000_RCTL);
5059 if (mreg & E1000_RCTL_UPE)
5060 preg |= BM_RCTL_UPE;
5061 if (mreg & E1000_RCTL_MPE)
5062 preg |= BM_RCTL_MPE;
5063 preg &= ~(BM_RCTL_MO_MASK);
/* Translate the multicast-offset field into the PHY's bit positions */
5064 if (mreg & E1000_RCTL_MO_3)
5065 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5066 << BM_RCTL_MO_SHIFT);
5067 if (mreg & E1000_RCTL_BAM)
5068 preg |= BM_RCTL_BAM;
5069 if (mreg & E1000_RCTL_PMCF)
5070 preg |= BM_RCTL_PMCF;
5071 mreg = E1000_READ_REG(hw, E1000_CTRL);
5072 if (mreg & E1000_CTRL_RFCE)
5073 preg |= BM_RCTL_RFCE;
5074 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
5076 /* enable PHY wakeup in MAC register */
5077 E1000_WRITE_REG(hw, E1000_WUC,
5078 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5079 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
5081 /* configure and enable PHY wakeup in PHY registers */
5082 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
5083 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5085 /* activate PHY wakeup */
5086 ret = hw->phy.ops.acquire(hw);
5088 printf("Could not acquire PHY\n");
/* Select the wake-up-control page, then read-modify-write the enable reg */
5091 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
5092 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
5093 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
5095 printf("Could not read PHY page 769\n");
5098 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5099 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
5101 printf("Could not set PHY Host Wakeup bit\n");
5103 hw->phy.ops.release(hw);
5109 /*********************************************************************
5110 * 82544 Coexistence issue workaround.
5111 * There are 2 issues.
5112 * 1. Transmit Hang issue.
5113 * To detect this issue, following equation can be used...
5114 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5115 * If SUM[3:0] is in between 1 to 4, we will have this issue.
5118 * To detect this issue, following equation can be used...
5119 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5120 * If SUM[3:0] is in between 9 to c, we will have this issue.
5124 * Make sure we do not have ending address
5125 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
5127 *************************************************************************/
5129 em_fill_descriptors (bus_addr_t address, u32 length,
5130 PDESC_ARRAY desc_array)
/*
 * 82544 errata workaround (see block comment above): if the segment's
 * ending alignment (low 3 address bits + low 4 length bits) falls in the
 * hang range (1-4) or DAC range (9-0xC), split it into two descriptors:
 * the body and the final 4 bytes. Returns the number of descriptors
 * filled into desc_array (1 or 2).
 */
5132 u32 safe_terminator;
5134 /* Since issue is sensitive to length and address.*/
5135 /* Let us first check the address...*/
5137 desc_array->descriptor[0].address = address;
5138 desc_array->descriptor[0].length = length;
5139 desc_array->elements = 1;
5140 return (desc_array->elements);
/* SUM[3:0] = ADDR[2:0] + SIZE[3:0], per the errata equation */
5142 safe_terminator = (u32)((((u32)address & 0x7) +
5143 (length & 0xF)) & 0xF);
5144 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5145 if (safe_terminator == 0 ||
5146 (safe_terminator > 4 &&
5147 safe_terminator < 9) ||
5148 (safe_terminator > 0xC &&
5149 safe_terminator <= 0xF)) {
5150 desc_array->descriptor[0].address = address;
5151 desc_array->descriptor[0].length = length;
5152 desc_array->elements = 1;
5153 return (desc_array->elements);
/* Problematic alignment: carve the last 4 bytes into a second descriptor */
5156 desc_array->descriptor[0].address = address;
5157 desc_array->descriptor[0].length = length - 4;
5158 desc_array->descriptor[1].address = address + (length - 4);
5159 desc_array->descriptor[1].length = 4;
5160 desc_array->elements = 2;
5161 return (desc_array->elements);
5164 /**********************************************************************
5166 * Update the board statistics counters.
5168 **********************************************************************/
5170 em_update_stats_counters(struct adapter *adapter)
/*
 * Accumulate the hardware statistics registers into the driver's
 * counters and export the aggregate error/collision totals to the ifnet.
 * Symbol/sequence error counters are only meaningful on copper media or
 * when the link is up, so they are gated on that condition.
 */
5174 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
5175 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
5176 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
5177 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
5179 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
5180 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
5181 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
5182 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
5184 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
5185 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
5186 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
5187 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
5188 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
5189 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
5190 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
5191 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
5192 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
5193 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
5194 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
5195 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
5196 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
5197 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
5198 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
5199 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
5200 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
5201 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5202 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5203 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5205 /* For the 64-bit byte counters the low dword must be read first. */
5206 /* Both registers clear on the read of the high dword */
5208 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
5209 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
5211 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5212 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5213 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5214 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5215 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5217 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5218 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5220 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5221 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5222 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5223 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5224 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5225 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5226 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5227 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5228 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5229 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
/* Counters below only exist on 82543 and newer MACs */
5231 if (adapter->hw.mac.type >= e1000_82543) {
5232 adapter->stats.algnerrc +=
5233 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5234 adapter->stats.rxerrc +=
5235 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5236 adapter->stats.tncrs +=
5237 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5238 adapter->stats.cexterr +=
5239 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5240 adapter->stats.tsctc +=
5241 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5242 adapter->stats.tsctfc +=
5243 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
/* Export aggregated totals to the ifnet for netstat and friends */
5247 ifp->if_collisions = adapter->stats.colc;
5250 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5251 adapter->stats.crcerrs + adapter->stats.algnerrc +
5252 adapter->stats.ruc + adapter->stats.roc +
5253 adapter->stats.mpc + adapter->stats.cexterr;
5256 ifp->if_oerrors = adapter->stats.ecol +
5257 adapter->stats.latecol + adapter->watchdog_events;
5261 /**********************************************************************
5263 * This routine is called only when em_display_debug_stats is enabled.
5264 * This routine provides a way to take a look at important statistics
5265 * maintained by the driver and hardware.
5267 **********************************************************************/
5269 em_print_debug_info(struct adapter *adapter)
/*
 * Print a snapshot of driver and hardware debug state -- key registers
 * (CTRL/RCTL/PBA, delay timers, ring head/tail) and the driver's soft
 * failure counters. Invoked from the debug_info sysctl handler.
 */
5271 device_t dev = adapter->dev;
5272 u8 *hw_addr = adapter->hw.hw_addr;
5274 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5275 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5276 E1000_READ_REG(&adapter->hw, E1000_CTRL),
5277 E1000_READ_REG(&adapter->hw, E1000_RCTL));
/* PBA: high word is Tx packet-buffer KB, low word is Rx KB */
5278 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5279 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5280 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5281 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5282 adapter->hw.fc.high_water,
5283 adapter->hw.fc.low_water);
5284 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5285 E1000_READ_REG(&adapter->hw, E1000_TIDV),
5286 E1000_READ_REG(&adapter->hw, E1000_TADV));
5287 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5288 E1000_READ_REG(&adapter->hw, E1000_RDTR),
5289 E1000_READ_REG(&adapter->hw, E1000_RADV));
5290 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5291 (long long)adapter->tx_fifo_wrk_cnt,
5292 (long long)adapter->tx_fifo_reset_cnt);
5293 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5294 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5295 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5296 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5297 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5298 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5299 device_printf(dev, "Num Tx descriptors avail = %d\n",
5300 adapter->num_tx_desc_avail);
5301 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5302 adapter->no_tx_desc_avail1);
5303 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5304 adapter->no_tx_desc_avail2);
5305 device_printf(dev, "Std mbuf failed = %ld\n",
5306 adapter->mbuf_alloc_failed);
5307 device_printf(dev, "Std mbuf cluster failed = %ld\n",
5308 adapter->mbuf_cluster_failed);
5309 device_printf(dev, "Driver dropped packets = %ld\n",
5310 adapter->dropped_pkts);
5311 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5312 adapter->no_tx_dma_setup);
5316 em_print_hw_stats(struct adapter *adapter)
/*
 * Print the accumulated hardware statistics (see
 * em_update_stats_counters). Invoked from the stats sysctl handler.
 * Symbol errors are compiled out unless DEBUG_HW is set.
 */
5318 device_t dev = adapter->dev;
5320 device_printf(dev, "Excessive collisions = %lld\n",
5321 (long long)adapter->stats.ecol);
5322 #if (DEBUG_HW > 0) /* Dont output these errors normally */
5323 device_printf(dev, "Symbol errors = %lld\n",
5324 (long long)adapter->stats.symerrs);
5326 device_printf(dev, "Sequence errors = %lld\n",
5327 (long long)adapter->stats.sec);
5328 device_printf(dev, "Defer count = %lld\n",
5329 (long long)adapter->stats.dc);
5330 device_printf(dev, "Missed Packets = %lld\n",
5331 (long long)adapter->stats.mpc);
5332 device_printf(dev, "Receive No Buffers = %lld\n",
5333 (long long)adapter->stats.rnbc);
5334 /* RLEC is inaccurate on some hardware, calculate our own. */
5335 device_printf(dev, "Receive Length Errors = %lld\n",
5336 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5337 device_printf(dev, "Receive errors = %lld\n",
5338 (long long)adapter->stats.rxerrc);
5339 device_printf(dev, "Crc errors = %lld\n",
5340 (long long)adapter->stats.crcerrs);
5341 device_printf(dev, "Alignment errors = %lld\n",
5342 (long long)adapter->stats.algnerrc);
5343 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5344 (long long)adapter->stats.cexterr);
5345 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5346 device_printf(dev, "watchdog timeouts = %ld\n",
5347 adapter->watchdog_events);
5348 device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5349 " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5350 adapter->tx_irq , adapter->link_irq);
5351 device_printf(dev, "XON Rcvd = %lld\n",
5352 (long long)adapter->stats.xonrxc);
5353 device_printf(dev, "XON Xmtd = %lld\n",
5354 (long long)adapter->stats.xontxc);
5355 device_printf(dev, "XOFF Rcvd = %lld\n",
5356 (long long)adapter->stats.xoffrxc);
5357 device_printf(dev, "XOFF Xmtd = %lld\n",
5358 (long long)adapter->stats.xofftxc);
5359 device_printf(dev, "Good Packets Rcvd = %lld\n",
5360 (long long)adapter->stats.gprc);
5361 device_printf(dev, "Good Packets Xmtd = %lld\n",
5362 (long long)adapter->stats.gptc);
5363 device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5364 (long long)adapter->stats.tsctc);
5365 device_printf(dev, "TSO Contexts Failed = %lld\n",
5366 (long long)adapter->stats.tsctfc);
5369 /**********************************************************************
5371 * This routine provides a way to dump out the adapter eeprom,
5372 * often a useful debug/service tool. This only dumps the first
5373 * 32 words, stuff that matters is in that extent.
5375 **********************************************************************/
5377 em_print_nvm_info(struct adapter *adapter)
/*
 * Dump the first 32 NVM (EEPROM) words in hex, 8 words per row, as a
 * debug/service aid -- the fields that matter live in that range.
 */
5382 /* Its a bit crude, but it gets the job done */
5383 printf("\nInterface EEPROM Dump:\n");
5384 printf("Offset\n0x0000 ");
5385 for (i = 0, j = 0; i < 32; i++, j++) {
5386 if (j == 8) { /* Make the offset block */
5388 printf("\n0x00%x0 ",row);
5390 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5391 printf("%04x ", eeprom_data);
5397 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler: writing one trigger value prints driver debug info,
 * another dumps the first 32 EEPROM words (see em_print_nvm_info).
 * Reads (no new value) fall through without side effects.
 */
5399 struct adapter *adapter;
5404 error = sysctl_handle_int(oidp, &result, 0, req);
5406 if (error || !req->newptr)
5410 adapter = (struct adapter *)arg1;
5411 em_print_debug_info(adapter);
5414 * This value will cause a hex dump of the
5415 * first 32 16-bit words of the EEPROM to
5419 adapter = (struct adapter *)arg1;
5420 em_print_nvm_info(adapter);
5428 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler: writing the trigger value prints the accumulated
 * hardware statistics via em_print_hw_stats.
 */
5430 struct adapter *adapter;
5435 error = sysctl_handle_int(oidp, &result, 0, req);
5437 if (error || !req->newptr)
5441 adapter = (struct adapter *)arg1;
5442 em_print_hw_stats(adapter);
5449 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler for interrupt-delay tunables: validate the microsecond
 * value, convert to device ticks, and write the low 16 bits of the
 * associated delay register under the core lock. The TIDV offset also
 * toggles the IDE bit in the cached transmit-descriptor command.
 */
5451 struct em_int_delay_info *info;
5452 struct adapter *adapter;
5458 info = (struct em_int_delay_info *)arg1;
5459 usecs = info->value;
5460 error = sysctl_handle_int(oidp, &usecs, 0, req);
5461 if (error != 0 || req->newptr == NULL)
/* Reject values outside what the 16-bit timer register can express */
5463 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5465 info->value = usecs;
5466 ticks = EM_USECS_TO_TICKS(usecs);
5468 adapter = info->adapter;
5470 EM_CORE_LOCK(adapter);
5471 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5472 regval = (regval & ~0xffff) | (ticks & 0xffff);
5473 /* Handle a few special cases. */
5474 switch (info->offset) {
5479 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5480 /* Don't write 0 into the TIDV register. */
5483 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5486 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5487 EM_CORE_UNLOCK(adapter);
5492 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5493 const char *description, struct em_int_delay_info *info,
5494 int offset, int value)
/*
 * Register a read-write integer sysctl backed by em_sysctl_int_delay,
 * seeding the info struct with its register offset and initial value.
 */
5496 info->adapter = adapter;
5497 info->offset = offset;
5498 info->value = value;
5499 SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
5500 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5501 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5502 info, 0, em_sysctl_int_delay, "I", description);
5505 #ifndef EM_LEGACY_IRQ
5507 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5508 const char *description, int *limit, int value)
5511 SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
5512 SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
5513 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);