1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#if __FreeBSD_version >= 800000
#include <sys/buf_ring.h>
#endif
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_types.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <sys/in_cksum.h>
#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include "e1000_api.h"
#include "e1000_82571.h"

#include "ifcap_defines.h" // XXX
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int em_display_debug_stats = 0;

/*********************************************************************
 *  Driver version
 *********************************************************************/
char em_driver_version[] = "6.9.25";
99 /*********************************************************************
100 * PCI Device ID Table
102 * Used by probe to select devices to load on
103 * Last field stores an index into e1000_strings
104 * Last entry must be all 0s
106 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
107 *********************************************************************/
109 static em_vendor_info_t em_vendor_info_array[] =
111 /* Intel(R) PRO/1000 Network Connection */
112 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
124 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
126 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
129 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
134 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
140 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
150 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
151 PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
154 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
155 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
157 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
159 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
160 { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
161 PCI_ANY_ID, PCI_ANY_ID, 0},
162 { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
163 PCI_ANY_ID, PCI_ANY_ID, 0},
164 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
165 PCI_ANY_ID, PCI_ANY_ID, 0},
166 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
167 PCI_ANY_ID, PCI_ANY_ID, 0},
168 { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
169 PCI_ANY_ID, PCI_ANY_ID, 0},
170 { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
171 PCI_ANY_ID, PCI_ANY_ID, 0},
172 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
173 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
175 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
177 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
178 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
179 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
180 { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0},
181 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
182 PCI_ANY_ID, PCI_ANY_ID, 0},
183 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
184 PCI_ANY_ID, PCI_ANY_ID, 0},
185 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
186 PCI_ANY_ID, PCI_ANY_ID, 0},
187 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
188 PCI_ANY_ID, PCI_ANY_ID, 0},
189 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
190 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
191 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
192 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
193 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
194 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
195 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
196 { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0},
197 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
198 { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
199 { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
200 { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
201 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0},
202 { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
203 { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
204 { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
205 { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0},
206 { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0},
207 { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0},
208 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
209 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
210 { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
211 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
212 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
213 { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
214 { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0},
215 { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0},
216 { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0},
217 /* required last entry */
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *********************************************************************/

static char *em_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
229 /*********************************************************************
230 * Function prototypes
231 *********************************************************************/
232 static int em_probe(device_t);
233 static int em_attach(device_t);
234 static int em_detach(device_t);
235 static int em_shutdown(device_t);
236 static int em_suspend(device_t);
237 static int em_resume(device_t);
238 static void em_start(struct ifnet *);
239 static void em_start_locked(struct ifnet *ifp);
240 #if __FreeBSD_version >= 800000
241 static int em_mq_start(struct ifnet *, struct mbuf *);
242 static int em_mq_start_locked(struct ifnet *, struct mbuf *);
243 static void em_qflush(struct ifnet *);
245 static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
246 static void em_init(void *);
247 static void em_init_locked(struct adapter *);
248 static void em_stop(void *);
249 static void em_media_status(struct ifnet *, struct ifmediareq *);
250 static int em_media_change(struct ifnet *);
251 static void em_identify_hardware(struct adapter *);
252 static int em_allocate_pci_resources(struct adapter *);
253 static int em_allocate_legacy(struct adapter *adapter);
254 static int em_allocate_msix(struct adapter *adapter);
255 static int em_setup_msix(struct adapter *);
256 static void em_free_pci_resources(struct adapter *);
257 static void em_local_timer(void *);
258 static int em_hardware_init(struct adapter *);
259 static void em_setup_interface(device_t, struct adapter *);
260 static void em_setup_transmit_structures(struct adapter *);
261 static void em_initialize_transmit_unit(struct adapter *);
262 static int em_setup_receive_structures(struct adapter *);
263 static void em_initialize_receive_unit(struct adapter *);
264 static void em_enable_intr(struct adapter *);
265 static void em_disable_intr(struct adapter *);
266 static void em_free_transmit_structures(struct adapter *);
267 static void em_free_receive_structures(struct adapter *);
268 static void em_update_stats_counters(struct adapter *);
269 static void em_txeof(struct adapter *);
270 static void em_tx_purge(struct adapter *);
271 static int em_allocate_receive_structures(struct adapter *);
272 static int em_allocate_transmit_structures(struct adapter *);
273 static int em_rxeof(struct adapter *, int);
274 #ifndef __NO_STRICT_ALIGNMENT
275 static int em_fixup_rx(struct adapter *);
277 static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
279 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
282 static bool em_tso_setup(struct adapter *, struct mbuf *,
285 static void em_set_promisc(struct adapter *);
286 static void em_disable_promisc(struct adapter *);
287 static void em_set_multi(struct adapter *);
288 static void em_print_hw_stats(struct adapter *);
289 static void em_update_link_status(struct adapter *);
290 static int em_get_buf(struct adapter *, int);
292 static void em_register_vlan(void *, struct ifnet *, u16);
293 static void em_unregister_vlan(void *, struct ifnet *, u16);
294 static void em_setup_vlan_hw_support(struct adapter *);
296 static int em_xmit(struct adapter *, struct mbuf **);
297 static void em_smartspeed(struct adapter *);
298 static int em_82547_fifo_workaround(struct adapter *, int);
299 static void em_82547_update_fifo_head(struct adapter *, int);
300 static int em_82547_tx_fifo_reset(struct adapter *);
301 static void em_82547_move_tail(void *);
302 static int em_dma_malloc(struct adapter *, bus_size_t,
303 struct em_dma_alloc *, int);
304 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
305 static void em_print_debug_info(struct adapter *);
306 static void em_print_nvm_info(struct adapter *);
307 static int em_is_valid_ether_addr(u8 *);
308 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
309 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
310 static u32 em_fill_descriptors (bus_addr_t address, u32 length,
311 PDESC_ARRAY desc_array);
312 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
313 static void em_add_int_delay_sysctl(struct adapter *, const char *,
314 const char *, struct em_int_delay_info *, int, int);
315 /* Management and WOL Support */
316 static void em_init_manageability(struct adapter *);
317 static void em_release_manageability(struct adapter *);
318 static void em_get_hw_control(struct adapter *);
319 static void em_release_hw_control(struct adapter *);
320 static void em_get_wakeup(device_t);
321 static void em_enable_wakeup(device_t);
322 static int em_enable_phy_wakeup(struct adapter *);
325 static void em_intr(void *);
327 static void em_irq_fast(void *);
330 static void em_msix_tx(void *);
331 static void em_msix_rx(void *);
332 static void em_msix_link(void *);
333 static void em_handle_rx(void *context, int pending);
334 static void em_handle_tx(void *context, int pending);
336 static void em_handle_rxtx(void *context, int pending);
337 static void em_handle_link(void *context, int pending);
338 static void em_add_rx_process_limit(struct adapter *, const char *,
339 const char *, int *, int);
340 #endif /* ~EM_LEGACY_IRQ */
342 #ifdef DEVICE_POLLING
343 static poll_handler_t em_poll;
346 /*********************************************************************
347 * FreeBSD Device Interface Entry Points
348 *********************************************************************/
350 static device_method_t em_methods[] = {
351 /* Device interface */
352 DEVMETHOD(device_probe, em_probe),
353 DEVMETHOD(device_attach, em_attach),
354 DEVMETHOD(device_detach, em_detach),
355 DEVMETHOD(device_shutdown, em_shutdown),
356 DEVMETHOD(device_suspend, em_suspend),
357 DEVMETHOD(device_resume, em_resume),
361 static driver_t em_driver = {
362 "em", em_methods, sizeof(struct adapter),
365 static devclass_t em_devclass;
366 DRIVER_MODULE(em, pci, em_driver, em_devclass, NULL, NULL);
367 MODULE_DEPEND(em, pci, 1, 1, 1);
368 MODULE_DEPEND(em, ether, 1, 1, 1);
370 /*********************************************************************
371 * Tunable default values.
372 *********************************************************************/
374 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
375 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
378 /* Allow common code without TSO */
383 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
384 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
385 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
386 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
387 static int em_rxd = EM_DEFAULT_RXD;
388 static int em_txd = EM_DEFAULT_TXD;
389 static int em_smart_pwr_down = FALSE;
390 /* Controls whether promiscuous also shows bad packets */
391 static int em_debug_sbp = FALSE;
392 /* Local switch for MSI/MSIX */
393 static int em_enable_msi = TRUE;
395 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
396 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
397 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
398 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
399 TUNABLE_INT("hw.em.rxd", &em_rxd);
400 TUNABLE_INT("hw.em.txd", &em_txd);
401 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
402 TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
403 TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);
405 #ifndef EM_LEGACY_IRQ
406 /* How many packets rxeof tries to clean at a time */
407 static int em_rx_process_limit = 100;
408 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
411 /* Flow control setting - default to FULL */
412 static int em_fc_setting = e1000_fc_full;
413 TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);
416 ** Shadow VFTA table, this is needed because
417 ** the real vlan filter table gets cleared during
418 ** a soft reset and the driver needs to be able
421 static u32 em_shadow_vfta[EM_VFTA_SIZE];
423 /* Global used in WOL setup with multiport cards */
424 static int global_quad_port_a = 0;
426 /*********************************************************************
427 * Device identification routine
429 * em_probe determines if the driver should be loaded on
430 * adapter based on PCI vendor/device id of the adapter.
432 * return BUS_PROBE_DEFAULT on success, positive on failure
433 *********************************************************************/
436 em_probe(device_t dev)
438 char adapter_name[60];
439 u16 pci_vendor_id = 0;
440 u16 pci_device_id = 0;
441 u16 pci_subvendor_id = 0;
442 u16 pci_subdevice_id = 0;
443 em_vendor_info_t *ent;
445 INIT_DEBUGOUT("em_probe: begin");
447 pci_vendor_id = pci_get_vendor(dev);
448 if (pci_vendor_id != EM_VENDOR_ID)
451 pci_device_id = pci_get_device(dev);
452 pci_subvendor_id = pci_get_subvendor(dev);
453 pci_subdevice_id = pci_get_subdevice(dev);
455 ent = em_vendor_info_array;
456 while (ent->vendor_id != 0) {
457 if ((pci_vendor_id == ent->vendor_id) &&
458 (pci_device_id == ent->device_id) &&
460 ((pci_subvendor_id == ent->subvendor_id) ||
461 (ent->subvendor_id == PCI_ANY_ID)) &&
463 ((pci_subdevice_id == ent->subdevice_id) ||
464 (ent->subdevice_id == PCI_ANY_ID))) {
465 ksprintf(adapter_name, "%s %s",
466 em_strings[ent->index],
468 device_set_desc_copy(dev, adapter_name);
469 return (BUS_PROBE_DEFAULT);
477 /*********************************************************************
478 * Device initialization routine
480 * The attach entry point is called when the driver is being loaded.
481 * This routine identifies the type of hardware, allocates all resources
482 * and initializes the hardware.
484 * return 0 on success, positive on failure
485 *********************************************************************/
488 em_attach(device_t dev)
490 struct adapter *adapter;
494 INIT_DEBUGOUT("em_attach: begin");
496 adapter = device_get_softc(dev);
497 adapter->dev = adapter->osdep.dev = dev;
499 EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
500 EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
501 EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
504 sysctl_ctx_init(&adapter->sysctl_ctx);
505 adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
506 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
507 device_get_nameunit(adapter->dev),
509 if (adapter->sysctl_tree == NULL) {
510 device_printf(adapter->dev, "can't add sysctl node\n");
515 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
516 SYSCTL_CHILDREN(adapter->sysctl_tree),
517 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
518 em_sysctl_debug_info, "I", "Debug Information");
520 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
521 SYSCTL_CHILDREN(adapter->sysctl_tree),
522 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
523 em_sysctl_stats, "I", "Statistics");
525 callout_init(&adapter->timer);
526 callout_init(&adapter->tx_fifo_timer);
528 /* Determine hardware and mac info */
529 em_identify_hardware(adapter);
531 /* Setup PCI resources */
532 if (em_allocate_pci_resources(adapter)) {
533 device_printf(dev, "Allocation of PCI resources failed\n");
539 ** For ICH8 and family we need to
540 ** map the flash memory, and this
541 ** must happen after the MAC is
544 if ((adapter->hw.mac.type == e1000_ich8lan) ||
545 (adapter->hw.mac.type == e1000_pchlan) ||
546 (adapter->hw.mac.type == e1000_ich9lan) ||
547 (adapter->hw.mac.type == e1000_ich10lan)) {
548 int rid = EM_BAR_TYPE_FLASH;
549 adapter->flash = bus_alloc_resource_any(dev,
550 SYS_RES_MEMORY, &rid, RF_ACTIVE);
551 if (adapter->flash == NULL) {
552 device_printf(dev, "Mapping of Flash failed\n");
556 /* This is used in the shared code */
557 adapter->hw.flash_address = (u8 *)adapter->flash;
558 adapter->osdep.flash_bus_space_tag =
559 rman_get_bustag(adapter->flash);
560 adapter->osdep.flash_bus_space_handle =
561 rman_get_bushandle(adapter->flash);
564 /* Do Shared Code initialization */
565 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
566 device_printf(dev, "Setup of Shared code failed\n");
571 e1000_get_bus_info(&adapter->hw);
573 /* Set up some sysctls for the tunable interrupt delays */
574 em_add_int_delay_sysctl(adapter, "rx_int_delay",
575 "receive interrupt delay in usecs", &adapter->rx_int_delay,
576 E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
577 em_add_int_delay_sysctl(adapter, "tx_int_delay",
578 "transmit interrupt delay in usecs", &adapter->tx_int_delay,
579 E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
580 if (adapter->hw.mac.type >= e1000_82540) {
581 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
582 "receive interrupt delay limit in usecs",
583 &adapter->rx_abs_int_delay,
584 E1000_REGISTER(&adapter->hw, E1000_RADV),
585 em_rx_abs_int_delay_dflt);
586 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
587 "transmit interrupt delay limit in usecs",
588 &adapter->tx_abs_int_delay,
589 E1000_REGISTER(&adapter->hw, E1000_TADV),
590 em_tx_abs_int_delay_dflt);
593 #ifndef EM_LEGACY_IRQ
594 /* Sysctls for limiting the amount of work done in the taskqueue */
595 em_add_rx_process_limit(adapter, "rx_processing_limit",
596 "max number of rx packets to process", &adapter->rx_process_limit,
597 em_rx_process_limit);
601 * Validate number of transmit and receive descriptors. It
602 * must not exceed hardware maximum, and must be multiple
603 * of E1000_DBA_ALIGN.
605 if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
606 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
607 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
608 (em_txd < EM_MIN_TXD)) {
609 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
610 EM_DEFAULT_TXD, em_txd);
611 adapter->num_tx_desc = EM_DEFAULT_TXD;
613 adapter->num_tx_desc = em_txd;
614 if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
615 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
616 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
617 (em_rxd < EM_MIN_RXD)) {
618 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
619 EM_DEFAULT_RXD, em_rxd);
620 adapter->num_rx_desc = EM_DEFAULT_RXD;
622 adapter->num_rx_desc = em_rxd;
624 adapter->hw.mac.autoneg = DO_AUTO_NEG;
625 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
626 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
627 adapter->rx_buffer_len = 2048;
629 e1000_init_script_state_82541(&adapter->hw, TRUE);
630 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
633 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
634 adapter->hw.phy.mdix = AUTO_ALL_MODES;
635 adapter->hw.phy.disable_polarity_correction = FALSE;
636 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
640 * Set the frame limits assuming
641 * standard ethernet sized frames.
643 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
644 adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
647 * This controls when hardware reports transmit completion
650 adapter->hw.mac.report_tx_early = 1;
652 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
655 /* Allocate Transmit Descriptor ring */
656 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
657 device_printf(dev, "Unable to allocate tx_desc memory\n");
661 adapter->tx_desc_base =
662 (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
664 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
667 /* Allocate Receive Descriptor ring */
668 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
669 device_printf(dev, "Unable to allocate rx_desc memory\n");
673 adapter->rx_desc_base =
674 (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
677 ** Start from a known state, this is
678 ** important in reading the nvm and
681 e1000_reset_hw(&adapter->hw);
683 /* Make sure we have a good EEPROM before we read from it */
684 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
686 ** Some PCI-E parts fail the first check due to
687 ** the link being in sleep state, call it again,
688 ** if it fails a second time its a real issue.
690 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
692 "The EEPROM Checksum Is Not Valid\n");
698 /* Copy the permanent MAC address out of the EEPROM */
699 if (e1000_read_mac_addr(&adapter->hw) < 0) {
700 device_printf(dev, "EEPROM read error while reading MAC"
706 if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
707 device_printf(dev, "Invalid MAC address\n");
712 /* Initialize the hardware */
713 if (em_hardware_init(adapter)) {
714 device_printf(dev, "Unable to initialize the hardware\n");
719 /* Allocate transmit descriptors and buffers */
720 if (em_allocate_transmit_structures(adapter)) {
721 device_printf(dev, "Could not setup transmit structures\n");
726 /* Allocate receive descriptors and buffers */
727 if (em_allocate_receive_structures(adapter)) {
728 device_printf(dev, "Could not setup receive structures\n");
734 ** Do interrupt configuration
736 if (adapter->msi > 1) /* Do MSI/X */
737 error = em_allocate_msix(adapter);
738 else /* MSI or Legacy */
739 error = em_allocate_legacy(adapter);
744 * Get Wake-on-Lan and Management info for later use
748 /* Setup OS specific network interface */
749 em_setup_interface(dev, adapter);
751 /* Initialize statistics */
752 em_update_stats_counters(adapter);
754 adapter->hw.mac.get_link_status = 1;
755 em_update_link_status(adapter);
757 /* Indicate SOL/IDER usage */
758 if (e1000_check_reset_block(&adapter->hw))
760 "PHY reset is blocked due to SOL/IDER session.\n");
762 /* Do we need workaround for 82544 PCI-X adapter? */
763 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
764 adapter->hw.mac.type == e1000_82544)
765 adapter->pcix_82544 = TRUE;
767 adapter->pcix_82544 = FALSE;
769 /* Register for VLAN events */
770 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
771 em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
772 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
773 em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
775 /* Non-AMT based hardware can now take control from firmware */
776 if (adapter->has_manage && !adapter->has_amt)
777 em_get_hw_control(adapter);
779 /* Tell the stack that the interface is not active */
780 adapter->ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
782 INIT_DEBUGOUT("em_attach: end");
787 em_free_transmit_structures(adapter);
790 em_release_hw_control(adapter);
791 em_dma_free(adapter, &adapter->rxdma);
793 em_dma_free(adapter, &adapter->txdma);
796 em_free_pci_resources(adapter);
798 sysctl_ctx_free(&adapter->sysctl_ctx);
799 EM_TX_LOCK_DESTROY(adapter);
800 EM_RX_LOCK_DESTROY(adapter);
801 EM_CORE_LOCK_DESTROY(adapter);
806 /*********************************************************************
807 * Device removal routine
809 * The detach entry point is called when the driver is being removed.
810 * This routine stops the adapter and deallocates all the resources
811 * that were allocated for driver operation.
813 * return 0 on success, positive on failure
814 *********************************************************************/
817 em_detach(device_t dev)
819 struct adapter *adapter = device_get_softc(dev);
821 INIT_DEBUGOUT("em_detach: begin");
823 /* Make sure VLANS are not using driver */
824 if (adapter->ifp->if_vlantrunks != NULL) {
825 device_printf(dev,"Vlan in use, detach first\n");
829 #ifdef DEVICE_POLLING
830 if (ifp->if_capenable & IFCAP_POLLING)
831 ether_poll_deregister(ifp);
834 EM_CORE_LOCK(adapter);
836 adapter->in_detach = 1;
838 e1000_phy_hw_reset(&adapter->hw);
840 em_release_manageability(adapter);
842 EM_TX_UNLOCK(adapter);
843 EM_CORE_UNLOCK(adapter);
845 /* Unregister VLAN events */
846 if (adapter->vlan_attach != NULL)
847 EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
848 if (adapter->vlan_detach != NULL)
849 EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
851 ether_ifdetach(adapter->ifp);
852 callout_stop(&adapter->timer);
853 callout_stop(&adapter->tx_fifo_timer);
855 em_free_pci_resources(adapter);
856 bus_generic_detach(dev);
858 em_free_transmit_structures(adapter);
859 em_free_receive_structures(adapter);
861 /* Free Transmit Descriptor ring */
862 if (adapter->tx_desc_base) {
863 em_dma_free(adapter, &adapter->txdma);
864 adapter->tx_desc_base = NULL;
867 /* Free Receive Descriptor ring */
868 if (adapter->rx_desc_base) {
869 em_dma_free(adapter, &adapter->rxdma);
870 adapter->rx_desc_base = NULL;
873 em_release_hw_control(adapter);
874 sysctl_ctx_free(&adapter->sysctl_ctx);
875 EM_TX_LOCK_DESTROY(adapter);
876 EM_RX_LOCK_DESTROY(adapter);
877 EM_CORE_LOCK_DESTROY(adapter);
882 /*********************************************************************
884 * Shutdown entry point
886 **********************************************************************/
889 em_shutdown(device_t dev)
891 return em_suspend(dev);
895 * Suspend/resume device methods.
898 em_suspend(device_t dev)
900 struct adapter *adapter = device_get_softc(dev);
902 EM_CORE_LOCK(adapter);
904 em_release_manageability(adapter);
905 em_release_hw_control(adapter);
906 em_enable_wakeup(dev);
908 EM_CORE_UNLOCK(adapter);
910 return bus_generic_suspend(dev);
914 em_resume(device_t dev)
916 struct adapter *adapter = device_get_softc(dev);
917 struct ifnet *ifp = adapter->ifp;
919 EM_CORE_LOCK(adapter);
920 em_init_locked(adapter);
921 em_init_manageability(adapter);
922 EM_CORE_UNLOCK(adapter);
925 return bus_generic_resume(dev);
929 /*********************************************************************
930 * Transmit entry point
932 * em_start is called by the stack to initiate a transmit.
933 * The driver will remain in this routine as long as there are
934 * packets to transmit and transmit resources are available.
935 * In case resources are not available stack is notified and
936 * the packet is requeued.
937 **********************************************************************/
/*
 * Multiqueue transmit, TX lock held (asserted below).
 *
 * Visible fast path: if the interface is up, the link is active, the
 * buf_ring does not require queueing, and enough descriptors are free,
 * transmit `m` directly via em_xmit(); otherwise enqueue it on the
 * drbr ring. Afterwards the queue is drained: dequeue, em_xmit(), tap
 * BPF, arm the watchdog; when descriptors run low, set IFF_OACTIVE so
 * the stack stops handing us packets.
 * NOTE(review): several lines are elided here (loop braces, error
 * unwinding after a failed em_xmit) — read the full source before
 * changing control flow.
 */
941 em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
943 struct adapter *adapter = ifp->if_softc;
945 int error = E1000_SUCCESS;
947 EM_TX_LOCK_ASSERT(adapter);
948 /* To allow being called from a tasklet */
952 if (((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
954 || (!adapter->link_active)) {
955 error = drbr_enqueue(ifp, adapter->br, m);
957 } else if (!drbr_needs_enqueue(ifp, adapter->br) &&
958 (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
959 if ((error = em_xmit(adapter, &m)) != 0) {
961 error = drbr_enqueue(ifp, adapter->br, m);
965 * We've bypassed the buf ring so we need to update
968 drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
970 ** Send a copy of the frame to the BPF
971 ** listener and set the watchdog on.
973 ETHER_BPF_MTAP(ifp, m);
974 adapter->watchdog_check = TRUE;
976 } else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
980 if (drbr_empty(ifp, adapter->br))
982 /* Process the queue */
984 if ((ifp->if_flags & IFF_RUNNING) == 0)
986 next = drbr_dequeue(ifp, adapter->br);
989 if ((error = em_xmit(adapter, &next)) != 0) {
991 error = drbr_enqueue(ifp, adapter->br, next);
994 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
995 ETHER_BPF_MTAP(ifp, next);
996 /* Set the watchdog */
997 adapter->watchdog_check = TRUE;
/* Descriptors nearly exhausted: tell the stack to back off. */
1000 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1001 ifp->if_flags |= IFF_OACTIVE;
1007 ** Multiqueue capable stack interface, this is not
1008 ** yet truly multiqueue, but that is coming...
/*
 * Multiqueue transmit entry point called by the stack. Tries the TX
 * lock without blocking: on success, transmit inline via
 * em_mq_start_locked(); on contention, just enqueue on the drbr ring
 * (a later caller/tasklet will drain it).
 */
1011 em_mq_start(struct ifnet *ifp, struct mbuf *m)
1014 struct adapter *adapter = ifp->if_softc;
1017 if (EM_TX_TRYLOCK(adapter)) {
1018 if (ifp->if_flags & IFF_RUNNING)
1019 error = em_mq_start_locked(ifp, m);
1020 EM_TX_UNLOCK(adapter);
1022 error = drbr_enqueue(ifp, adapter->br, m);
/*
 * Flush the driver's buf_ring under the TX lock, draining every queued
 * mbuf. NOTE(review): the loop body (presumably freeing each dequeued
 * mbuf) is elided in this excerpt — confirm against the full source.
 */
1028 em_qflush(struct ifnet *ifp)
1031 struct adapter *adapter = (struct adapter *)ifp->if_softc;
1033 EM_TX_LOCK(adapter);
1034 while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1037 EM_TX_UNLOCK(adapter);
/*
 * Legacy (single-queue) transmit path, TX lock held. Drains if_snd:
 * dequeue, hand to em_xmit() for encapsulation/DMA; on failure set
 * IFF_OACTIVE and requeue the packet at the head. Each sent frame is
 * tapped to BPF and arms the watchdog. When descriptors run low,
 * IFF_OACTIVE stops further transmit attempts from the stack.
 */
1042 em_start_locked(struct ifnet *ifp)
1044 struct adapter *adapter = ifp->if_softc;
1045 struct mbuf *m_head;
1047 EM_TX_LOCK_ASSERT(adapter);
1049 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
1052 if (!adapter->link_active)
1055 while (!ifq_is_empty(&ifp->if_snd)) {
1057 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1061 * Encapsulation can modify our pointer, and/or make it
1062 * NULL on failure. In that event, we can't requeue.
1064 if (em_xmit(adapter, &m_head)) {
1067 ifp->if_flags |= IFF_OACTIVE;
1068 ifq_prepend(&ifp->if_snd, m_head);
1072 /* Send a copy of the frame to the BPF listener */
1073 ETHER_BPF_MTAP(ifp, m_head);
1075 /* Set timeout in case hardware has problems transmitting. */
1076 adapter->watchdog_check = TRUE;
1078 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1079 ifp->if_flags |= IFF_OACTIVE;
/*
 * Stack-facing transmit entry point: take the TX lock and run the
 * locked start routine if the interface is up.
 */
1085 em_start(struct ifnet *ifp)
1087 struct adapter *adapter = ifp->if_softc;
1089 EM_TX_LOCK(adapter);
1090 if (ifp->if_flags & IFF_RUNNING)
1091 em_start_locked(ifp);
1092 EM_TX_UNLOCK(adapter);
1095 /*********************************************************************
1098 * em_ioctl is called when the user wants to configure the
1101 * return 0 on success, positive on failure
1102 **********************************************************************/
/*
 * Interface ioctl handler. Visible cases: address assignment (AF_INET
 * fast path that avoids a full re-init), SIOCSIFMTU (per-MAC jumbo
 * frame limits), SIOCSIFFLAGS (promisc/allmulti deltas vs. full
 * re-init), multicast add/del, media get/set (blocked while an
 * SOL/IDER session holds the PHY), and SIOCSIFCAP (polling, checksum,
 * TSO, VLAN, WOL capability toggles).
 * NOTE(review): the switch(command) line, several case labels, breaks
 * and returns are elided in this excerpt — the structure below is
 * partial; consult the full source before restructuring.
 */
1105 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred * uc)
1107 struct adapter *adapter = ifp->if_softc;
1108 struct ifreq *ifr = (struct ifreq *)data;
1110 struct ifaddr *ifa = (struct ifaddr *)data;
/* Refuse new work while the device is detaching. */
1114 if (adapter->in_detach)
1120 if (ifa->ifa_addr->sa_family == AF_INET) {
1123 * Since resetting hardware takes a very long time
1124 * and results in link renegotiation we only
1125 * initialize the hardware when it is absolutely
1128 ifp->if_flags |= IFF_UP;
1129 if (!(ifp->if_flags & IFF_RUNNING)) {
1130 EM_CORE_LOCK(adapter);
1131 em_init_locked(adapter);
1132 EM_CORE_UNLOCK(adapter);
1134 arp_ifinit(ifp, ifa);
1137 error = ether_ioctl(ifp, command, data);
1142 u16 eeprom_data = 0;
1144 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1146 EM_CORE_LOCK(adapter);
/* Per-MAC maximum frame size for the requested MTU. */
1147 switch (adapter->hw.mac.type) {
1150 * 82573 only supports jumbo frames
1151 * if ASPM is disabled.
1153 e1000_read_nvm(&adapter->hw,
1154 NVM_INIT_3GIO_3, 1, &eeprom_data);
1155 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1156 max_frame_size = ETHER_MAX_LEN;
1159 /* Allow Jumbo frames - fall thru */
1163 case e1000_ich10lan:
1165 case e1000_80003es2lan: /* Limit Jumbo Frame size */
1166 max_frame_size = 9234;
1169 max_frame_size = 4096;
1171 /* Adapters that do not support jumbo frames */
1175 max_frame_size = ETHER_MAX_LEN;
1178 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1180 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1182 EM_CORE_UNLOCK(adapter);
1187 ifp->if_mtu = ifr->ifr_mtu;
1188 adapter->max_frame_size =
1189 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1190 em_init_locked(adapter);
1191 EM_CORE_UNLOCK(adapter);
1195 IOCTL_DEBUGOUT("ioctl rcv'd:\
1196 SIOCSIFFLAGS (Set Interface Flags)");
1197 EM_CORE_LOCK(adapter);
/*
 * If only PROMISC/ALLMULTI changed, just reprogram the receive
 * filter instead of doing a full (slow) re-init.
 */
1198 if (ifp->if_flags & IFF_UP) {
1199 if ((ifp->if_flags & IFF_RUNNING)) {
1200 if ((ifp->if_flags ^ adapter->if_flags) &
1201 (IFF_PROMISC | IFF_ALLMULTI)) {
1202 em_disable_promisc(adapter);
1203 em_set_promisc(adapter);
1206 em_init_locked(adapter);
1208 if (ifp->if_flags & IFF_RUNNING) {
1209 EM_TX_LOCK(adapter);
1211 EM_TX_UNLOCK(adapter);
1213 adapter->if_flags = ifp->if_flags;
1214 EM_CORE_UNLOCK(adapter);
1218 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1219 if (ifp->if_flags & IFF_RUNNING) {
1220 EM_CORE_LOCK(adapter);
1221 em_disable_intr(adapter);
1222 em_set_multi(adapter);
/* 82542 rev 2 needs the receive unit reprogrammed after a filter change. */
1223 if (adapter->hw.mac.type == e1000_82542 &&
1224 adapter->hw.revision_id == E1000_REVISION_2) {
1225 em_initialize_receive_unit(adapter);
1227 #ifdef DEVICE_POLLING
1228 if (!(ifp->if_capenable & IFCAP_POLLING))
1230 em_enable_intr(adapter);
1231 EM_CORE_UNLOCK(adapter);
1235 /* Check SOL/IDER usage */
1236 EM_CORE_LOCK(adapter);
1237 if (e1000_check_reset_block(&adapter->hw)) {
1238 EM_CORE_UNLOCK(adapter);
1239 device_printf(adapter->dev, "Media change is"
1240 " blocked due to SOL/IDER session.\n");
1243 EM_CORE_UNLOCK(adapter);
1245 IOCTL_DEBUGOUT("ioctl rcv'd: \
1246 SIOCxIFMEDIA (Get/Set Interface Media)");
1247 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1253 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
/* `mask` holds only the capability bits the caller wants toggled. */
1255 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1256 #ifdef DEVICE_POLLING
1257 if (mask & IFCAP_POLLING) {
1258 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1259 error = ether_poll_register(em_poll, ifp);
1262 EM_CORE_LOCK(adapter);
1263 em_disable_intr(adapter);
1264 ifp->if_capenable |= IFCAP_POLLING;
1265 EM_CORE_UNLOCK(adapter);
1267 error = ether_poll_deregister(ifp);
1268 /* Enable interrupt even in error case */
1269 EM_CORE_LOCK(adapter);
1270 em_enable_intr(adapter);
1271 ifp->if_capenable &= ~IFCAP_POLLING;
1272 EM_CORE_UNLOCK(adapter);
1276 if (mask & IFCAP_HWCSUM) {
1277 ifp->if_capenable ^= IFCAP_HWCSUM;
1281 if (mask & IFCAP_TSO4) {
1282 ifp->if_capenable ^= IFCAP_TSO4;
1286 if (mask & IFCAP_VLAN_HWTAGGING) {
1287 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1291 if (mask & IFCAP_VLAN_HWFILTER) {
1292 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1296 if ((mask & IFCAP_WOL) &&
1297 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1298 if (mask & IFCAP_WOL_MCAST)
1299 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1300 if (mask & IFCAP_WOL_MAGIC)
1301 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1304 if (reinit && (ifp->if_flags & IFF_RUNNING))
1307 VLAN_CAPABILITIES(ifp);
/* Anything we don't handle goes to the generic ethernet ioctl. */
1313 error = ether_ioctl(ifp, command, data);
1321 /*********************************************************************
1324 * This routine is used in two ways. It is used by the stack as
1325 * init entry point in network interface structure. It is also used
1326 * by the driver as a hw/sw initialization routine to get to a
1329 * return 0 on success, positive on failure
1330 **********************************************************************/
/*
 * Core hardware/software (re)initialization; caller holds the core
 * lock (asserted below). Sequence visible here: program the packet
 * buffer split (PBA) per MAC type, install the current MAC address
 * (with the 82571 RAR[14] duplicate workaround), run the hardware
 * init, set up VLAN/checksum/TSO offload, manageability, TX/RX rings,
 * multicast and promisc state, start the timer callout, program 82574
 * MSI-X IVAR routing, and finally enable interrupts (unless polling).
 * NOTE(review): lines are elided throughout (breaks, else branches,
 * some case labels) — consult the full source before editing.
 */
1333 em_init_locked(struct adapter *adapter)
1335 struct ifnet *ifp = adapter->ifp;
1336 device_t dev = adapter->dev;
1339 INIT_DEBUGOUT("em_init: begin");
1341 EM_CORE_LOCK_ASSERT(adapter);
1343 EM_TX_LOCK(adapter);
1345 EM_TX_UNLOCK(adapter);
1348 * Packet Buffer Allocation (PBA)
1349 * Writing PBA sets the receive portion of the buffer
1350 * the remainder is used for the transmit buffer.
1352 * Devices before the 82547 had a Packet Buffer of 64K.
1353 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1354 * After the 82547 the buffer was reduced to 40K.
1355 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1356 * Note: default does not leave enough room for Jumbo Frame >10k.
1358 switch (adapter->hw.mac.type) {
1360 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1361 if (adapter->max_frame_size > 8192)
1362 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1364 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 also tracks a software TX FIFO for its half-duplex workaround. */
1365 adapter->tx_fifo_head = 0;
1366 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1367 adapter->tx_fifo_size =
1368 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1370 /* Total Packet Buffer on these is 48K */
1373 case e1000_80003es2lan:
1374 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1376 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1377 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1381 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1384 case e1000_ich10lan:
1386 pba = E1000_PBA_10K;
1392 /* Devices before 82547 had a Packet Buffer of 64K. */
1393 if (adapter->max_frame_size > 8192)
1394 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1396 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1399 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1400 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1402 /* Get the latest mac address, User can use a LAA */
1403 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1406 /* Put the address into the Receive Address Array */
1407 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1410 * With the 82571 adapter, RAR[0] may be overwritten
1411 * when the other port is reset, we make a duplicate
1412 * in RAR[14] for that eventuality, this assures
1413 * the interface continues to function.
1415 if (adapter->hw.mac.type == e1000_82571) {
1416 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1417 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1418 E1000_RAR_ENTRIES - 1);
1421 /* Initialize the hardware */
1422 if (em_hardware_init(adapter)) {
1423 device_printf(dev, "Unable to initialize the hardware\n");
1426 em_update_link_status(adapter);
1428 /* Setup VLAN support, basic and offload if available */
1429 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1430 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1431 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1432 /* Use real VLAN Filter support */
1433 em_setup_vlan_hw_support(adapter);
1436 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1437 ctrl |= E1000_CTRL_VME;
1438 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1442 /* Set hardware offload abilities */
1443 ifp->if_hwassist = 0;
1444 if (adapter->hw.mac.type >= e1000_82543) {
1445 if (ifp->if_capenable & IFCAP_TXCSUM)
1446 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1448 if (ifp->if_capenable & IFCAP_TSO4)
1449 ifp->if_hwassist |= CSUM_TSO;
1453 /* Configure for OS presence */
1454 em_init_manageability(adapter);
1456 /* Prepare transmit descriptors and buffers */
1457 em_setup_transmit_structures(adapter);
1458 em_initialize_transmit_unit(adapter);
1460 /* Setup Multicast table */
1461 em_set_multi(adapter);
1463 /* Prepare receive descriptors and buffers */
1464 if (em_setup_receive_structures(adapter)) {
1465 device_printf(dev, "Could not setup receive structures\n");
1466 EM_TX_LOCK(adapter);
1468 EM_TX_UNLOCK(adapter);
1471 em_initialize_receive_unit(adapter);
1473 /* Don't lose promiscuous settings */
1474 em_set_promisc(adapter);
1476 ifp->if_flags |= IFF_RUNNING;
1477 ifp->if_flags &= ~IFF_OACTIVE;
1479 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1480 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1482 /* MSI/X configuration for 82574 */
1483 if (adapter->hw.mac.type == e1000_82574) {
1485 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1486 tmp |= E1000_CTRL_EXT_PBA_CLR;
1487 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1489 ** Set the IVAR - interrupt vector routing.
1490 ** Each nibble represents a vector, high bit
1491 ** is enable, other 3 bits are the MSIX table
1492 ** entry, we map RXQ0 to 0, TXQ0 to 1, and
1493 ** Link (other) to 2, hence the magic number.
1495 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1498 #ifdef DEVICE_POLLING
1500 * Only enable interrupts if we are not polling, make sure
1501 * they are off otherwise.
1503 if (ifp->if_capenable & IFCAP_POLLING)
1504 em_disable_intr(adapter);
1506 #endif /* DEVICE_POLLING */
1507 em_enable_intr(adapter);
1509 /* AMT based hardware can now take control from firmware */
1510 if (adapter->has_manage && adapter->has_amt)
1511 em_get_hw_control(adapter);
1513 /* Don't reset the phy next time init gets called */
1514 adapter->hw.phy.reset_disable = TRUE;
/*
 * NOTE(review): the function header is elided in this excerpt; by
 * position and body this is presumably em_init(void *arg), the
 * unlocked wrapper around em_init_locked() — confirm in full source.
 */
1520 struct adapter *adapter = arg;
1522 EM_CORE_LOCK(adapter);
1523 em_init_locked(adapter);
1524 EM_CORE_UNLOCK(adapter);
1528 #ifdef DEVICE_POLLING
1529 /*********************************************************************
1531 * Legacy polling routine
1533 *********************************************************************/
/*
 * DEVICE_POLLING handler. On POLL_AND_CHECK_STATUS, read ICR to pick
 * up link-state changes and RX-overrun counts; then process up to
 * `count` receive descriptors and drain the transmit queue under the
 * TX lock (multiqueue drbr path or legacy if_snd path — the
 * selecting #if/#else lines are elided in this excerpt).
 */
1535 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1537 struct adapter *adapter = ifp->if_softc;
1538 u32 reg_icr, rx_done = 0;
1540 EM_CORE_LOCK(adapter);
1541 if ((ifp->if_flags & IFF_RUNNING) == 0) {
1542 EM_CORE_UNLOCK(adapter);
1546 if (cmd == POLL_AND_CHECK_STATUS) {
1547 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1548 /* Link status change */
1549 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1550 adapter->hw.mac.get_link_status = 1;
1551 em_update_link_status(adapter);
1553 if (reg_icr & E1000_ICR_RXO)
1554 adapter->rx_overruns++;
1556 EM_CORE_UNLOCK(adapter);
1558 rx_done = em_rxeof(adapter, count);
1560 EM_TX_LOCK(adapter);
1563 if (!drbr_empty(ifp, adapter->br))
1564 em_mq_start_locked(ifp, NULL);
1566 if (!ifq_is_empty(&ifp->if_snd))
1567 em_start_locked(ifp);
1569 EM_TX_UNLOCK(adapter);
1574 #ifdef EM_LEGACY_IRQ
1575 /*********************************************************************
1577 * Legacy Interrupt Service routine
1579 *********************************************************************/
/*
 * NOTE(review): the function header is elided; by the surrounding
 * EM_LEGACY_IRQ guard this is presumably the legacy interrupt service
 * routine (em_intr) — confirm in full source. It ignores interrupts
 * while polling is active, reads/acks ICR, filters stray interrupts
 * (ICR all-ones, zero, or — on 82571+ — INT_ASSERTED clear), handles
 * link-state changes (stopping/restarting the timer and purging TX
 * cruft), then processes RX and restarts TX under the TX lock.
 */
1584 struct adapter *adapter = arg;
1585 struct ifnet *ifp = adapter->ifp;
1589 if (ifp->if_capenable & IFCAP_POLLING)
1592 EM_CORE_LOCK(adapter);
1593 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1594 if (reg_icr & E1000_ICR_RXO)
1595 adapter->rx_overruns++;
1596 if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
1597 (adapter->hw.mac.type >= e1000_82571 &&
1598 (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
1601 if ((ifp->if_flags & IFF_RUNNING) == 0)
1604 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1605 callout_stop(&adapter->timer);
1606 adapter->hw.mac.get_link_status = 1;
1607 em_update_link_status(adapter);
1608 /* Deal with TX cruft when link lost */
1609 em_tx_purge(adapter);
1610 callout_reset(&adapter->timer, hz,
1611 em_local_timer, adapter);
1615 EM_TX_LOCK(adapter);
1617 em_rxeof(adapter, -1);
1619 if (ifp->if_flags & IFF_RUNNING &&
1620 !ifq_is_empty(&ifp->if_snd))
1621 em_start_locked(ifp);
1622 EM_TX_UNLOCK(adapter);
1625 EM_CORE_UNLOCK(adapter);
1629 #else /* EM_FAST_IRQ, then fast interrupt routines only */
/*
 * Deferred (taskqueue) link-state handler: under the core lock, stop
 * the periodic timer, refresh link status, purge stale TX state left
 * by a lost link, and restart the timer.
 */
1632 em_handle_link(void *context, int pending)
1634 struct adapter *adapter = context;
1635 struct ifnet *ifp = adapter->ifp;
1637 if (!(ifp->if_flags & IFF_RUNNING))
1640 EM_CORE_LOCK(adapter);
1641 callout_stop(&adapter->timer);
1642 em_update_link_status(adapter);
1643 /* Deal with TX cruft when link lost */
1644 em_tx_purge(adapter);
1645 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1646 EM_CORE_UNLOCK(adapter);
1650 /* Combined RX/TX handler, used by Legacy and MSI */
/* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Runs from the driver taskqueue: process RX (re-queueing itself if
 * em_rxeof reports more work), drain the transmit queue under the TX
 * lock (drbr or if_snd path; the selecting #if lines are elided),
 * then re-enable interrupts that em_irq_fast masked.
 */
1652 em_handle_rxtx(void *context, int pending)
1654 struct adapter *adapter = context;
1655 struct ifnet *ifp = adapter->ifp;
1658 if (ifp->if_flags & IFF_RUNNING) {
1659 if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
1660 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1661 EM_TX_LOCK(adapter);
1664 if (!drbr_empty(ifp, adapter->br))
1665 em_mq_start_locked(ifp, NULL);
1667 if (!ifq_is_empty(&ifp->if_snd))
1668 em_start_locked(ifp);
1670 EM_TX_UNLOCK(adapter);
1673 em_enable_intr(adapter);
1676 /*********************************************************************
1678 * Fast Legacy/MSI Combined Interrupt Service routine
1680 *********************************************************************/
1681 #define FILTER_STRAY
1682 #define FILTER_HANDLED
/*
 * Fast (filter) interrupt handler for legacy/MSI. Reads ICR, rejects
 * stray interrupts (all-ones, or INT_ASSERTED clear on 82571+), masks
 * further interrupts and defers the real RX/TX work to the rxtx task
 * (masking also works around the MSI message-reordering errata noted
 * below); link changes are deferred to the link task on the SWI queue.
 */
1684 em_irq_fast(void *arg)
1686 struct adapter *adapter = arg;
1692 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1695 if (reg_icr == 0xffffffff)
1696 return FILTER_STRAY;
1698 /* Definitely not our interrupt. */
1700 return FILTER_STRAY;
1703 * Starting with the 82571 chip, bit 31 should be used to
1704 * determine whether the interrupt belongs to us.
1706 if (adapter->hw.mac.type >= e1000_82571 &&
1707 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1708 return FILTER_STRAY;
1711 * Mask interrupts until the taskqueue is finished running. This is
1712 * cheap, just assume that it is needed. This also works around the
1713 * MSI message reordering errata on certain systems.
1715 em_disable_intr(adapter);
1716 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1718 /* Link status change */
1719 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1720 adapter->hw.mac.get_link_status = 1;
1721 taskqueue_enqueue(taskqueue_swi, &adapter->link_task);
1724 if (reg_icr & E1000_ICR_RXO)
1725 adapter->rx_overruns++;
1726 return FILTER_HANDLED;
1729 /*********************************************************************
1731 * MSIX Interrupt Service Routines
1733 **********************************************************************/
1734 #define EM_MSIX_TX 0x00040000
1735 #define EM_MSIX_RX 0x00010000
1736 #define EM_MSIX_LINK 0x00100000
/*
 * MSI-X TX vector handler: clean the TX ring under the TX lock
 * (the cleanup call itself is elided in this excerpt), defer further
 * TX work to the tx task, then unmask this vector via IMS.
 */
1739 em_msix_tx(void *arg)
1741 struct adapter *adapter = arg;
1742 struct ifnet *ifp = adapter->ifp;
1745 if (ifp->if_flags & IFF_RUNNING) {
1746 EM_TX_LOCK(adapter);
1748 EM_TX_UNLOCK(adapter);
1749 taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1751 /* Reenable this interrupt */
1752 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1756 /*********************************************************************
1758 * MSIX RX Interrupt Service routine
1760 **********************************************************************/
/*
 * MSI-X RX vector handler: process up to rx_process_limit descriptors;
 * if more work remains, defer to the rx task. Always unmask this
 * vector via IMS before returning.
 */
1763 em_msix_rx(void *arg)
1765 struct adapter *adapter = arg;
1766 struct ifnet *ifp = adapter->ifp;
1769 if ((ifp->if_flags & IFF_RUNNING) &&
1770 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1771 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1772 /* Reenable this interrupt */
1773 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1777 /*********************************************************************
1779 * MSIX Link Fast Interrupt Service routine
1781 **********************************************************************/
/*
 * MSI-X link vector handler: count the interrupt, read/ack ICR, defer
 * link-state processing to the link task on the SWI queue, and unmask
 * the link vector plus LSC in IMS.
 */
1784 em_msix_link(void *arg)
1786 struct adapter *adapter = arg;
1789 ++adapter->link_irq;
1790 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1792 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1793 adapter->hw.mac.get_link_status = 1;
1794 taskqueue_enqueue(taskqueue_swi, &adapter->link_task);
1796 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1797 EM_MSIX_LINK | E1000_IMS_LSC);
/*
 * Deferred RX task: keep processing receive descriptors, re-queueing
 * itself while em_rxeof reports remaining work.
 */
1802 em_handle_rx(void *context, int pending)
1804 struct adapter *adapter = context;
1805 struct ifnet *ifp = adapter->ifp;
1807 if ((ifp->if_flags & IFF_RUNNING) &&
1808 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1809 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
/*
 * Deferred TX task: try the TX lock (bail if contended — visible code
 * shows only the trylock; the early-return line is elided) and drain
 * the transmit queue (drbr or if_snd path per the elided #if).
 */
1814 em_handle_tx(void *context, int pending)
1816 struct adapter *adapter = context;
1817 struct ifnet *ifp = adapter->ifp;
1819 if (ifp->if_flags & IFF_RUNNING) {
1820 if (!EM_TX_TRYLOCK(adapter))
1824 if (!drbr_empty(ifp, adapter->br))
1825 em_mq_start_locked(ifp, NULL);
1827 if (!ifq_is_empty(&ifp->if_snd))
1828 em_start_locked(ifp);
1830 EM_TX_UNLOCK(adapter);
1833 #endif /* EM_FAST_IRQ */
1835 /*********************************************************************
1837 * Media Ioctl callback
1839 * This routine is called whenever the user queries the status of
1840 * the interface using ifconfig.
1842 **********************************************************************/
/*
 * ifmedia status callback: refresh link state and report it. Fiber /
 * SerDes media reports 1000_SX (1000_LX on 82545); copper reports
 * 10/100/1000-T from link_speed plus a full/half duplex flag.
 */
1844 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1846 struct adapter *adapter = ifp->if_softc;
1847 u_char fiber_type = IFM_1000_SX;
1849 INIT_DEBUGOUT("em_media_status: begin");
1851 EM_CORE_LOCK(adapter);
1852 em_update_link_status(adapter);
1854 ifmr->ifm_status = IFM_AVALID;
1855 ifmr->ifm_active = IFM_ETHER;
1857 if (!adapter->link_active) {
1858 EM_CORE_UNLOCK(adapter);
1862 ifmr->ifm_status |= IFM_ACTIVE;
1864 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1865 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1866 if (adapter->hw.mac.type == e1000_82545)
1867 fiber_type = IFM_1000_LX;
1868 ifmr->ifm_active |= fiber_type | IFM_FDX;
1870 switch (adapter->link_speed) {
1872 ifmr->ifm_active |= IFM_10_T;
1875 ifmr->ifm_active |= IFM_100_TX;
1878 ifmr->ifm_active |= IFM_1000_T;
1881 if (adapter->link_duplex == FULL_DUPLEX)
1882 ifmr->ifm_active |= IFM_FDX;
1884 ifmr->ifm_active |= IFM_HDX;
1886 EM_CORE_UNLOCK(adapter);
1889 /*********************************************************************
1891 * Media Ioctl callback
1893 * This routine is called when the user changes speed/duplex using
1894 * media/mediaopt option with ifconfig.
1896 **********************************************************************/
/*
 * ifmedia change callback: translate the requested media subtype into
 * autoneg/forced speed-duplex settings on the shared hw structure,
 * then re-run init so the new settings take effect (reset_disable is
 * cleared first so the PHY actually renegotiates).
 * NOTE(review): case labels (IFM_AUTO, 1000_T/SX, 100_TX, 10_T) are
 * elided in this excerpt; the bodies below are matched by position.
 */
1898 em_media_change(struct ifnet *ifp)
1900 struct adapter *adapter = ifp->if_softc;
1901 struct ifmedia *ifm = &adapter->media;
1903 INIT_DEBUGOUT("em_media_change: begin");
1905 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1908 EM_CORE_LOCK(adapter);
1909 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1911 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1912 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1917 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1918 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1921 adapter->hw.mac.autoneg = FALSE;
1922 adapter->hw.phy.autoneg_advertised = 0;
1923 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1924 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1926 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1929 adapter->hw.mac.autoneg = FALSE;
1930 adapter->hw.phy.autoneg_advertised = 0;
1931 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1932 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1934 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1937 device_printf(adapter->dev, "Unsupported media type\n");
1940 /* As the speed/duplex settings may have changed we need to
1943 adapter->hw.phy.reset_disable = FALSE;
1945 em_init_locked(adapter);
1946 EM_CORE_UNLOCK(adapter);
1951 /*********************************************************************
1953 * This routine maps the mbufs to tx descriptors.
1955 * return 0 on success, positive on failure
1956 **********************************************************************/
/*
 * Encapsulate an mbuf chain into TX descriptors. May modify *m_headp
 * (m_pullup for short TSO headers, m_defrag on EFBIG) or set it to
 * NULL on fatal errors — callers must not requeue a NULLed pointer.
 *
 * Handles: forced cleanup when descriptors run low, DMA mapping with
 * EFBIG/ENOMEM recovery, the TSO sentinel-descriptor workaround, the
 * 82544/PCI-X address-split workaround (em_fill_descriptors), VLAN
 * tag insertion, EOP/RS flags on the last descriptor, and the 82547
 * half-duplex FIFO workaround before/after bumping TDT.
 * Returns 0 on success, errno on failure.
 * NOTE(review): many lines are elided in this excerpt (returns after
 * error counters, `else` branches, some closing braces) — do not
 * modify control flow without consulting the full source.
 */
1959 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
1961 bus_dma_segment_t segs[EM_MAX_SCATTER];
1963 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1964 struct e1000_tx_desc *ctxd = NULL;
1965 struct mbuf *m_head;
1966 u32 txd_upper, txd_lower, txd_used, txd_saved;
1967 int nsegs, i, j, first, last = 0;
1968 int error, do_tso, tso_desc = 0;
1971 txd_upper = txd_lower = txd_used = txd_saved = 0;
1974 do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1980 * Force a cleanup if number of TX descriptors
1981 * available hits the threshold
1983 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1985 /* Now do we at least have a minimal? */
1986 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1987 adapter->no_tx_desc_avail1++;
1995 * If an mbuf is only header we need
1996 * to pull 4 bytes of data into it.
1998 if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
1999 m_head = m_pullup(m_head, M_TSO_LEN + 4);
2006 * Map the packet for DMA
2008 * Capture the first descriptor index,
2009 * this descriptor will have the index
2010 * of the EOP which is the only one that
2011 * now gets a DONE bit writeback.
2013 first = adapter->next_avail_tx_desc;
2014 tx_buffer = &adapter->tx_buffer_area[first];
2015 tx_buffer_mapped = tx_buffer;
2016 map = tx_buffer->map;
2018 error = bus_dmamap_load_mbuf_segment(adapter->txtag, map,
2019 *m_headp, segs, EM_MAX_SCATTER, &nsegs, BUS_DMA_NOWAIT);
2022 * There are two types of errors we can (try) to handle:
2023 * - EFBIG means the mbuf chain was too long and bus_dma ran
2024 * out of segments. Defragment the mbuf chain and try again.
2025 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2026 * at this point in time. Defer sending and try again later.
2027 * All other errors, in particular EINVAL, are fatal and prevent the
2028 * mbuf chain from ever going through. Drop it and report error.
2030 if (error == EFBIG) {
2033 m = m_defrag(*m_headp, MB_DONTWAIT);
2035 adapter->mbuf_alloc_failed++;
2043 error = bus_dmamap_load_mbuf_segment(adapter->txtag, map,
2044 *m_headp, segs, EM_MAX_SCATTER, &nsegs, BUS_DMA_NOWAIT);
2047 adapter->no_tx_dma_setup++;
2052 } else if (error != 0) {
2053 adapter->no_tx_dma_setup++;
2058 * TSO Hardware workaround, if this packet is not
2059 * TSO, and is only a single descriptor long, and
2060 * it follows a TSO burst, then we need to add a
2061 * sentinel descriptor to prevent premature writeback.
2063 if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2066 adapter->tx_tso = FALSE;
/* Need nsegs descriptors plus slack of 2; otherwise defer. */
2069 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2070 adapter->no_tx_desc_avail2++;
2071 bus_dmamap_unload(adapter->txtag, map);
2076 /* Do hardware assists */
2078 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2079 error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2081 return (ENXIO); /* something foobar */
2082 /* we need to make a final sentinel transmit desc */
2086 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2087 em_transmit_checksum_setup(adapter, m_head,
2088 &txd_upper, &txd_lower);
2090 i = adapter->next_avail_tx_desc;
2091 if (adapter->pcix_82544)
2094 /* Set up our transmit descriptors */
2095 for (j = 0; j < nsegs; j++) {
2097 bus_addr_t seg_addr;
2098 /* If adapter is 82544 and on PCIX bus */
2099 if(adapter->pcix_82544) {
2100 DESC_ARRAY desc_array;
2101 u32 array_elements, counter;
2103 * Check the Address and Length combination and
2104 * split the data accordingly
2106 array_elements = em_fill_descriptors(segs[j].ds_addr,
2107 segs[j].ds_len, &desc_array);
2108 for (counter = 0; counter < array_elements; counter++) {
2109 if (txd_used == adapter->num_tx_desc_avail) {
/* Out of descriptors mid-packet: roll back and defer. */
2110 adapter->next_avail_tx_desc = txd_saved;
2111 adapter->no_tx_desc_avail2++;
2112 bus_dmamap_unload(adapter->txtag, map);
2115 tx_buffer = &adapter->tx_buffer_area[i];
2116 ctxd = &adapter->tx_desc_base[i];
2117 ctxd->buffer_addr = htole64(
2118 desc_array.descriptor[counter].address);
2119 ctxd->lower.data = htole32(
2120 (adapter->txd_cmd | txd_lower | (u16)
2121 desc_array.descriptor[counter].length));
2123 htole32((txd_upper));
2125 if (++i == adapter->num_tx_desc)
2127 tx_buffer->m_head = NULL;
2128 tx_buffer->next_eop = -1;
2132 tx_buffer = &adapter->tx_buffer_area[i];
2133 ctxd = &adapter->tx_desc_base[i];
2134 seg_addr = segs[j].ds_addr;
2135 seg_len = segs[j].ds_len;
2138 ** If this is the last descriptor, we want to
2139 ** split it so we have a small final sentinel
2141 if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2143 ctxd->buffer_addr = htole64(seg_addr);
2144 ctxd->lower.data = htole32(
2145 adapter->txd_cmd | txd_lower | seg_len);
2148 if (++i == adapter->num_tx_desc)
2150 /* Now make the sentinel */
2151 ++txd_used; /* using an extra txd */
2152 ctxd = &adapter->tx_desc_base[i];
2153 tx_buffer = &adapter->tx_buffer_area[i];
2155 htole64(seg_addr + seg_len);
2156 ctxd->lower.data = htole32(
2157 adapter->txd_cmd | txd_lower | 4);
2161 if (++i == adapter->num_tx_desc)
2164 ctxd->buffer_addr = htole64(seg_addr);
2165 ctxd->lower.data = htole32(
2166 adapter->txd_cmd | txd_lower | seg_len);
2170 if (++i == adapter->num_tx_desc)
2173 tx_buffer->m_head = NULL;
2174 tx_buffer->next_eop = -1;
2178 adapter->next_avail_tx_desc = i;
2179 if (adapter->pcix_82544)
2180 adapter->num_tx_desc_avail -= txd_used;
2182 adapter->num_tx_desc_avail -= nsegs;
2183 if (tso_desc) /* TSO used an extra for sentinel */
2184 adapter->num_tx_desc_avail -= txd_used;
2190 if (m_head->m_flags & M_VLANTAG) {
2191 /* Set the vlan id. */
2192 ctxd->upper.fields.special =
2193 htole16(m_head->m_pkthdr.ether_vlantag);
2194 /* Tell hardware to add tag */
2195 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2198 tx_buffer->m_head = m_head;
2199 tx_buffer_mapped->map = tx_buffer->map;
2200 tx_buffer->map = map;
2201 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2204 * Last Descriptor of Packet
2205 * needs End Of Packet (EOP)
2206 * and Report Status (RS)
2209 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2211 * Keep track in the first buffer which
2212 * descriptor will be written back
2214 tx_buffer = &adapter->tx_buffer_area[first];
2215 tx_buffer->next_eop = last;
2216 adapter->watchdog_time = ticks;
2219 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2220 * that this frame is available to transmit.
2222 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2223 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2224 if (adapter->hw.mac.type == e1000_82547 &&
2225 adapter->link_duplex == HALF_DUPLEX)
2226 em_82547_move_tail(adapter);
2228 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2229 if (adapter->hw.mac.type == e1000_82547)
2230 em_82547_update_fifo_head(adapter,
2231 m_head->m_pkthdr.len);
2237 /*********************************************************************
2239 * 82547 workaround to avoid controller hang in half-duplex environment.
2240 * The workaround is to avoid queuing a large packet that would span
2241 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2242 * in this case. We do that only when FIFO is quiescent.
2244 **********************************************************************/
/*
 * 82547 half-duplex FIFO workaround (TX lock held): walk the
 * descriptors between the hardware tail (TDT) and the software tail,
 * summing their lengths. If advancing would risk the FIFO-boundary
 * hang, retry via a 1-tick callout; otherwise advance TDT and account
 * the bytes in the software FIFO head.
 */
2246 em_82547_move_tail_locked(void *arg)
2248 struct adapter *adapter = arg;
2250 struct e1000_tx_desc *tx_desc;
2251 u16 hw_tdt, sw_tdt, length = 0;
2254 EM_TX_LOCK_ASSERT(adapter);
2256 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
2257 sw_tdt = adapter->next_avail_tx_desc;
2259 while (hw_tdt != sw_tdt) {
2260 tx_desc = &adapter->tx_desc_base[hw_tdt];
2261 length += tx_desc->lower.flags.length;
2262 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
2263 if (++hw_tdt == adapter->num_tx_desc)
2267 if (em_82547_fifo_workaround(adapter, length)) {
2268 adapter->tx_fifo_wrk_cnt++;
/* FIFO not quiescent yet: retry on the next tick. */
2269 callout_reset(&adapter->tx_fifo_timer, 1,
2270 em_82547_move_tail, adapter);
2273 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
2274 em_82547_update_fifo_head(adapter, length);
/*
 * Unlocked wrapper (also the callout entry) around
 * em_82547_move_tail_locked(): acquires the TX lock for it.
 */
2281 em_82547_move_tail(void *arg)
2283 struct adapter *adapter = arg;
2284 EM_TX_LOCK(adapter);
2285 em_82547_move_tail_locked(arg);
2286 EM_TX_UNLOCK(adapter);
/*
 * Decide whether queueing `len` bytes would span the 82547 internal
 * TX FIFO boundary in half-duplex mode (the controller-hang case).
 * Lengths are rounded up to EM_FIFO_HDR granularity. When the packet
 * would cross, attempt a FIFO reset. NOTE(review): the return
 * statements are elided in this excerpt; the visible convention is
 * nonzero = caller must defer (see em_82547_move_tail_locked).
 */
2290 em_82547_fifo_workaround(struct adapter *adapter, int len)
2292 int fifo_space, fifo_pkt_len;
2294 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2296 if (adapter->link_duplex == HALF_DUPLEX) {
2297 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2299 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2300 if (em_82547_tx_fifo_reset(adapter))
/*
 * Advance the software model of the 82547 TX FIFO head by the
 * FIFO-header-rounded packet length, wrapping at tx_fifo_size.
 */
2311 em_82547_update_fifo_head(struct adapter *adapter, int len)
2313 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2315 /* tx_fifo_head is always 16 byte aligned */
2316 adapter->tx_fifo_head += fifo_pkt_len;
2317 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2318 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * If the TX unit is fully quiescent (ring head == tail, FIFO head ==
 * tail for both data and saved pointers, and no packets counted in the
 * FIFO), disable TX, rewind the FIFO pointers to the base address,
 * re-enable TX, and reset the software head.  Part of the 82547
 * half-duplex hang workaround.
 */
2324 em_82547_tx_fifo_reset(struct adapter *adapter)
2328 	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2329 	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2330 	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2331 	    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2332 	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2333 	    E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2334 	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2335 		/* Disable TX unit */
2336 		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2337 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2338 		    tctl & ~E1000_TCTL_EN);
2340 		/* Reset FIFO pointers */
2341 		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2342 		    adapter->tx_head_addr);
2343 		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2344 		    adapter->tx_head_addr);
2345 		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2346 		    adapter->tx_head_addr);
2347 		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2348 		    adapter->tx_head_addr);
2350 		/* Re-enable TX unit */
2351 		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2352 		E1000_WRITE_FLUSH(&adapter->hw);
2354 		adapter->tx_fifo_head = 0;
2355 		adapter->tx_fifo_reset_cnt++;
/*
 * Program RCTL promiscuous bits from the interface flags:
 * IFF_PROMISC sets unicast+multicast promiscuous (and SBP to accept
 * bad packets); IFF_ALLMULTI sets multicast promiscuous only.
 */
2365 em_set_promisc(struct adapter *adapter)
2367 	struct ifnet *ifp = adapter->ifp;
2370 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2372 	if (ifp->if_flags & IFF_PROMISC) {
2373 		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2374 		/* Turn this on if you want to see bad packets */
2376 		reg_rctl |= E1000_RCTL_SBP;
2377 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2378 	} else if (ifp->if_flags & IFF_ALLMULTI) {
2379 		reg_rctl |= E1000_RCTL_MPE;
2380 		reg_rctl &= ~E1000_RCTL_UPE;
2381 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * Clear all promiscuous-mode bits (unicast, multicast, store-bad-packets)
 * in RCTL, returning the receiver to normal filtering.
 */
2386 em_disable_promisc(struct adapter *adapter)
2390 	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2392 	reg_rctl &= (~E1000_RCTL_UPE);
2393 	reg_rctl &= (~E1000_RCTL_MPE);
2394 	reg_rctl &= (~E1000_RCTL_SBP);
2395 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2399 /*********************************************************************
2402 * This routine is called whenever multicast address list is updated.
2404 **********************************************************************/
/*
 * Rebuild the hardware multicast filter from the interface's multicast
 * address list.  Applies the 82542 rev2 workaround (reset the receiver
 * and temporarily clear MWI around the update), and falls back to
 * multicast-promiscuous mode when the list exceeds the hardware table.
 */
2407 em_set_multi(struct adapter *adapter)
2409 	struct ifnet *ifp = adapter->ifp;
2410 	struct ifmultiaddr *ifma;
2412 	u8  *mta; /* Multicast array memory */
2415 	IOCTL_DEBUGOUT("em_set_multi: begin");
	/* 82542 rev2 errata: receiver must be held in reset while updating */
2417 	if (adapter->hw.mac.type == e1000_82542 &&
2418 	    adapter->hw.revision_id == E1000_REVISION_2) {
2419 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2420 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2421 			e1000_pci_clear_mwi(&adapter->hw);
2422 		reg_rctl |= E1000_RCTL_RST;
2423 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2427 	/* Allocate temporary memory to setup array */
2428 	mta = kmalloc(sizeof(u8) *
2429 	    (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
2430 	    M_DEVBUF, M_INTWAIT | M_ZERO);
2432 		panic("em_set_multi memory failure\n");
2435 #if __FreeBSD_version < 800000
2438 	if_maddr_rlock(ifp);
	/* Copy each AF_LINK address into the flat array, up to the HW limit */
2441 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2442 		if (ifma->ifma_addr->sa_family != AF_LINK)
2445 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2448 		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2449 		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2453 #if __FreeBSD_version < 800000
2454 	IF_ADDR_UNLOCK(ifp);
2456 	if_maddr_runlock(ifp);
	/* Too many groups for the filter table: go multicast-promiscuous */
2459 	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2460 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2461 		reg_rctl |= E1000_RCTL_MPE;
2462 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2464 		e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
	/* Undo the 82542 rev2 workaround: release reset, restore MWI */
2466 	if (adapter->hw.mac.type == e1000_82542 &&
2467 	    adapter->hw.revision_id == E1000_REVISION_2) {
2468 		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2469 		reg_rctl &= ~E1000_RCTL_RST;
2470 		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2472 		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2473 			e1000_pci_set_mwi(&adapter->hw);
2475 	kfree(mta, M_DEVBUF);
2479 /*********************************************************************
2482 * This routine checks for link status and updates statistics.
2484 **********************************************************************/
/*
 * Periodic (1 Hz) housekeeping: refresh link state and statistics,
 * re-seat an LAA MAC into RAR[0] on 82571, run the SmartSpeed
 * workaround, and check the TX watchdog.  Caller holds the CORE lock;
 * the routine re-arms itself via callout_reset().
 * NOTE(review): the watchdog-timeout lines at the bottom appear to be
 * the 'hung' path reached by a goto not visible in this extraction.
 */
2487 em_local_timer_locked(void *arg)
2489 	struct adapter *adapter = arg;
2490 	struct ifnet *ifp = adapter->ifp;
2492 	EM_CORE_LOCK_ASSERT(adapter);
2494 #ifndef DEVICE_POLLING
2495 	taskqueue_enqueue(adapter->tq,
2496 	    &adapter->rxtx_task);
2498 	em_update_link_status(adapter);
2499 	em_update_stats_counters(adapter);
2501 	/* Reset LAA into RAR[0] on 82571 */
2502 	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2503 		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2505 	if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
2506 		em_print_hw_stats(adapter);
2508 	em_smartspeed(adapter);
2511 	 * We check the watchdog: the time since
2512 	 * the last TX descriptor was cleaned.
2513 	 * This implies a functional TX engine.
2515 	if ((adapter->watchdog_check == TRUE) &&
2516 	    (ticks - adapter->watchdog_time > EM_WATCHDOG))
2519 	callout_reset(&adapter->timer, hz, em_local_timer, adapter);
	/* Watchdog fired: mark interface down and reinitialize the adapter */
2522 	device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2523 	adapter->ifp->if_flags &= ~IFF_RUNNING;
2524 	adapter->watchdog_events++;
2525 	em_init_locked(adapter);
/*
 * Callout entry point: take the CORE lock and run the locked timer body.
 */
2529 em_local_timer(void *arg)
2531 	struct adapter *adapter = arg;
2533 	EM_CORE_LOCK(adapter);
2534 	em_local_timer_locked(arg);
2535 	EM_CORE_UNLOCK(adapter);
/*
 * Poll the PHY/MAC for current link state (per media type), detect
 * up/down transitions, and propagate them to the ifnet layer: set
 * speed/duplex/baudrate on link-up, clear them and disable the TX
 * watchdog on link-down.
 */
2540 em_update_link_status(struct adapter *adapter)
2542 	struct e1000_hw *hw = &adapter->hw;
2543 	struct ifnet *ifp = adapter->ifp;
2544 	device_t dev = adapter->dev;
2547 	/* Get the cached link value or read phy for real */
2548 	switch (hw->phy.media_type) {
2549 	case e1000_media_type_copper:
2550 		if (hw->mac.get_link_status) {
2551 			/* Do the work to read phy */
2552 			e1000_check_for_link(hw);
2553 			link_check = !hw->mac.get_link_status;
2554 			if (link_check) /* ESB2 fix */
2555 				e1000_cfg_on_link_up(hw);
2559 	case e1000_media_type_fiber:
2560 		e1000_check_for_link(hw);
2561 		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2564 	case e1000_media_type_internal_serdes:
2565 		e1000_check_for_link(hw);
2566 		link_check = adapter->hw.mac.serdes_has_link;
2569 	case e1000_media_type_unknown:
2573 	/* Now check for a transition */
2574 	if (link_check && (adapter->link_active == 0)) {
2575 		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2576 		    &adapter->link_duplex);
2577 		/* Check if we must disable SPEED_MODE bit on PCI-E */
2578 		if ((adapter->link_speed != SPEED_1000) &&
2579 		    ((hw->mac.type == e1000_82571) ||
2580 		    (hw->mac.type == e1000_82572))) {
2582 			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2583 			tarc0 &= ~SPEED_MODE_BIT;
2584 			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
2587 		device_printf(dev, "Link is up %d Mbps %s\n",
2588 		    adapter->link_speed,
2589 		    ((adapter->link_duplex == FULL_DUPLEX) ?
2590 		    "Full Duplex" : "Half Duplex"));
2591 		adapter->link_active = 1;
2592 		adapter->smartspeed = 0;
2593 		ifp->if_baudrate = adapter->link_speed * 1000000;
2594 		ifp->if_link_state = LINK_STATE_UP;
2595 		if_link_state_change(ifp);
2596 	} else if (!link_check && (adapter->link_active == 1)) {
2597 		ifp->if_baudrate = adapter->link_speed = 0;
2598 		adapter->link_duplex = 0;
2600 		device_printf(dev, "Link is Down\n");
2601 		adapter->link_active = 0;
2602 		/* Link down, disable watchdog */
2603 		adapter->watchdog_check = FALSE;
2604 		ifp->if_link_state = LINK_STATE_DOWN;
2605 		if_link_state_change(ifp);
2609 /*********************************************************************
2611 * This routine disables all traffic on the adapter by issuing a
2612 * global reset on the MAC and deallocates TX/RX buffers.
2614 * This routine should always be called with BOTH the CORE
2616 **********************************************************************/
	/*
	 * em_stop body (signature line not visible in this extraction):
	 * quiesce the adapter — mask interrupts, stop the timer callouts,
	 * mark the ifnet inactive, and issue a global MAC reset.  Requires
	 * both the CORE and TX locks, per the asserts below.
	 */
2621 	struct adapter *adapter = arg;
2622 	struct ifnet *ifp = adapter->ifp;
2624 	EM_CORE_LOCK_ASSERT(adapter);
2625 	EM_TX_LOCK_ASSERT(adapter);
2627 	INIT_DEBUGOUT("em_stop: begin");
2629 	em_disable_intr(adapter);
2630 	callout_stop(&adapter->timer);
2631 	callout_stop(&adapter->tx_fifo_timer);
2633 	/* Tell the stack that the interface is no longer active */
2634 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2636 	e1000_reset_hw(&adapter->hw);
	/* Clear Wake-on-LAN control on parts that have it (82544+) */
2637 	if (adapter->hw.mac.type >= e1000_82544)
2638 		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2642 /*********************************************************************
2644 * Determine hardware revision.
2646 **********************************************************************/
/*
 * Read PCI config space to identify the board: ensure bus-master and
 * memory-space decoding are enabled (re-enabling them if the BIOS left
 * them off), capture vendor/device/revision/subsystem IDs, and let the
 * shared code resolve the MAC type.
 */
2648 em_identify_hardware(struct adapter *adapter)
2650 	device_t dev = adapter->dev;
2652 	/* Make sure our PCI config space has the necessary stuff set */
2653 	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2654 	if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2655 	    (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2656 		device_printf(dev, "Memory Access and/or Bus Master bits "
2658 		adapter->hw.bus.pci_cmd_word |=
2659 		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2660 		pci_write_config(dev, PCIR_COMMAND,
2661 		    adapter->hw.bus.pci_cmd_word, 2);
2664 	/* Save off the information about this board */
2665 	adapter->hw.vendor_id = pci_get_vendor(dev);
2666 	adapter->hw.device_id = pci_get_device(dev);
2667 	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2668 	adapter->hw.subsystem_vendor_id =
2669 	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
2670 	adapter->hw.subsystem_device_id =
2671 	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
2673 	/* Do Shared Code Init and Setup */
2674 	if (e1000_set_mac_type(&adapter->hw)) {
2675 		device_printf(dev, "Setup init failure\n");
/*
 * Map the device's register BAR, locate and map the I/O BAR on the
 * older parts that need it (post-82543, pre-82571), initialize the
 * MSI-X resource arrays, and probe for MSI/MSI-X support.
 * Returns E1000_SUCCESS or an errno-style failure from allocation.
 */
2681 em_allocate_pci_resources(struct adapter *adapter)
2683 	device_t dev = adapter->dev;
2684 	int val, rid, error = E1000_SUCCESS;
2687 	adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2689 	if (adapter->memory == NULL) {
2690 		device_printf(dev, "Unable to allocate bus resource: memory\n");
2693 	adapter->osdep.mem_bus_space_tag =
2694 	    rman_get_bustag(adapter->memory);
2695 	adapter->osdep.mem_bus_space_handle =
2696 	    rman_get_bushandle(adapter->memory);
	/* Shared code accesses registers through this opaque handle */
2697 	adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2699 	/* Only older adapters use IO mapping */
2700 	if ((adapter->hw.mac.type > e1000_82543) &&
2701 	    (adapter->hw.mac.type < e1000_82571)) {
2702 		/* Figure our where our IO BAR is ? */
2703 		for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2704 			val = pci_read_config(dev, rid, 4);
2705 			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2706 				adapter->io_rid = rid;
2710 			/* check for 64bit BAR */
2711 			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2714 		if (rid >= PCIR_CIS) {
2715 			device_printf(dev, "Unable to locate IO BAR\n");
2718 		adapter->ioport = bus_alloc_resource_any(dev,
2719 		    SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2720 		if (adapter->ioport == NULL) {
2721 			device_printf(dev, "Unable to allocate bus resource: "
2725 		adapter->hw.io_base = 0;
2726 		adapter->osdep.io_bus_space_tag =
2727 		    rman_get_bustag(adapter->ioport);
2728 		adapter->osdep.io_bus_space_handle =
2729 		    rman_get_bushandle(adapter->ioport);
2733 	** Init the resource arrays
2734 	** used by MSIX setup
2736 	for (int i = 0; i < 3; i++) {
2737 		adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
2738 		adapter->tag[i] = NULL;
2739 		adapter->res[i] = NULL;
2743 	 * Setup MSI/X or MSI if PCI Express
2746 	adapter->msi = em_setup_msix(adapter);
2748 	adapter->hw.back = &adapter->osdep;
2753 /*********************************************************************
2755 * Setup the Legacy or MSI Interrupt handler
2757 **********************************************************************/
/*
 * Allocate and wire up the single legacy/MSI interrupt.  With
 * EM_LEGACY_IRQ defined the handler runs in full ithread context;
 * otherwise a fast (filter) interrupt is installed with the RX/TX and
 * link work deferred to a private taskqueue.
 */
2759 em_allocate_legacy(struct adapter *adapter)
2761 	device_t dev = adapter->dev;
2764 	/* Manually turn off all interrupts */
2765 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2767 	/* Legacy RID is 0 */
2768 	if (adapter->msi == 0)
2769 		adapter->rid[0] = 0;
2771 	/* We allocate a single interrupt resource */
2772 	adapter->res[0] = bus_alloc_resource_any(dev,
2773 	    SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
2774 	if (adapter->res[0] == NULL) {
2775 		device_printf(dev, "Unable to allocate bus resource: "
2780 #ifdef EM_LEGACY_IRQ
2781 	/* We do Legacy setup */
2782 	if ((error = bus_setup_intr(dev, adapter->res[0],
2783 	    /*INTR_TYPE_NET |*/ INTR_MPSAFE, em_intr, adapter,
2784 	    &adapter->tag[0], NULL)) != 0) {
2785 		device_printf(dev, "Failed to register interrupt handler");
2789 #else /* FAST_IRQ */
2791 	 * Try allocating a fast interrupt and the associated deferred
2792 	 * processing contexts.
2794 	TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2795 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2796 	adapter->tq = taskqueue_create("em_taskq", M_INTWAIT,
2797 	    taskqueue_thread_enqueue, &adapter->tq);
2798 	taskqueue_start_threads(&adapter->tq, 1, TDPRI_KERN_DAEMON /*PI_NET*/, -1, "%s taskq",
2799 	    device_get_nameunit(adapter->dev));
2800 	if ((error = bus_setup_intr(dev, adapter->res[0],
2801 	    /*INTR_TYPE_NET |*/ 0, em_irq_fast, adapter,
2802 	    &adapter->tag[0], NULL)) != 0) {
2803 		device_printf(dev, "Failed to register fast interrupt "
2804 		    "handler: %d\n", error);
2805 		taskqueue_free(adapter->tq);
2809 #endif /* EM_LEGACY_IRQ */
2814 /*********************************************************************
2816 * Setup the MSIX Interrupt handlers
2817 * This is not really Multiqueue, rather
2818 * its just multiple interrupt vectors.
2820 **********************************************************************/
/*
 * Allocate the three MSI-X interrupt resources (RX, TX, link) and
 * register a handler on each, with deferred-processing tasks on a
 * private taskqueue.  Not true multiqueue — just separate vectors.
 */
2822 em_allocate_msix(struct adapter *adapter)
2824 	device_t dev = adapter->dev;
2827 	/* Make sure all interrupts are disabled */
2828 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2830 	/* First get the resources */
2831 	for (int i = 0; i < adapter->msi; i++) {
2832 		adapter->res[i] = bus_alloc_resource_any(dev,
2833 		    SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2834 		if (adapter->res[i] == NULL) {
2836 			    "Unable to allocate bus resource: "
2837 			    "MSIX Interrupt\n");
2843 	 * Now allocate deferred processing contexts.
2845 	TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2846 	TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2848 	 * Handle compatibility for msi case for deferral due to
	/* NOTE(review): rxtx_task is bound to em_handle_tx here, not a
	 * combined rx+tx handler — confirm this is the intended MSI
	 * compatibility behavior. */
2851 	TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
2852 	TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2853 	adapter->tq = taskqueue_create("em_taskq", M_INTWAIT,
2854 	    taskqueue_thread_enqueue, &adapter->tq);
2855 	taskqueue_start_threads(&adapter->tq, 1, TDPRI_KERN_DAEMON /*PI_NET*/, -1, "%s taskq",
2856 	    device_get_nameunit(adapter->dev));
2859 	 * And setup the interrupt handlers
2862 	/* First slot to RX */
2863 	if ((error = bus_setup_intr(dev, adapter->res[0],
2864 	    /*INTR_TYPE_NET |*/ INTR_MPSAFE, em_msix_rx, adapter,
2865 	    &adapter->tag[0], NULL)) != 0) {
2866 		device_printf(dev, "Failed to register RX handler");
2871 	if ((error = bus_setup_intr(dev, adapter->res[1],
2872 	    /*INTR_TYPE_NET |*/ INTR_MPSAFE, em_msix_tx, adapter,
2873 	    &adapter->tag[1], NULL)) != 0) {
2874 		device_printf(dev, "Failed to register TX handler");
2879 	if ((error = bus_setup_intr(dev, adapter->res[2],
2880 	    /*INTR_TYPE_NET |*/ INTR_MPSAFE, em_msix_link, adapter,
2881 	    &adapter->tag[2], NULL)) != 0) {
		/* NOTE(review): copy-paste — this is the LINK vector, but the
		 * message says "TX handler"; should read "link handler". */
2882 		device_printf(dev, "Failed to register TX handler");
/*
 * Release everything em_allocate_pci_resources() and the interrupt
 * setup acquired: interrupt tags and IRQ resources (legacy or MSI-X,
 * same array logic), the MSI allocation itself, and the memory, MSI-X,
 * flash, and I/O-port BAR mappings.  All checks are NULL-safe so this
 * can run after a partial attach failure.
 */
2891 em_free_pci_resources(struct adapter *adapter)
2893 	device_t dev = adapter->dev;
2895 	/* Make sure the for loop below runs once */
2896 	if (adapter->msi == 0)
2900 	 * First release all the interrupt resources:
2901 	 * notice that since these are just kept
2902 	 * in an array we can do the same logic
2903 	 * whether its MSIX or just legacy.
2905 	for (int i = 0; i < adapter->msi; i++) {
2906 		if (adapter->tag[i] != NULL) {
2907 			bus_teardown_intr(dev, adapter->res[i],
2909 			adapter->tag[i] = NULL;
2911 		if (adapter->res[i] != NULL) {
2912 			bus_release_resource(dev, SYS_RES_IRQ,
2913 			    adapter->rid[i], adapter->res[i]);
2918 	pci_release_msi(dev);
2920 	if (adapter->msix != NULL)
2921 		bus_release_resource(dev, SYS_RES_MEMORY,
2922 		    PCIR_BAR(EM_MSIX_BAR), adapter->msix);
2924 	if (adapter->memory != NULL)
2925 		bus_release_resource(dev, SYS_RES_MEMORY,
2926 		    PCIR_BAR(0), adapter->memory);
2928 	if (adapter->flash != NULL)
2929 		bus_release_resource(dev, SYS_RES_MEMORY,
2930 		    EM_FLASH, adapter->flash);
2932 	if (adapter->ioport != NULL)
2933 		bus_release_resource(dev, SYS_RES_IOPORT,
2934 		    adapter->io_rid, adapter->ioport);
2938 * Setup MSI or MSI/X
/*
 * Probe and allocate MSI-X (82574 "Hartwell" only, capped at 3 vectors)
 * or plain MSI on PCI-Express parts; pre-82571 hardware gets neither.
 * Returns the number of vectors allocated (used as adapter->msi).
 */
2941 em_setup_msix(struct adapter *adapter)
2943 	device_t dev = adapter->dev;
2946 	if (adapter->hw.mac.type < e1000_82571)
2949 	/* Setup MSI/X for Hartwell */
2950 	if (adapter->hw.mac.type == e1000_82574) {
2951 		/* Map the MSIX BAR */
2952 		int rid = PCIR_BAR(EM_MSIX_BAR);
2953 		adapter->msix = bus_alloc_resource_any(dev,
2954 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
2955 		if (!adapter->msix) {
2956 			/* May not be enabled */
2957 			device_printf(adapter->dev,
2958 			    "Unable to map MSIX table \n");
2961 		val = pci_msix_count(dev);
2963 		** 82574 can be configured for 5 but
2964 		** we limit use to 3.
2966 		if (val > 3) val = 3;
2967 		if ((val) && pci_alloc_msix(dev, &val) == 0) {
2968 			device_printf(adapter->dev,"Using MSIX interrupts\n");
	/* Fall back to a single MSI vector when available */
2973 	val = pci_msi_count(dev);
2974 	if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
2976 		device_printf(adapter->dev,"Using MSI interrupt\n");
2982 /*********************************************************************
2984 * Initialize the hardware to a configuration
2985 * as specified by the adapter structure.
2987 **********************************************************************/
/*
 * Bring the MAC to the configuration described by the adapter struct:
 * global reset, SmartPowerDown disable on 82571/82572, flow-control
 * watermarks derived from the packet buffer size, then shared-code
 * e1000_init_hw() and an initial link check.
 */
2989 em_hardware_init(struct adapter *adapter)
2991 	device_t dev = adapter->dev;
2994 	INIT_DEBUGOUT("em_hardware_init: begin");
2996 	/* Issue a global reset */
2997 	e1000_reset_hw(&adapter->hw);
2999 	/* When hardware is reset, fifo_head is also reset */
3000 	adapter->tx_fifo_head = 0;
3002 	/* Set up smart power down as default off on newer adapters. */
3003 	if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
3004 	    adapter->hw.mac.type == e1000_82572)) {
3007 		/* Speed up time to link by disabling smart power down. */
3008 		e1000_read_phy_reg(&adapter->hw,
3009 		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
3010 		phy_tmp &= ~IGP02E1000_PM_SPD;
3011 		e1000_write_phy_reg(&adapter->hw,
3012 		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
3016 	 * These parameters control the automatic generation (Tx) and
3017 	 * response (Rx) to Ethernet PAUSE frames.
3018 	 * - High water mark should allow for at least two frames to be
3019 	 *   received after sending an XOFF.
3020 	 * - Low water mark works best when it is very near the high water mark.
3021 	 *   This allows the receiver to restart by sending XON when it has
3022 	 *   drained a bit. Here we use an arbitary value of 1500 which will
3023 	 *   restart after one full frame is pulled from the buffer. There
3024 	 *   could be several smaller frames in the buffer and if so they will
3025 	 *   not trigger the XON until their total number reduces the buffer
3027 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
3029 	rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
3032 	adapter->hw.fc.high_water = rx_buffer_size -
3033 	    roundup2(adapter->max_frame_size, 1024);
3034 	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
3036 	if (adapter->hw.mac.type == e1000_80003es2lan)
3037 		adapter->hw.fc.pause_time = 0xFFFF;
3039 		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
3040 	adapter->hw.fc.send_xon = TRUE;
3042 	/* Set Flow control, use the tunable location if sane */
	/* BUG(review): this condition is always true — `>= 0 || < 4` accepts
	 * every value, so an out-of-range tunable (e.g. 7) is written to
	 * requested_mode.  It should be `(em_fc_setting >= 0) &&
	 * (em_fc_setting < 4)` to match the "if sane" intent. */
3043 	if ((em_fc_setting >= 0) || (em_fc_setting < 4))
3044 		adapter->hw.fc.requested_mode = em_fc_setting;
3046 		adapter->hw.fc.requested_mode = e1000_fc_none;
3048 	/* Override - workaround for PCHLAN issue */
3049 	if (adapter->hw.mac.type == e1000_pchlan)
3050 		adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
3052 	if (e1000_init_hw(&adapter->hw) < 0) {
3053 		device_printf(dev, "Hardware Initialization Failed\n");
3057 	e1000_check_for_link(&adapter->hw);
3062 /*********************************************************************
3064 * Setup networking device structure and register an interface.
3066 **********************************************************************/
/*
 * Populate the ifnet for this adapter and attach it to the network
 * stack: entry points (init/ioctl/start), capability flags (checksum
 * offload, TSO, VLAN, polling, WOL), and the ifmedia word list for the
 * supported copper or fiber/serdes media types.
 */
3068 em_setup_interface(device_t dev, struct adapter *adapter)
3072 	INIT_DEBUGOUT("em_setup_interface: begin");
3074 	ifp = adapter->ifp = &adapter->arpcom.ac_if;
3075 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3076 	ifp->if_mtu = ETHERMTU;
3077 	ifp->if_init =  em_init;
3078 	ifp->if_softc = adapter;
3079 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3080 	ifp->if_ioctl = em_ioctl;
3081 	ifp->if_start = em_start;
3082 	ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
3083 	ifq_set_ready(&ifp->if_snd);
3085 	ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
3087 	ifp->if_capabilities = ifp->if_capenable = 0;
3089 #if __FreeBSD_version >= 800000
3090 	/* Multiqueue tx functions */
3091 	ifp->if_transmit = em_mq_start;
3092 	ifp->if_qflush = em_qflush;
3093 	adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
	/* HW checksum offload exists on 82543 and later */
3095 	if (adapter->hw.mac.type >= e1000_82543) {
3097 		version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
3098 		ifp->if_capabilities |= version_cap;
3099 		ifp->if_capenable |= version_cap;
3103 	/* Identify TSO capable adapters */
3104 	if ((adapter->hw.mac.type > e1000_82544) &&
3105 	    (adapter->hw.mac.type != e1000_82547))
3106 		ifp->if_capabilities |= IFCAP_TSO4;
3108 	 * By default only enable on PCI-E, this
3109 	 * can be overriden by ifconfig.
3111 	if (adapter->hw.mac.type >= e1000_82571)
3112 		ifp->if_capenable |= IFCAP_TSO4;
3115 	 * Tell the upper layer(s) we
3116 	 * support full VLAN capability
3118 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3119 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
3120 	ifp->if_capenable |= (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
3123 	** Dont turn this on by default, if vlans are
3124 	** created on another pseudo device (eg. lagg)
3125 	** then vlan events are not passed thru, breaking
3126 	** operation, but with HW FILTER off it works. If
3127 	** using vlans directly on the em driver you can
3128 	** enable this and get full hardware tag filtering.
3130 	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
3132 #ifdef DEVICE_POLLING
3133 	ifp->if_capabilities |= IFCAP_POLLING;
3136 	/* Limit WOL to MAGIC, not clear others are used */
3138 		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
3139 		ifp->if_capenable |= IFCAP_WOL_MAGIC;
3143 	 * Specify the media types supported by this adapter and register
3144 	 * callbacks to update media and link information
3146 	ifmedia_init(&adapter->media, IFM_IMASK,
3147 	    em_media_change, em_media_status);
3148 	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3149 	    (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
3150 		u_char fiber_type = IFM_1000_SX;	/* default type */
		/* 82545 fiber parts are 1000base-LX */
3152 		if (adapter->hw.mac.type == e1000_82545)
3153 			fiber_type = IFM_1000_LX;
3154 		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
3156 		ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
3158 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3159 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
3161 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
3163 		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		/* ife PHYs (ICH8 10/100) cannot do 1000T */
3165 		if (adapter->hw.phy.type != e1000_phy_ife) {
3166 			ifmedia_add(&adapter->media,
3167 			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3168 			ifmedia_add(&adapter->media,
3169 			    IFM_ETHER | IFM_1000_T, 0, NULL);
3172 	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3173 	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3177 /*********************************************************************
3179 * Workaround for SmartSpeed on 82541 and 82547 controllers
3181 **********************************************************************/
/*
 * SmartSpeed workaround for 82541/82547 (IGP PHY): when gigabit
 * autonegotiation repeatedly fails with a master/slave config fault,
 * temporarily disable manual master/slave mode and restart autoneg;
 * after EM_SMARTSPEED_DOWNSHIFT iterations re-enable it (2/3-pair cable
 * case), and reset the counter at EM_SMARTSPEED_MAX.  Driven from the
 * 1 Hz local timer while link is down.
 */
3183 em_smartspeed(struct adapter *adapter)
	/* Only relevant while link is down, on IGP PHYs, with 1000FD advertised */
3187 	if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
3188 	    adapter->hw.mac.autoneg == 0 ||
3189 	    (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
3192 	if (adapter->smartspeed == 0) {
3193 		/* If Master/Slave config fault is asserted twice,
3194 		 * we assume back-to-back */
3195 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3196 		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
3198 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3199 		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
3200 			e1000_read_phy_reg(&adapter->hw,
3201 			    PHY_1000T_CTRL, &phy_tmp);
3202 			if(phy_tmp & CR_1000T_MS_ENABLE) {
3203 				phy_tmp &= ~CR_1000T_MS_ENABLE;
3204 				e1000_write_phy_reg(&adapter->hw,
3205 				    PHY_1000T_CTRL, phy_tmp);
3206 				adapter->smartspeed++;
3207 				if(adapter->hw.mac.autoneg &&
3208 				   !e1000_copper_link_autoneg(&adapter->hw) &&
3209 				   !e1000_read_phy_reg(&adapter->hw,
3210 				    PHY_CONTROL, &phy_tmp)) {
3211 					phy_tmp |= (MII_CR_AUTO_NEG_EN |
3212 						    MII_CR_RESTART_AUTO_NEG);
3213 					e1000_write_phy_reg(&adapter->hw,
3214 					    PHY_CONTROL, phy_tmp);
3219 	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
3220 		/* If still no link, perhaps using 2/3 pair cable */
3221 		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
3222 		phy_tmp |= CR_1000T_MS_ENABLE;
3223 		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
3224 		if(adapter->hw.mac.autoneg &&
3225 		   !e1000_copper_link_autoneg(&adapter->hw) &&
3226 		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
3227 			phy_tmp |= (MII_CR_AUTO_NEG_EN |
3228 				    MII_CR_RESTART_AUTO_NEG);
3229 			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
3232 	/* Restart process after EM_SMARTSPEED_MAX iterations */
3233 	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
3234 		adapter->smartspeed = 0;
3239 * Manage DMA'able memory.
/*
 * bus_dmamap_load() callback: store the single segment's bus address
 * into the bus_addr_t pointed to by 'arg'.
 */
3242 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3246 	*(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * Allocate a coherent DMA-able region of 'size' bytes: create a tag,
 * allocate and map the memory, and load it to obtain the bus address
 * (via em_dmamap_cb).  On failure, tears down whatever was created
 * (the trailing unload/free/destroy lines are the error-unwind path)
 * and NULLs the handles so em_dma_free() is safe afterwards.
 */
3250 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3251         struct em_dma_alloc *dma, int mapflags)
3255 	error = bus_dma_tag_create(NULL,	/* parent */
3256 				EM_DBA_ALIGN, 0,	/* alignment, bounds */
3257 				BUS_SPACE_MAXADDR,	/* lowaddr */
3258 				BUS_SPACE_MAXADDR,	/* highaddr */
3259 				NULL, NULL,		/* filter, filterarg */
3262 				size,			/* maxsegsize */
3266 		device_printf(adapter->dev,
3267 		    "%s: bus_dma_tag_create failed: %d\n",
3272 	error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3273 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3275 		device_printf(adapter->dev,
3276 		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3277 		    __func__, (uintmax_t)size, error);
3282 	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3283 	    size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
3284 	if (error || dma->dma_paddr == 0) {
3285 		device_printf(adapter->dev,
3286 		    "%s: bus_dmamap_load failed: %d\n",
	/* Error-unwind path: release in reverse order of acquisition */
3294 	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3296 	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3297 	bus_dma_tag_destroy(dma->dma_tag);
3299 	dma->dma_map = NULL;
3300 	dma->dma_tag = NULL;
/*
 * Tear down a region created by em_dma_malloc(): sync, unload, free,
 * and destroy the tag.  NULL-safe: a never-allocated or already-freed
 * em_dma_alloc is a no-op.
 */
3306 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3308 	if (dma->dma_tag == NULL)
3310 	if (dma->dma_map != NULL) {
3311 		bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3312 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3313 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3314 		bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3315 		dma->dma_map = NULL;
3317 	bus_dma_tag_destroy(dma->dma_tag);
3318 	dma->dma_tag = NULL;
3322 /*********************************************************************
3324 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3325 * the information needed to transmit a packet on the wire.
3327 **********************************************************************/
/*
 * Create the TX DMA tag (sized for TSO), allocate the tx_buffer array,
 * and create one DMA map per descriptor.  On any failure, falls
 * through to em_free_transmit_structures() to unwind.
 */
3329 em_allocate_transmit_structures(struct adapter *adapter)
3331 	device_t dev = adapter->dev;
3332 	struct em_buffer *tx_buffer;
3336 	 * Create DMA tags for tx descriptors
3338 	if ((error = bus_dma_tag_create(NULL,		/* parent */
3339 				1, 0,			/* alignment, bounds */
3340 				BUS_SPACE_MAXADDR,	/* lowaddr */
3341 				BUS_SPACE_MAXADDR,	/* highaddr */
3342 				NULL, NULL,		/* filter, filterarg */
3343 				EM_TSO_SIZE,		/* maxsize */
3344 				EM_MAX_SCATTER,		/* nsegments */
3345 				EM_TSO_SEG_SIZE,	/* maxsegsize */
3347 				&adapter->txtag)) != 0) {
3348 		device_printf(dev, "Unable to allocate TX DMA tag\n");
3352 	adapter->tx_buffer_area = kmalloc(sizeof(struct em_buffer) *
3353 	    adapter->num_tx_desc, M_DEVBUF, M_INTWAIT | M_ZERO);
3354 	if (adapter->tx_buffer_area == NULL) {
3355 		device_printf(dev, "Unable to allocate tx_buffer memory\n");
3360 	/* Create the descriptor buffer dma maps */
3361 	for (int i = 0; i < adapter->num_tx_desc; i++) {
3362 		tx_buffer = &adapter->tx_buffer_area[i];
3363 		error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
3365 			device_printf(dev, "Unable to create TX DMA map\n");
		/* -1 marks "no end-of-packet descriptor recorded yet" */
3368 		tx_buffer->next_eop = -1;
	/* Error-unwind: release anything allocated above */
3373 	em_free_transmit_structures(adapter);
3377 /*********************************************************************
3379 * (Re)Initialize transmit structures.
3381 **********************************************************************/
/*
 * (Re)initialize the TX ring: zero the descriptor memory, free any
 * mbufs still attached to the buffers, reset the ring indices and
 * available count, and sync the descriptor DMA map.
 */
3383 em_setup_transmit_structures(struct adapter *adapter)
3385 	struct em_buffer *tx_buffer;
3387 	/* Clear the old ring contents */
3388 	bzero(adapter->tx_desc_base,
3389 	    (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3391 	/* Free any existing TX buffers */
	/* NOTE(review): the `tx_buffer++` increment is dead — tx_buffer is
	 * reassigned from the array at the top of each iteration. */
3392 	for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3393 		tx_buffer = &adapter->tx_buffer_area[i];
3394 		bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3395 		    BUS_DMASYNC_POSTWRITE);
3396 		bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3397 		m_freem(tx_buffer->m_head);
3398 		tx_buffer->m_head = NULL;
3399 		tx_buffer->next_eop = -1;
	/* Reset ring state: ring is empty, all descriptors available */
3403 	adapter->next_avail_tx_desc = 0;
3404 	adapter->next_tx_to_clean = 0;
3405 	adapter->num_tx_desc_avail = adapter->num_tx_desc;
3407 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3408 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3413 /*********************************************************************
3415 * Enable transmit unit.
3417 **********************************************************************/
/*
 * Program the hardware TX unit: descriptor ring base/length and
 * head/tail pointers, inter-packet-gap timing per MAC type, TX
 * interrupt delay registers, the TARC workarounds for 82571/82572 and
 * 80003es2lan, and finally TCTL (which enables transmission).  Also
 * seeds adapter->txd_cmd with the per-descriptor command bits.
 */
3419 em_initialize_transmit_unit(struct adapter *adapter)
3421 	u32	tctl, tarc, tipg = 0;
3424 	 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3425 	/* Setup the Base and Length of the Tx Descriptor Ring */
3426 	bus_addr = adapter->txdma.dma_paddr;
3427 	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
3428 	    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
3429 	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
3430 	    (u32)(bus_addr >> 32));
3431 	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
3433 	/* Setup the HW Tx Head and Tail descriptor pointers */
3434 	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
3435 	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
3437 	HW_DEBUGOUT2("Base = %x, Length = %x\n",
3438 	    E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
3439 	    E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
3441 	/* Set the default values for the Tx Inter Packet Gap timer */
3442 	switch (adapter->hw.mac.type) {
3444 		tipg = DEFAULT_82542_TIPG_IPGT;
3445 		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3446 		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3448 	case e1000_80003es2lan:
3449 		tipg = DEFAULT_82543_TIPG_IPGR1;
3450 		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3451 		    E1000_TIPG_IPGR2_SHIFT;
	/* Default case: IPGT differs for fiber/serdes vs copper */
3454 		if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3455 		    (adapter->hw.phy.media_type ==
3456 		    e1000_media_type_internal_serdes))
3457 			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3459 			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3460 		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3461 		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3464 	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
3465 	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
3466 	if(adapter->hw.mac.type >= e1000_82540)
3467 		E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3468 		    adapter->tx_abs_int_delay.value);
	/* TARC errata/performance bits for specific MACs */
3470 	if ((adapter->hw.mac.type == e1000_82571) ||
3471 	    (adapter->hw.mac.type == e1000_82572)) {
3472 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3473 		tarc |= SPEED_MODE_BIT;
3474 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3475 	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
3476 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3478 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3479 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3481 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3484 	/* Program the Transmit Control Register */
3485 	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3486 	tctl &= ~E1000_TCTL_CT;
3487 	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3488 		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3490 	if (adapter->hw.mac.type >= e1000_82571)
3491 		tctl |= E1000_TCTL_MULR;
3493 	/* This write will effectively turn on the transmit unit. */
3494 	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3496 	/* Setup Transmit Descriptor Base Settings */
3497 	adapter->txd_cmd = E1000_TXD_CMD_IFCS;
3499 	if (adapter->tx_int_delay.value > 0)
3500 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3503 /*********************************************************************
3505 * Free all transmit related data structures.
3507 **********************************************************************/
3509 em_free_transmit_structures(struct adapter *adapter)
/*
 * Teardown path for the TX side: unloads and destroys every per-buffer
 * DMA map, frees any in-flight mbufs, releases the tx_buffer array and
 * the TX DMA tag.  Safe to call with partially-initialized state since
 * every pointer is NULL-checked and cleared after release.
 */
3511 struct em_buffer *tx_buffer;
3513 INIT_DEBUGOUT("free_transmit_structures: begin");
3515 if (adapter->tx_buffer_area != NULL) {
3516 for (int i = 0; i < adapter->num_tx_desc; i++) {
3517 tx_buffer = &adapter->tx_buffer_area[i];
3518 if (tx_buffer->m_head != NULL) {
3519 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3520 BUS_DMASYNC_POSTWRITE);
3521 bus_dmamap_unload(adapter->txtag,
3523 m_freem(tx_buffer->m_head);
3524 tx_buffer->m_head = NULL;
3525 } else if (tx_buffer->map != NULL)
3526 bus_dmamap_unload(adapter->txtag,
3528 if (tx_buffer->map != NULL) {
3529 bus_dmamap_destroy(adapter->txtag,
3531 tx_buffer->map = NULL;
3535 if (adapter->tx_buffer_area != NULL) {
3536 kfree(adapter->tx_buffer_area, M_DEVBUF);
3537 adapter->tx_buffer_area = NULL;
3539 if (adapter->txtag != NULL) {
3540 bus_dma_tag_destroy(adapter->txtag);
3541 adapter->txtag = NULL;
/* The multiqueue buf_ring exists only on FreeBSD 8.0 and later. */
3543 #if __FreeBSD_version >= 800000
3544 if (adapter->br != NULL)
3545 buf_ring_free(adapter->br, M_DEVBUF);
3549 /*********************************************************************
3551 * The offload context needs to be set when we transfer the first
3552 * packet of a particular protocol (TCP/UDP). This routine has been
3553 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3555 * Added back the old method of keeping the current context type
3556 * and not setting if unnecessary, as this is reported to be a
3557 * big performance win. -jfv
3558 **********************************************************************/
3560 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3561 u32 *txd_upper, u32 *txd_lower)
/*
 * Builds an e1000 context descriptor describing where the hardware
 * should compute and insert IP/TCP/UDP checksums for *mp, and OR's the
 * matching POPTS/command bits into *txd_upper / *txd_lower.  Skips
 * emitting a new context when adapter->last_hw_offload already matches
 * the requested protocol (the performance win noted above).  Consumes
 * one TX descriptor when a context is written.
 */
3563 struct e1000_context_desc *TXD = NULL;
3564 struct em_buffer *tx_buffer;
3565 struct ether_vlan_header *eh;
3566 struct ip *ip = NULL;
3567 struct ip6_hdr *ip6;
3568 int curr_txd, ehdrlen;
3569 u32 cmd, hdr_len, ip_hlen;
3574 cmd = hdr_len = ipproto = 0;
3575 curr_txd = adapter->next_avail_tx_desc;
3578 * Determine where frame payload starts.
3579 * Jump over vlan headers if already present,
3580 * helpful for QinQ too.
3582 eh = mtod(mp, struct ether_vlan_header *);
3583 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3584 etype = ntohs(eh->evl_proto);
3585 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3587 etype = ntohs(eh->evl_encap_proto);
3588 ehdrlen = ETHER_HDR_LEN;
3592 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3593 * TODO: Support SCTP too when it hits the tree.
3597 ip = (struct ip *)(mp->m_data + ehdrlen);
3598 ip_hlen = ip->ip_hl << 2;
3600 /* Setup of IP header checksum. */
3601 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
3603 * Start offset for header checksum calculation.
3604 * End offset for header checksum calculation.
3605 * Offset of place to put the checksum.
3607 TXD = (struct e1000_context_desc *)
3608 &adapter->tx_desc_base[curr_txd];
3609 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3610 TXD->lower_setup.ip_fields.ipcse =
3611 htole16(ehdrlen + ip_hlen);
3612 TXD->lower_setup.ip_fields.ipcso =
3613 ehdrlen + offsetof(struct ip, ip_sum);
3614 cmd |= E1000_TXD_CMD_IP;
3615 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
/* Bail if the first mbuf does not hold the whole IP header. */
3618 if (mp->m_len < ehdrlen + ip_hlen)
3619 return; /* failure */
3621 hdr_len = ehdrlen + ip_hlen;
3625 case ETHERTYPE_IPV6:
3626 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3627 ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3629 if (mp->m_len < ehdrlen + ip_hlen)
3630 return; /* failure */
3632 /* IPv6 doesn't have a header checksum. */
3634 hdr_len = ehdrlen + ip_hlen;
3635 ipproto = ip6->ip6_nxt;
3646 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3647 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3648 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3649 /* no need for context if already set */
3650 if (adapter->last_hw_offload == CSUM_TCP)
3652 adapter->last_hw_offload = CSUM_TCP;
3654 * Start offset for payload checksum calculation.
3655 * End offset for payload checksum calculation.
3656 * Offset of place to put the checksum.
3658 TXD = (struct e1000_context_desc *)
3659 &adapter->tx_desc_base[curr_txd];
3660 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3661 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3662 TXD->upper_setup.tcp_fields.tucso =
3663 hdr_len + offsetof(struct tcphdr, th_sum);
3664 cmd |= E1000_TXD_CMD_TCP;
3669 if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3670 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3671 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3672 /* no need for context if already set */
3673 if (adapter->last_hw_offload == CSUM_UDP)
3675 adapter->last_hw_offload = CSUM_UDP;
3677 * Start offset for header checksum calculation.
3678 * End offset for header checksum calculation.
3679 * Offset of place to put the checksum.
3681 TXD = (struct e1000_context_desc *)
3682 &adapter->tx_desc_base[curr_txd];
3683 TXD->upper_setup.tcp_fields.tucss = hdr_len;
3684 TXD->upper_setup.tcp_fields.tucse = htole16(0);
3685 TXD->upper_setup.tcp_fields.tucso =
3686 hdr_len + offsetof(struct udphdr, uh_sum);
/* Finalize the context descriptor and account for the slot used. */
3694 TXD->tcp_seg_setup.data = htole32(0);
3695 TXD->cmd_and_length =
3696 htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3697 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3698 tx_buffer->m_head = NULL;
3699 tx_buffer->next_eop = -1;
/* Wrap the ring index and reserve this descriptor. */
3701 if (++curr_txd == adapter->num_tx_desc)
3704 adapter->num_tx_desc_avail--;
3705 adapter->next_avail_tx_desc = curr_txd;
3710 /**********************************************************************
3712 * Setup work for hardware segmentation offload (TSO)
3714 **********************************************************************/
3716 em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
/*
 * Validates that *mp carries an offloadable TCP segment (headers in
 * the first mbuf, protocol is TCP), seeds th_sum with the pseudo-header
 * checksum, and writes a TSO context descriptor (MSS, header length,
 * checksum offsets).  Returns FALSE when TSO cannot be applied;
 * consumes one TX descriptor and sets adapter->tx_tso on success.
 */
3719 struct e1000_context_desc *TXD;
3720 struct em_buffer *tx_buffer;
3721 struct ether_vlan_header *eh;
3723 struct ip6_hdr *ip6;
3725 int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
3729 * This function could/should be extended to support IP/IPv6
3730 * fragmentation as well. But as they say, one step at a time.
3734 * Determine where frame payload starts.
3735 * Jump over vlan headers if already present,
3736 * helpful for QinQ too.
3738 eh = mtod(mp, struct ether_vlan_header *);
3739 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3740 etype = ntohs(eh->evl_proto);
3741 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3743 etype = ntohs(eh->evl_encap_proto);
3744 ehdrlen = ETHER_HDR_LEN;
3747 /* Ensure we have at least the IP+TCP header in the first mbuf. */
3748 if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3749 return FALSE; /* -1 */
3752 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
3753 * TODO: Support SCTP too when it hits the tree.
3758 ip = (struct ip *)(mp->m_data + ehdrlen);
3759 if (ip->ip_p != IPPROTO_TCP)
3760 return FALSE; /* 0 */
3763 ip_hlen = ip->ip_hl << 2;
3764 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3765 return FALSE; /* -1 */
3766 th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
/* Pre-load the TCP checksum field with the pseudo-header sum;
 * the hardware folds in the payload during segmentation. */
3768 th->th_sum = in_pseudo(ip->ip_src.s_addr,
3769 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3771 th->th_sum = mp->m_pkthdr.csum_data;
3774 case ETHERTYPE_IPV6:
3776 return FALSE; /* Not supported yet. */
3777 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3778 if (ip6->ip6_nxt != IPPROTO_TCP)
3779 return FALSE; /* 0 */
3781 ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
3782 if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3783 return FALSE; /* -1 */
3784 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
/* NOTE(review): 'ip->ip6_dst' looks wrong — presumably should be
 * 'ip6->ip6_dst'.  Currently unreachable (the IPv6 case returns
 * FALSE above), but must be fixed before enabling IPv6 TSO. */
3786 th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
3787 htons(IPPROTO_TCP)); /* XXX: function notyet. */
3789 th->th_sum = mp->m_pkthdr.csum_data;
3795 hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
3797 *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
3798 E1000_TXD_DTYP_D | /* Data descr type */
3799 E1000_TXD_CMD_TSE); /* Do TSE on this packet */
3801 /* IP and/or TCP header checksum calculation and insertion. */
3802 *txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
3803 E1000_TXD_POPTS_TXSM) << 8;
3805 curr_txd = adapter->next_avail_tx_desc;
3806 tx_buffer = &adapter->tx_buffer_area[curr_txd];
3807 TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3809 /* IPv6 doesn't have a header checksum. */
3812 * Start offset for header checksum calculation.
3813 * End offset for header checksum calculation.
3814 * Offset of place to put the checksum.
3816 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3817 TXD->lower_setup.ip_fields.ipcse =
3818 htole16(ehdrlen + ip_hlen - 1);
3819 TXD->lower_setup.ip_fields.ipcso =
3820 ehdrlen + offsetof(struct ip, ip_sum);
3823 * Start offset for payload checksum calculation.
3824 * End offset for payload checksum calculation.
3825 * Offset of place to put the checksum.
3827 TXD->upper_setup.tcp_fields.tucss =
3829 TXD->upper_setup.tcp_fields.tucse = 0;
3830 TXD->upper_setup.tcp_fields.tucso =
3831 ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
3833 * Payload size per packet w/o any headers.
3834 * Length of all headers up to payload.
3836 TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
3837 TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
3839 TXD->cmd_and_length = htole32(adapter->txd_cmd |
3840 E1000_TXD_CMD_DEXT | /* Extended descr */
3841 E1000_TXD_CMD_TSE | /* TSE context */
3842 (isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
3843 E1000_TXD_CMD_TCP | /* Do TCP checksum */
3844 (mp->m_pkthdr.len - (hdr_len))); /* Total len */
3846 tx_buffer->m_head = NULL;
3847 tx_buffer->next_eop = -1;
/* Wrap the ring index and reserve this descriptor. */
3849 if (++curr_txd == adapter->num_tx_desc)
3852 adapter->num_tx_desc_avail--;
3853 adapter->next_avail_tx_desc = curr_txd;
3854 adapter->tx_tso = TRUE;
3861 /**********************************************************************
3863 * Examine each tx_buffer in the used queue. If the hardware is done
3864 * processing the packet then free associated resources. The
3865 * tx_buffer is put back on the free queue.
3867 **********************************************************************/
3869 em_txeof(struct adapter *adapter)
/*
 * TX completion: walks descriptors from next_tx_to_clean packet by
 * packet (using each buffer's cached next_eop index), frees completed
 * mbufs and their DMA maps, and reclaims descriptors.  Must be called
 * with the TX lock held (asserted below).
 */
3871 int first, last, done, num_avail;
3872 struct em_buffer *tx_buffer;
3873 struct e1000_tx_desc *tx_desc, *eop_desc;
3874 struct ifnet *ifp = adapter->ifp;
3876 EM_TX_LOCK_ASSERT(adapter);
/* Nothing outstanding: the whole ring is already free. */
3878 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3881 num_avail = adapter->num_tx_desc_avail;
3882 first = adapter->next_tx_to_clean;
3883 tx_desc = &adapter->tx_desc_base[first];
3884 tx_buffer = &adapter->tx_buffer_area[first];
3885 last = tx_buffer->next_eop;
3886 eop_desc = &adapter->tx_desc_base[last];
3889 * What this does is get the index of the
3890 * first descriptor AFTER the EOP of the
3891 * first packet, that way we can do the
3892 * simple comparison on the inner while loop.
3894 if (++last == adapter->num_tx_desc)
3898 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3899 BUS_DMASYNC_POSTREAD);
/* Outer loop: one iteration per completed packet (DD set on EOP). */
3901 while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3902 /* We clean the range of the packet */
3903 while (first != done) {
3904 tx_desc->upper.data = 0;
3905 tx_desc->lower.data = 0;
3906 tx_desc->buffer_addr = 0;
3909 if (tx_buffer->m_head) {
3911 bus_dmamap_sync(adapter->txtag,
3913 BUS_DMASYNC_POSTWRITE);
3914 bus_dmamap_unload(adapter->txtag,
3917 m_freem(tx_buffer->m_head);
3918 tx_buffer->m_head = NULL;
3920 tx_buffer->next_eop = -1;
/* Progress was made: push the watchdog timestamp forward. */
3921 adapter->watchdog_time = ticks;
3923 if (++first == adapter->num_tx_desc)
3926 tx_buffer = &adapter->tx_buffer_area[first];
3927 tx_desc = &adapter->tx_desc_base[first];
3929 /* See if we can continue to the next packet */
3930 last = tx_buffer->next_eop;
3932 eop_desc = &adapter->tx_desc_base[last];
3933 /* Get new done point */
3934 if (++last == adapter->num_tx_desc) last = 0;
3939 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3940 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3942 adapter->next_tx_to_clean = first;
3945 * If we have enough room, clear IFF_OACTIVE to
3946 * tell the stack that it is OK to send packets.
3947 * If there are no pending descriptors, clear the watchdog.
3949 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
3950 ifp->if_flags &= ~IFF_OACTIVE;
3951 if (num_avail == adapter->num_tx_desc) {
3952 adapter->watchdog_check = FALSE;
3953 adapter->num_tx_desc_avail = num_avail;
3958 adapter->num_tx_desc_avail = num_avail;
3962 /*********************************************************************
3964 * When Link is lost sometimes there is work still in the TX ring
3965 * which may result in a watchdog, rather than allow that we do an
3966 * attempted cleanup and then reinit here. Note that this has been
3967 * seen mostly with fiber adapters.
3969 **********************************************************************/
3971 em_tx_purge(struct adapter *adapter)
/* Only acts when the link is down while the TX watchdog is armed. */
3973 if ((!adapter->link_active) && (adapter->watchdog_check)) {
3974 EM_TX_LOCK(adapter);
3976 EM_TX_UNLOCK(adapter);
/* If cleanup did not disarm the watchdog, fully reinitialize. */
3977 if (adapter->watchdog_check) /* Still outstanding? */
3978 em_init_locked(adapter);
3982 /*********************************************************************
3984 * Get a buffer from system mbuf buffer pool.
3986 **********************************************************************/
3988 em_get_buf(struct adapter *adapter, int i)
/*
 * Allocates a fresh receive mbuf cluster for RX ring slot i, DMA-loads
 * it via the spare map (then swaps the spare with the slot's old map),
 * and writes the new physical address into the RX descriptor.
 * Returns non-zero on allocation or mapping failure.
 */
3991 bus_dma_segment_t segs[1];
3993 struct em_buffer *rx_buffer;
3996 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3998 adapter->mbuf_cluster_failed++;
4001 m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Align the IP header when the full frame fits with the offset. */
4003 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4004 m_adj(m, ETHER_ALIGN);
4007 * Using memory from the mbuf cluster pool, invoke the
4008 * bus_dma machinery to arrange the memory mapping.
4010 error = bus_dmamap_load_mbuf_segment(adapter->rxtag,
4011 adapter->rx_sparemap, m, segs, 1, &nsegs, BUS_DMA_NOWAIT);
4017 /* If nsegs is wrong then the stack is corrupt. */
4018 KASSERT(nsegs == 1, ("Too many segments returned!"));
4020 rx_buffer = &adapter->rx_buffer_area[i];
4021 if (rx_buffer->m_head != NULL)
4022 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
/* Swap: the freshly-loaded spare map becomes this slot's map. */
4024 map = rx_buffer->map;
4025 rx_buffer->map = adapter->rx_sparemap;
4026 adapter->rx_sparemap = map;
4027 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4028 rx_buffer->m_head = m;
4030 adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4034 /*********************************************************************
4036 * Allocate memory for rx_buffer structures. Since we use one
4037 * rx_buffer per received packet, the maximum number of rx_buffer's
4038 * that we'll need is equal to the number of receive descriptors
4039 * that we've allocated.
4041 **********************************************************************/
4043 em_allocate_receive_structures(struct adapter *adapter)
/*
 * Creates the RX bookkeeping: the rx_buffer array, the RX DMA tag
 * (single MCLBYTES segment per buffer), the spare DMA map used by
 * em_get_buf(), and one DMA map per descriptor.  On any failure,
 * unwinds via em_free_receive_structures().
 */
4045 device_t dev = adapter->dev;
4046 struct em_buffer *rx_buffer;
4049 adapter->rx_buffer_area = kmalloc(sizeof(struct em_buffer) *
4050 adapter->num_rx_desc, M_DEVBUF, M_INTWAIT | M_ZERO);
4051 if (adapter->rx_buffer_area == NULL) {
4052 device_printf(dev, "Unable to allocate rx_buffer memory\n");
4056 error = bus_dma_tag_create(NULL, /* parent */
4057 1, 0, /* alignment, bounds */
4058 BUS_SPACE_MAXADDR, /* lowaddr */
4059 BUS_SPACE_MAXADDR, /* highaddr */
4060 NULL, NULL, /* filter, filterarg */
4061 MCLBYTES, /* maxsize */
4063 MCLBYTES, /* maxsegsize */
4067 device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4072 /* Create the spare map (used by getbuf) */
4073 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4074 &adapter->rx_sparemap);
4076 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* One DMA map per RX descriptor slot. */
4081 rx_buffer = adapter->rx_buffer_area;
4082 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4083 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4086 device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
/* Error path: release everything allocated so far. */
4095 em_free_receive_structures(adapter);
4099 /*********************************************************************
4101 * (Re)initialize receive structures.
4103 **********************************************************************/
4105 em_setup_receive_structures(struct adapter *adapter)
/*
 * Resets the RX descriptor ring to a clean state: zeroes every
 * descriptor, frees any mbufs still attached to the ring, then
 * repopulates each slot with a fresh cluster via em_get_buf().
 */
4107 struct em_buffer *rx_buffer;
4110 /* Reset descriptor ring */
4111 bzero(adapter->rx_desc_base,
4112 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4114 /* Free current RX buffers. */
4115 rx_buffer = adapter->rx_buffer_area;
4116 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4117 if (rx_buffer->m_head != NULL) {
4118 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4119 BUS_DMASYNC_POSTREAD);
4120 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4121 m_freem(rx_buffer->m_head);
4122 rx_buffer->m_head = NULL;
4126 /* Allocate new ones. */
4127 for (i = 0; i < adapter->num_rx_desc; i++) {
4128 error = em_get_buf(adapter, i);
4133 /* Setup our descriptor pointers */
4134 adapter->next_rx_desc_to_check = 0;
4135 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4136 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4141 /*********************************************************************
4143 * Enable receive unit.
4145 **********************************************************************/
4146 #define MAX_INTS_PER_SEC 8000
4147 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
4150 em_initialize_receive_unit(struct adapter *adapter)
/*
 * Programs the RX side of the chip: disables receives while the ring
 * registers are set up, configures interrupt throttling, the ring
 * base/length, RCTL (buffer size, broadcast accept, long-packet
 * enable), RX checksum offload, then re-enables receives and primes
 * the head/tail pointers.
 */
4152 struct ifnet *ifp = adapter->ifp;
4156 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
4159 * Make sure receives are disabled while setting
4160 * up the descriptor ring
4162 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4163 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4165 if (adapter->hw.mac.type >= e1000_82540) {
4166 E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4167 adapter->rx_abs_int_delay.value);
4169 * Set the interrupt throttling rate. Value is calculated
4170 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
4172 E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
4176 ** When using MSIX interrupts we need to throttle
4177 ** using the EITR register (82574 only)
4180 for (int i = 0; i < 4; i++)
4181 E1000_WRITE_REG(&adapter->hw,
4182 E1000_EITR_82574(i), DEFAULT_ITR);
4184 /* Disable accelerated acknowledge */
4185 if (adapter->hw.mac.type == e1000_82574)
4186 E1000_WRITE_REG(&adapter->hw,
4187 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
4189 /* Setup the Base and Length of the Rx Descriptor Ring */
4190 bus_addr = adapter->rxdma.dma_paddr;
4191 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
4192 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
4193 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
4194 (u32)(bus_addr >> 32));
4195 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
4198 /* Setup the Receive Control Register */
4199 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4200 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
4201 E1000_RCTL_RDMTS_HALF |
4202 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4204 /* Make sure VLAN Filters are off */
4205 rctl &= ~E1000_RCTL_VFE;
/* Accept bad packets only when the TBI workaround requires it. */
4207 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
4208 rctl |= E1000_RCTL_SBP;
4210 rctl &= ~E1000_RCTL_SBP;
/* Select the HW receive buffer size matching our mbuf strategy. */
4212 switch (adapter->rx_buffer_len) {
4215 rctl |= E1000_RCTL_SZ_2048;
4218 rctl |= E1000_RCTL_SZ_4096 |
4219 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4222 rctl |= E1000_RCTL_SZ_8192 |
4223 E1000_RCTL_BSEX | E1000_RCTL_LPE;
4226 rctl |= E1000_RCTL_SZ_16384 |
4227 E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long Packet Enable follows the configured MTU. */
4231 if (ifp->if_mtu > ETHERMTU)
4232 rctl |= E1000_RCTL_LPE;
4234 rctl &= ~E1000_RCTL_LPE;
4236 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
4237 if ((adapter->hw.mac.type >= e1000_82543) &&
4238 (ifp->if_capenable & IFCAP_RXCSUM)) {
4239 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
4240 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
4241 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
4245 ** XXX TEMPORARY WORKAROUND: on some systems with 82573
4246 ** long latencies are observed, like Lenovo X60. This
4247 ** change eliminates the problem, but since having positive
4248 ** values in RDTR is a known source of problems on other
4249 ** platforms another solution is being sought.
4251 if (adapter->hw.mac.type == e1000_82573)
4252 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
4254 /* Enable Receives */
4255 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4258 * Setup the HW Rx Head and
4259 * Tail Descriptor Pointers
4261 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
4262 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
4267 /*********************************************************************
4269 * Free receive related data structures.
4271 **********************************************************************/
4273 em_free_receive_structures(struct adapter *adapter)
/*
 * Teardown path for the RX side (mirror of em_free_transmit_structures):
 * destroys the spare map, unloads/destroys each per-buffer DMA map,
 * frees in-flight mbufs, then releases the rx_buffer array and the
 * RX DMA tag.  NULL-checks throughout make it safe after partial init.
 */
4275 struct em_buffer *rx_buffer;
4278 INIT_DEBUGOUT("free_receive_structures: begin");
4280 if (adapter->rx_sparemap) {
4281 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4282 adapter->rx_sparemap = NULL;
4285 /* Cleanup any existing buffers */
4286 if (adapter->rx_buffer_area != NULL) {
4287 rx_buffer = adapter->rx_buffer_area;
4288 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4289 if (rx_buffer->m_head != NULL) {
4290 bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4291 BUS_DMASYNC_POSTREAD);
4292 bus_dmamap_unload(adapter->rxtag,
4294 m_freem(rx_buffer->m_head);
4295 rx_buffer->m_head = NULL;
4296 } else if (rx_buffer->map != NULL)
4297 bus_dmamap_unload(adapter->rxtag,
4299 if (rx_buffer->map != NULL) {
4300 bus_dmamap_destroy(adapter->rxtag,
4302 rx_buffer->map = NULL;
4307 if (adapter->rx_buffer_area != NULL) {
4308 kfree(adapter->rx_buffer_area, M_DEVBUF);
4309 adapter->rx_buffer_area = NULL;
4312 if (adapter->rxtag != NULL) {
4313 bus_dma_tag_destroy(adapter->rxtag);
4314 adapter->rxtag = NULL;
4318 /*********************************************************************
4320 * This routine executes in interrupt context. It replenishes
4321 * the mbufs in the descriptor and sends data which has been
4322 * dma'ed into host memory to upper layer.
4324 * We loop at most count times if count is > 0, or until done if
4327 * For polling we also now return the number of cleaned packets
4328 *********************************************************************/
4330 em_rxeof(struct adapter *adapter, int count)
/*
 * RX completion: walks the descriptor ring from next_rx_desc_to_check,
 * chains multi-descriptor frames via adapter->fmp/lmp, strips the CRC,
 * applies the TBI acceptance workaround for marginal frames, attaches
 * checksum/VLAN metadata, and hands completed packets to the stack
 * (dropping the RX lock around if_input).  Finally advances the HW
 * tail pointer (RDT).
 */
/* NOTE(review): stray double semicolon below — harmless empty
 * declaration artifact, but should be a single ';'. */
4332 struct ifnet *ifp = adapter->ifp;;
4334 u8 status, accept_frame = 0, eop = 0;
4336 u16 len, desc_len, prev_len_adj;
4337 struct e1000_rx_desc *current_desc;
4339 EM_RX_LOCK(adapter);
4340 i = adapter->next_rx_desc_to_check;
4341 current_desc = &adapter->rx_desc_base[i];
4342 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4343 BUS_DMASYNC_POSTREAD);
/* Fast exit when the first descriptor has not completed. */
4345 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4346 EM_RX_UNLOCK(adapter);
4350 while ((current_desc->status & E1000_RXD_STAT_DD) &&
4352 (ifp->if_flags & IFF_RUNNING)) {
4353 struct mbuf *m = NULL;
4355 mp = adapter->rx_buffer_area[i].m_head;
4357 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4358 * needs to access the last received byte in the mbuf.
4360 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4361 BUS_DMASYNC_POSTREAD);
4365 desc_len = le16toh(current_desc->length);
4366 status = current_desc->status;
4367 if (status & E1000_RXD_STAT_EOP) {
/* EOP: subtract the trailing Ethernet CRC from the length;
 * if this fragment is shorter than the CRC, remember how much
 * to trim from the previous mbuf in the chain. */
4370 if (desc_len < ETHER_CRC_LEN) {
4372 prev_len_adj = ETHER_CRC_LEN - desc_len;
4374 len = desc_len - ETHER_CRC_LEN;
4380 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
4382 u32 pkt_len = desc_len;
4384 if (adapter->fmp != NULL)
4385 pkt_len += adapter->fmp->m_pkthdr.len;
/* TBI workaround: some marginal frames are still acceptable. */
4387 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4388 if (TBI_ACCEPT(&adapter->hw, status,
4389 current_desc->errors, pkt_len, last_byte,
4390 adapter->min_frame_size, adapter->max_frame_size)) {
4391 e1000_tbi_adjust_stats_82543(&adapter->hw,
4392 &adapter->stats, pkt_len,
4393 adapter->hw.mac.addr,
4394 adapter->max_frame_size);
/* Replenish the slot; on failure the frame is dropped. */
4402 if (em_get_buf(adapter, i) != 0) {
4407 /* Assign correct length to the current fragment */
4410 if (adapter->fmp == NULL) {
4411 mp->m_pkthdr.len = len;
4412 adapter->fmp = mp; /* Store the first mbuf */
4415 /* Chain mbuf's together */
4416 mp->m_flags &= ~M_PKTHDR;
4418 * Adjust length of previous mbuf in chain if
4419 * we received less than 4 bytes in the last
4422 if (prev_len_adj > 0) {
4423 adapter->lmp->m_len -= prev_len_adj;
4424 adapter->fmp->m_pkthdr.len -=
4427 adapter->lmp->m_next = mp;
4428 adapter->lmp = adapter->lmp->m_next;
4429 adapter->fmp->m_pkthdr.len += len;
/* Frame complete: attach metadata and pass it up. */
4433 adapter->fmp->m_pkthdr.rcvif = ifp;
4435 em_receive_checksum(adapter, current_desc,
4437 #ifndef __NO_STRICT_ALIGNMENT
4438 if (adapter->max_frame_size >
4439 (MCLBYTES - ETHER_ALIGN) &&
4440 em_fixup_rx(adapter) != 0)
4443 if (status & E1000_RXD_STAT_VP) {
4445 VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4446 (le16toh(current_desc->special) &
4447 E1000_RXD_SPC_VLAN_MASK));
4449 adapter->fmp->m_pkthdr.ether_vlantag =
4450 (le16toh(current_desc->special) &
4451 E1000_RXD_SPC_VLAN_MASK);
4452 adapter->fmp->m_flags |= M_VLANTAG;
4455 #ifndef __NO_STRICT_ALIGNMENT
4459 adapter->fmp = NULL;
4460 adapter->lmp = NULL;
/* Drop path: recycle the cluster in place and discard any
 * partially assembled chain. */
4465 /* Reuse loaded DMA map and just update mbuf chain */
4466 mp = adapter->rx_buffer_area[i].m_head;
4467 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4468 mp->m_data = mp->m_ext.ext_buf;
4470 if (adapter->max_frame_size <=
4471 (MCLBYTES - ETHER_ALIGN))
4472 m_adj(mp, ETHER_ALIGN);
4473 if (adapter->fmp != NULL) {
4474 m_freem(adapter->fmp);
4475 adapter->fmp = NULL;
4476 adapter->lmp = NULL;
4481 /* Zero out the receive descriptors status. */
4482 current_desc->status = 0;
4483 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4484 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4486 /* Advance our pointers to the next descriptor. */
4487 if (++i == adapter->num_rx_desc)
4489 /* Call into the stack */
/* Drop the RX lock across if_input(); the stack may re-enter. */
4491 adapter->next_rx_desc_to_check = i;
4492 EM_RX_UNLOCK(adapter);
4493 (*ifp->if_input)(ifp, m);
4494 EM_RX_LOCK(adapter);
4496 i = adapter->next_rx_desc_to_check;
4498 current_desc = &adapter->rx_desc_base[i];
4500 adapter->next_rx_desc_to_check = i;
4502 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
4504 i = adapter->num_rx_desc - 1;
4505 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4506 EM_RX_UNLOCK(adapter);
4510 #ifndef __NO_STRICT_ALIGNMENT
4512 * When jumbo frames are enabled we should realign entire payload on
4513 * architectures with strict alignment. This is serious design mistake of 8254x
4514 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4515 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4516 * payload. On architectures without strict alignment restrictions 8254x still
4517 * performs unaligned memory access which would reduce the performance too.
4518 * To avoid copying over an entire frame to align, we allocate a new mbuf and
4519 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4520 * existing mbuf chain.
4522 * Be aware, best performance of the 8254x is achieved only when jumbo frame is
4523 * not used at all on architectures with strict alignment.
4526 em_fixup_rx(struct adapter *adapter)
/* Realign a received jumbo frame by shifting (or prepending) the
 * Ethernet header so the IP payload lands on an aligned boundary.
 * Returns non-zero and drops the frame if no mbuf can be allocated. */
/* Header fits after shifting within the same cluster: memmove-style
 * copy then bump m_data. */
4533 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4534 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4535 m->m_data += ETHER_HDR_LEN;
/* Otherwise prepend a fresh header mbuf carrying the pkthdr. */
4537 MGETHDR(n, MB_DONTWAIT, MT_DATA);
4539 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4540 m->m_data += ETHER_HDR_LEN;
4541 m->m_len -= ETHER_HDR_LEN;
4542 n->m_len = ETHER_HDR_LEN;
4543 M_MOVE_PKTHDR(n, m);
/* Allocation failed: count the drop and discard the chain. */
4547 adapter->dropped_pkts++;
4548 m_freem(adapter->fmp);
4549 adapter->fmp = NULL;
4558 /*********************************************************************
4560 * Verify that the hardware indicated that the checksum is valid.
4561 * Inform the stack about the status of checksum so that stack
4562 * doesn't spend time verifying the checksum.
4564 *********************************************************************/
4566 em_receive_checksum(struct adapter *adapter,
4567 struct e1000_rx_desc *rx_desc, struct mbuf *mp)
/* Translates the RX descriptor's checksum status bits into mbuf
 * csum_flags so the stack can skip software verification. */
4569 /* 82543 or newer only */
4570 if ((adapter->hw.mac.type < e1000_82543) ||
4571 /* Ignore Checksum bit is set */
4572 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4573 mp->m_pkthdr.csum_flags = 0;
/* IP header checksum was computed by hardware. */
4577 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
4579 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4580 /* IP Checksum Good */
4581 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4582 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4585 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum was computed by hardware. */
4589 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
4591 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4592 mp->m_pkthdr.csum_flags |=
4593 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4594 mp->m_pkthdr.csum_data = htons(0xffff);
4600 * This routine is run via a vlan
4604 em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
/* VLAN registration event handler: records vtag in the shadow VFTA
 * bitmap and re-initializes the adapter to program the hardware. */
4606 struct adapter *adapter = ifp->if_softc;
4609 if (ifp->if_softc != arg) /* Not our event */
4612 if ((vtag == 0) || (vtag > 4095)) /* Invalid ID */
/* VFTA is a bitmap: 32 tags per array entry. */
4615 index = (vtag >> 5) & 0x7F;
4617 em_shadow_vfta[index] |= (1 << bit);
4618 ++adapter->num_vlans;
4619 /* Re-init to load the changes */
4624 * This routine is run via a vlan
4628 em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
/* VLAN unregistration event handler: clears vtag from the shadow VFTA
 * bitmap and re-initializes the adapter to program the hardware. */
4630 struct adapter *adapter = ifp->if_softc;
4633 if (ifp->if_softc != arg)
4636 if ((vtag == 0) || (vtag > 4095)) /* Invalid */
/* VFTA is a bitmap: 32 tags per array entry. */
4639 index = (vtag >> 5) & 0x7F;
4641 em_shadow_vfta[index] &= ~(1 << bit);
4642 --adapter->num_vlans;
4643 /* Re-init to load the changes */
4648 em_setup_vlan_hw_support(struct adapter *adapter)
/* Re-applies VLAN hardware state after a reset: repopulates the VFTA
 * from the shadow copy, enables VLAN tag stripping (CTRL_VME) and the
 * VLAN filter table, and widens RLPML for the extra tag bytes. */
4650 struct e1000_hw *hw = &adapter->hw;
4654 ** We get here thru init_locked, meaning
4655 ** a soft reset, this has already cleared
4656 ** the VFTA and other state, so if there
4657 ** have been no vlan's registered do nothing.
4659 if (adapter->num_vlans == 0)
4663 ** A soft reset zero's out the VFTA, so
4664 ** we need to repopulate it now.
4666 for (int i = 0; i < EM_VFTA_SIZE; i++)
4667 if (em_shadow_vfta[i] != 0)
4668 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4669 i, em_shadow_vfta[i]);
/* Enable hardware VLAN tag stripping/insertion. */
4671 reg = E1000_READ_REG(hw, E1000_CTRL);
4672 reg |= E1000_CTRL_VME;
4673 E1000_WRITE_REG(hw, E1000_CTRL, reg);
4675 /* Enable the Filter Table */
4676 reg = E1000_READ_REG(hw, E1000_RCTL);
4677 reg &= ~E1000_RCTL_CFIEN;
4678 reg |= E1000_RCTL_VFE;
4679 E1000_WRITE_REG(hw, E1000_RCTL, reg);
4681 /* Update the frame size */
4682 E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4683 adapter->max_frame_size + VLAN_TAG_SIZE);
4687 em_enable_intr(struct adapter *adapter)
/* Unmasks device interrupts; with MSI-X also enables auto-clear via
 * EIAC and extends the IMS mask with the MSI-X vector bits. */
4689 struct e1000_hw *hw = &adapter->hw;
4690 u32 ims_mask = IMS_ENABLE_MASK;
4692 if (adapter->msix) {
4693 E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4694 ims_mask |= EM_MSIX_MASK;
4696 E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
4700 em_disable_intr(struct adapter *adapter)
4702 struct e1000_hw *hw = &adapter->hw;
4705 E1000_WRITE_REG(hw, EM_EIAC, 0);
4706 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4710 * Bit of a misnomer, what this really means is
4711 * to enable OS management of the system... aka
4712 * to disable special hardware management features
4715 em_init_manageability(struct adapter *adapter)
4717 /* A shared code workaround */
4718 #define E1000_82542_MANC2H E1000_MANC2H
4719 if (adapter->has_manage) {
4720 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4721 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4723 /* disable hardware interception of ARP */
4724 manc &= ~(E1000_MANC_ARP_EN);
4726 /* enable receiving management packets to the host */
4727 if (adapter->hw.mac.type >= e1000_82571) {
4728 manc |= E1000_MANC_EN_MNG2HOST;
4729 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4730 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4731 manc2h |= E1000_MNG2HOST_PORT_623;
4732 manc2h |= E1000_MNG2HOST_PORT_664;
4733 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4736 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4741 * Give control back to hardware management
4742 * controller if there is one.
4745 em_release_manageability(struct adapter *adapter)
4747 if (adapter->has_manage) {
4748 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4750 /* re-enable hardware interception of ARP */
4751 manc |= E1000_MANC_ARP_EN;
4753 if (adapter->hw.mac.type >= e1000_82571)
4754 manc &= ~E1000_MANC_EN_MNG2HOST;
4756 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4761 * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4762 * For ASF and Pass Through versions of f/w this means
4763 * that the driver is loaded. For AMT version type f/w
4764 * this means that the network i/f is open.
4767 em_get_hw_control(struct adapter *adapter)
4771 if (adapter->hw.mac.type == e1000_82573) {
4772 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4773 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4774 swsm | E1000_SWSM_DRV_LOAD);
4778 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4779 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4780 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4785 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4786 * For ASF and Pass Through versions of f/w this means that
4787 * the driver is no longer loaded. For AMT versions of the
4788 * f/w this means that the network i/f is closed.
4791 em_release_hw_control(struct adapter *adapter)
4795 if (!adapter->has_manage)
4798 if (adapter->hw.mac.type == e1000_82573) {
4799 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4800 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4801 swsm & ~E1000_SWSM_DRV_LOAD);
4805 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4806 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4807 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4812 em_is_valid_ether_addr(u8 *addr)
4814 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4816 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4824 ** Parse the interface capabilities with regard
4825 ** to both system management and wake-on-lan for
4829 em_get_wakeup(device_t dev)
4831 struct adapter *adapter = device_get_softc(dev);
4832 u16 eeprom_data = 0, device_id, apme_mask;
4834 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4835 apme_mask = EM_EEPROM_APME;
4837 switch (adapter->hw.mac.type) {
4842 e1000_read_nvm(&adapter->hw,
4843 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4844 apme_mask = EM_82544_APME;
4848 adapter->has_amt = TRUE;
4851 case e1000_82546_rev_3:
4854 case e1000_80003es2lan:
4855 if (adapter->hw.bus.func == 1) {
4856 e1000_read_nvm(&adapter->hw,
4857 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4860 e1000_read_nvm(&adapter->hw,
4861 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4865 case e1000_ich10lan:
4867 apme_mask = E1000_WUC_APME;
4868 adapter->has_amt = TRUE;
4869 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
4872 e1000_read_nvm(&adapter->hw,
4873 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4876 if (eeprom_data & apme_mask)
4877 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4879 * We have the eeprom settings, now apply the special cases
4880 * where the eeprom may be wrong or the board won't support
4881 * wake on lan on a particular port
4883 device_id = pci_get_device(dev);
4884 switch (device_id) {
4885 case E1000_DEV_ID_82546GB_PCIE:
4888 case E1000_DEV_ID_82546EB_FIBER:
4889 case E1000_DEV_ID_82546GB_FIBER:
4890 case E1000_DEV_ID_82571EB_FIBER:
4891 /* Wake events only supported on port A for dual fiber
4892 * regardless of eeprom setting */
4893 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4894 E1000_STATUS_FUNC_1)
4897 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4898 case E1000_DEV_ID_82571EB_QUAD_COPPER:
4899 case E1000_DEV_ID_82571EB_QUAD_FIBER:
4900 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
4901 /* if quad port adapter, disable WoL on all but port A */
4902 if (global_quad_port_a != 0)
4904 /* Reset for multiple quad port adapters */
4905 if (++global_quad_port_a == 4)
4906 global_quad_port_a = 0;
4914 * Enable PCI Wake On Lan capability
4917 em_enable_wakeup(device_t dev)
4919 struct adapter *adapter = device_get_softc(dev);
4920 struct ifnet *ifp = adapter->ifp;
4921 u32 pmc, ctrl, ctrl_ext, rctl;
4924 if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
4927 /* Advertise the wakeup capability */
4928 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4929 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4930 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4931 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4933 /* ICH workaround code */
4934 if ((adapter->hw.mac.type == e1000_ich8lan) ||
4935 (adapter->hw.mac.type == e1000_pchlan) ||
4936 (adapter->hw.mac.type == e1000_ich9lan) ||
4937 (adapter->hw.mac.type == e1000_ich10lan)) {
4938 e1000_disable_gig_wol_ich8lan(&adapter->hw);
4939 e1000_hv_phy_powerdown_workaround_ich8lan(&adapter->hw);
4942 /* Keep the laser running on Fiber adapters */
4943 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4944 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4945 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4946 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4947 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4951 ** Determine type of Wakeup: note that wol
4952 ** is set with all bits on by default.
4954 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4955 adapter->wol &= ~E1000_WUFC_MAG;
4957 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4958 adapter->wol &= ~E1000_WUFC_MC;
4960 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4961 rctl |= E1000_RCTL_MPE;
4962 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4965 if (adapter->hw.mac.type == e1000_pchlan) {
4966 if (em_enable_phy_wakeup(adapter))
4969 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4970 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
4973 if (adapter->hw.phy.type == e1000_phy_igp_3)
4974 e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
4977 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4978 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4979 if (ifp->if_capenable & IFCAP_WOL)
4980 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4981 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4987 ** WOL in the newer chipset interfaces (pchlan)
4988 ** require thing to be copied into the phy
4991 em_enable_phy_wakeup(struct adapter *adapter)
4993 struct e1000_hw *hw = &adapter->hw;
4997 /* copy MAC RARs to PHY RARs */
4998 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4999 mreg = E1000_READ_REG(hw, E1000_RAL(i));
5000 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
5001 e1000_write_phy_reg(hw, BM_RAR_M(i),
5002 (u16)((mreg >> 16) & 0xFFFF));
5003 mreg = E1000_READ_REG(hw, E1000_RAH(i));
5004 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
5005 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
5006 (u16)((mreg >> 16) & 0xFFFF));
5009 /* copy MAC MTA to PHY MTA */
5010 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5011 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5012 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
5013 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
5014 (u16)((mreg >> 16) & 0xFFFF));
5017 /* configure PHY Rx Control register */
5018 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
5019 mreg = E1000_READ_REG(hw, E1000_RCTL);
5020 if (mreg & E1000_RCTL_UPE)
5021 preg |= BM_RCTL_UPE;
5022 if (mreg & E1000_RCTL_MPE)
5023 preg |= BM_RCTL_MPE;
5024 preg &= ~(BM_RCTL_MO_MASK);
5025 if (mreg & E1000_RCTL_MO_3)
5026 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5027 << BM_RCTL_MO_SHIFT);
5028 if (mreg & E1000_RCTL_BAM)
5029 preg |= BM_RCTL_BAM;
5030 if (mreg & E1000_RCTL_PMCF)
5031 preg |= BM_RCTL_PMCF;
5032 mreg = E1000_READ_REG(hw, E1000_CTRL);
5033 if (mreg & E1000_CTRL_RFCE)
5034 preg |= BM_RCTL_RFCE;
5035 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
5037 /* enable PHY wakeup in MAC register */
5038 E1000_WRITE_REG(hw, E1000_WUC,
5039 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5040 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
5042 /* configure and enable PHY wakeup in PHY registers */
5043 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
5044 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5046 /* activate PHY wakeup */
5047 ret = hw->phy.ops.acquire(hw);
5049 kprintf("Could not acquire PHY\n");
5052 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
5053 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
5054 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
5056 kprintf("Could not read PHY page 769\n");
5059 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5060 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
5062 kprintf("Could not set PHY Host Wakeup bit\n");
5064 hw->phy.ops.release(hw);
5070 /*********************************************************************
5071 * 82544 Coexistence issue workaround.
5072 * There are 2 issues.
5073 * 1. Transmit Hang issue.
5074 * To detect this issue, following equation can be used...
5075 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5076 * If SUM[3:0] is in between 1 to 4, we will have this issue.
5079 * To detect this issue, following equation can be used...
5080 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5081 * If SUM[3:0] is in between 9 to c, we will have this issue.
5085 * Make sure we do not have ending address
5086 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
5088 *************************************************************************/
5090 em_fill_descriptors (bus_addr_t address, u32 length,
5091 PDESC_ARRAY desc_array)
5093 u32 safe_terminator;
5095 /* Since issue is sensitive to length and address.*/
5096 /* Let us first check the address...*/
5098 desc_array->descriptor[0].address = address;
5099 desc_array->descriptor[0].length = length;
5100 desc_array->elements = 1;
5101 return (desc_array->elements);
5103 safe_terminator = (u32)((((u32)address & 0x7) +
5104 (length & 0xF)) & 0xF);
5105 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5106 if (safe_terminator == 0 ||
5107 (safe_terminator > 4 &&
5108 safe_terminator < 9) ||
5109 (safe_terminator > 0xC &&
5110 safe_terminator <= 0xF)) {
5111 desc_array->descriptor[0].address = address;
5112 desc_array->descriptor[0].length = length;
5113 desc_array->elements = 1;
5114 return (desc_array->elements);
5117 desc_array->descriptor[0].address = address;
5118 desc_array->descriptor[0].length = length - 4;
5119 desc_array->descriptor[1].address = address + (length - 4);
5120 desc_array->descriptor[1].length = 4;
5121 desc_array->elements = 2;
5122 return (desc_array->elements);
5125 /**********************************************************************
5127 * Update the board statistics counters.
5129 **********************************************************************/
5131 em_update_stats_counters(struct adapter *adapter)
5135 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
5136 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
5137 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
5138 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
5140 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
5141 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
5142 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
5143 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
5145 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
5146 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
5147 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
5148 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
5149 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
5150 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
5151 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
5152 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
5153 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
5154 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
5155 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
5156 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
5157 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
5158 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
5159 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
5160 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
5161 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
5162 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5163 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5164 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5166 /* For the 64-bit byte counters the low dword must be read first. */
5167 /* Both registers clear on the read of the high dword */
5169 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
5170 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
5172 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5173 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5174 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5175 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5176 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5178 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5179 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5181 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5182 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5183 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5184 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5185 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5186 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5187 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5188 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5189 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5190 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
5192 if (adapter->hw.mac.type >= e1000_82543) {
5193 adapter->stats.algnerrc +=
5194 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5195 adapter->stats.rxerrc +=
5196 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5197 adapter->stats.tncrs +=
5198 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5199 adapter->stats.cexterr +=
5200 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5201 adapter->stats.tsctc +=
5202 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5203 adapter->stats.tsctfc +=
5204 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
5208 ifp->if_collisions = adapter->stats.colc;
5211 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5212 adapter->stats.crcerrs + adapter->stats.algnerrc +
5213 adapter->stats.ruc + adapter->stats.roc +
5214 adapter->stats.mpc + adapter->stats.cexterr;
5217 ifp->if_oerrors = adapter->stats.ecol +
5218 adapter->stats.latecol + adapter->watchdog_events;
5222 /**********************************************************************
5224 * This routine is called only when em_display_debug_stats is enabled.
5225 * This routine provides a way to take a look at important statistics
5226 * maintained by the driver and hardware.
5228 **********************************************************************/
5230 em_print_debug_info(struct adapter *adapter)
5232 device_t dev = adapter->dev;
5233 u8 *hw_addr = adapter->hw.hw_addr;
5235 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5236 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5237 E1000_READ_REG(&adapter->hw, E1000_CTRL),
5238 E1000_READ_REG(&adapter->hw, E1000_RCTL));
5239 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5240 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5241 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5242 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5243 adapter->hw.fc.high_water,
5244 adapter->hw.fc.low_water);
5245 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5246 E1000_READ_REG(&adapter->hw, E1000_TIDV),
5247 E1000_READ_REG(&adapter->hw, E1000_TADV));
5248 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5249 E1000_READ_REG(&adapter->hw, E1000_RDTR),
5250 E1000_READ_REG(&adapter->hw, E1000_RADV));
5251 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5252 (long long)adapter->tx_fifo_wrk_cnt,
5253 (long long)adapter->tx_fifo_reset_cnt);
5254 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5255 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5256 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5257 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5258 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5259 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5260 device_printf(dev, "Num Tx descriptors avail = %d\n",
5261 adapter->num_tx_desc_avail);
5262 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5263 adapter->no_tx_desc_avail1);
5264 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5265 adapter->no_tx_desc_avail2);
5266 device_printf(dev, "Std mbuf failed = %ld\n",
5267 adapter->mbuf_alloc_failed);
5268 device_printf(dev, "Std mbuf cluster failed = %ld\n",
5269 adapter->mbuf_cluster_failed);
5270 device_printf(dev, "Driver dropped packets = %ld\n",
5271 adapter->dropped_pkts);
5272 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5273 adapter->no_tx_dma_setup);
5277 em_print_hw_stats(struct adapter *adapter)
5279 device_t dev = adapter->dev;
5281 device_printf(dev, "Excessive collisions = %lld\n",
5282 (long long)adapter->stats.ecol);
5283 #if (DEBUG_HW > 0) /* Dont output these errors normally */
5284 device_printf(dev, "Symbol errors = %lld\n",
5285 (long long)adapter->stats.symerrs);
5287 device_printf(dev, "Sequence errors = %lld\n",
5288 (long long)adapter->stats.sec);
5289 device_printf(dev, "Defer count = %lld\n",
5290 (long long)adapter->stats.dc);
5291 device_printf(dev, "Missed Packets = %lld\n",
5292 (long long)adapter->stats.mpc);
5293 device_printf(dev, "Receive No Buffers = %lld\n",
5294 (long long)adapter->stats.rnbc);
5295 /* RLEC is inaccurate on some hardware, calculate our own. */
5296 device_printf(dev, "Receive Length Errors = %lld\n",
5297 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5298 device_printf(dev, "Receive errors = %lld\n",
5299 (long long)adapter->stats.rxerrc);
5300 device_printf(dev, "Crc errors = %lld\n",
5301 (long long)adapter->stats.crcerrs);
5302 device_printf(dev, "Alignment errors = %lld\n",
5303 (long long)adapter->stats.algnerrc);
5304 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5305 (long long)adapter->stats.cexterr);
5306 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5307 device_printf(dev, "watchdog timeouts = %ld\n",
5308 adapter->watchdog_events);
5309 device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5310 " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5311 adapter->tx_irq , adapter->link_irq);
5312 device_printf(dev, "XON Rcvd = %lld\n",
5313 (long long)adapter->stats.xonrxc);
5314 device_printf(dev, "XON Xmtd = %lld\n",
5315 (long long)adapter->stats.xontxc);
5316 device_printf(dev, "XOFF Rcvd = %lld\n",
5317 (long long)adapter->stats.xoffrxc);
5318 device_printf(dev, "XOFF Xmtd = %lld\n",
5319 (long long)adapter->stats.xofftxc);
5320 device_printf(dev, "Good Packets Rcvd = %lld\n",
5321 (long long)adapter->stats.gprc);
5322 device_printf(dev, "Good Packets Xmtd = %lld\n",
5323 (long long)adapter->stats.gptc);
5324 device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5325 (long long)adapter->stats.tsctc);
5326 device_printf(dev, "TSO Contexts Failed = %lld\n",
5327 (long long)adapter->stats.tsctfc);
5330 /**********************************************************************
5332 * This routine provides a way to dump out the adapter eeprom,
5333 * often a useful debug/service tool. This only dumps the first
5334 * 32 words, stuff that matters is in that extent.
5336 **********************************************************************/
5338 em_print_nvm_info(struct adapter *adapter)
5343 /* Its a bit crude, but it gets the job done */
5344 kprintf("\nInterface EEPROM Dump:\n");
5345 kprintf("Offset\n0x0000 ");
5346 for (i = 0, j = 0; i < 32; i++, j++) {
5347 if (j == 8) { /* Make the offset block */
5349 kprintf("\n0x00%x0 ",row);
5351 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5352 kprintf("%04x ", eeprom_data);
5358 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5360 struct adapter *adapter;
5365 error = sysctl_handle_int(oidp, &result, 0, req);
5367 if (error || !req->newptr)
5371 adapter = (struct adapter *)arg1;
5372 em_print_debug_info(adapter);
5375 * This value will cause a hex dump of the
5376 * first 32 16-bit words of the EEPROM to
5380 adapter = (struct adapter *)arg1;
5381 em_print_nvm_info(adapter);
5389 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5391 struct adapter *adapter;
5396 error = sysctl_handle_int(oidp, &result, 0, req);
5398 if (error || !req->newptr)
5402 adapter = (struct adapter *)arg1;
5403 em_print_hw_stats(adapter);
5410 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5412 struct em_int_delay_info *info;
5413 struct adapter *adapter;
5419 info = (struct em_int_delay_info *)arg1;
5420 usecs = info->value;
5421 error = sysctl_handle_int(oidp, &usecs, 0, req);
5422 if (error != 0 || req->newptr == NULL)
5424 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5426 info->value = usecs;
5427 ticks = EM_USECS_TO_TICKS(usecs);
5429 adapter = info->adapter;
5431 EM_CORE_LOCK(adapter);
5432 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5433 regval = (regval & ~0xffff) | (ticks & 0xffff);
5434 /* Handle a few special cases. */
5435 switch (info->offset) {
5440 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5441 /* Don't write 0 into the TIDV register. */
5444 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5447 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5448 EM_CORE_UNLOCK(adapter);
5453 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5454 const char *description, struct em_int_delay_info *info,
5455 int offset, int value)
5457 info->adapter = adapter;
5458 info->offset = offset;
5459 info->value = value;
5460 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
5461 SYSCTL_CHILDREN(adapter->sysctl_tree),
5462 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5463 info, 0, em_sysctl_int_delay, "I", description);
5466 #ifndef EM_LEGACY_IRQ
5468 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5469 const char *description, int *limit, int value)
5472 SYSCTL_ADD_INT(&adapter->sysctl_ctx,
5473 SYSCTL_CHILDREN(adapter->sysctl_tree),
5474 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);