1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #if __FreeBSD_version >= 800000
43 #include <sys/buf_ring.h>
46 #include <sys/endian.h>
47 #include <sys/kernel.h>
48 #include <sys/kthread.h>
49 #include <sys/malloc.h>
51 #include <sys/module.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/sysctl.h>
56 #include <sys/taskqueue.h>
57 #include <sys/eventhandler.h>
60 #include <net/ethernet.h>
62 #include <net/if_arp.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/ifq_var.h>
67 #include <net/if_types.h>
68 #include <net/vlan/if_vlan_var.h>
69 #include <net/vlan/if_vlan_ether.h>
71 #include <netinet/in_systm.h>
72 #include <netinet/in.h>
73 #include <netinet/if_ether.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip6.h>
76 #include <netinet/tcp.h>
77 #include <netinet/udp.h>
79 #include <sys/in_cksum.h>
80 #include <bus/pci/pcivar.h>
81 #include <bus/pci/pcireg.h>
83 #include "e1000_api.h"
84 #include "e1000_82571.h"
/*
 * NOTE(review): this excerpt is a numbered paste; the leading integer on
 * each line is the original file's line number, and gaps in that numbering
 * mean lines are missing from this view.
 */
87 /*********************************************************************
88 * Set this to one to display debug statistics
89 *********************************************************************/
/* Global debug knob; non-static so it can be poked from a debugger. */
90 int em_display_debug_stats = 0;
92 /*********************************************************************
94 *********************************************************************/
/* Driver version string (reported at attach in the full driver). */
95 char em_driver_version[] = "6.9.25";
98 /*********************************************************************
101 * Used by probe to select devices to load on
102 * Last field stores an index into e1000_strings
103 * Last entry must be all 0s
105 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
106 *********************************************************************/
/*
 * PCI match table consumed by em_probe(): { vendor, device, subvendor,
 * subdevice, index into em_strings }.  PCI_ANY_ID wildcards the subsystem
 * IDs.  NOTE(review): the terminating all-zero entry and the closing "};"
 * are not visible in this excerpt (original lines after 216 are missing);
 * em_probe() relies on a vendor_id==0 sentinel to stop scanning — confirm
 * it is present in the full file.
 */
108 static em_vendor_info_t em_vendor_info_array[] =
110 /* Intel(R) PRO/1000 Network Connection */
111 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
115 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
119 { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
123 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
127 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
128 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
130 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
131 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
132 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
133 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
135 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
136 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
137 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
138 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
139 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
141 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
142 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
143 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
144 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
145 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
146 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
147 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
148 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
149 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
150 PCI_ANY_ID, PCI_ANY_ID, 0},
152 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
153 { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
154 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
156 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
157 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
158 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
159 { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
160 PCI_ANY_ID, PCI_ANY_ID, 0},
161 { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
162 PCI_ANY_ID, PCI_ANY_ID, 0},
163 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
164 PCI_ANY_ID, PCI_ANY_ID, 0},
165 { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
166 PCI_ANY_ID, PCI_ANY_ID, 0},
167 { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
168 PCI_ANY_ID, PCI_ANY_ID, 0},
169 { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
170 PCI_ANY_ID, PCI_ANY_ID, 0},
171 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
172 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
173 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
174 { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0},
176 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
177 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
178 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
179 { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0},
180 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
181 PCI_ANY_ID, PCI_ANY_ID, 0},
182 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
183 PCI_ANY_ID, PCI_ANY_ID, 0},
184 { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
185 PCI_ANY_ID, PCI_ANY_ID, 0},
186 { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
187 PCI_ANY_ID, PCI_ANY_ID, 0},
188 { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
189 { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
190 { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
191 { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
192 { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
193 { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
194 { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
195 { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0},
196 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
197 { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0},
198 { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0},
199 { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0},
200 { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0},
201 { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0},
202 { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0},
203 { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0},
204 { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0},
205 { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0},
206 { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0},
207 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
208 { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
209 { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0},
210 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
211 { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
212 { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0},
213 { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0},
214 { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0},
215 { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0},
216 /* required last entry */
220 /*********************************************************************
221 * Table of branding strings for all supported NICs.
222 *********************************************************************/
/* Indexed by the last field of em_vendor_info_array entries (all 0 here). */
224 static char *em_strings[] = {
225 "Intel(R) PRO/1000 Network Connection"
228 /*********************************************************************
229 * Function prototypes
230 *********************************************************************/
231 static int em_probe(device_t);
232 static int em_attach(device_t);
233 static int em_detach(device_t);
234 static int em_shutdown(device_t);
235 static int em_suspend(device_t);
236 static int em_resume(device_t);
237 static void em_start(struct ifnet *);
238 static void em_start_locked(struct ifnet *ifp);
239 #if __FreeBSD_version >= 800000
240 static int em_mq_start(struct ifnet *, struct mbuf *);
241 static int em_mq_start_locked(struct ifnet *, struct mbuf *);
242 static void em_qflush(struct ifnet *);
244 static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
245 static void em_init(void *);
246 static void em_init_locked(struct adapter *);
247 static void em_stop(void *);
248 static void em_media_status(struct ifnet *, struct ifmediareq *);
249 static int em_media_change(struct ifnet *);
250 static void em_identify_hardware(struct adapter *);
251 static int em_allocate_pci_resources(struct adapter *);
252 static int em_allocate_legacy(struct adapter *adapter);
253 static int em_allocate_msix(struct adapter *adapter);
254 static int em_setup_msix(struct adapter *);
255 static void em_free_pci_resources(struct adapter *);
256 static void em_local_timer(void *);
257 static int em_hardware_init(struct adapter *);
258 static void em_setup_interface(device_t, struct adapter *);
259 static void em_setup_transmit_structures(struct adapter *);
260 static void em_initialize_transmit_unit(struct adapter *);
261 static int em_setup_receive_structures(struct adapter *);
262 static void em_initialize_receive_unit(struct adapter *);
263 static void em_enable_intr(struct adapter *);
264 static void em_disable_intr(struct adapter *);
265 static void em_free_transmit_structures(struct adapter *);
266 static void em_free_receive_structures(struct adapter *);
267 static void em_update_stats_counters(struct adapter *);
268 static void em_txeof(struct adapter *);
269 static void em_tx_purge(struct adapter *);
270 static int em_allocate_receive_structures(struct adapter *);
271 static int em_allocate_transmit_structures(struct adapter *);
272 static int em_rxeof(struct adapter *, int);
273 #ifndef __NO_STRICT_ALIGNMENT
274 static int em_fixup_rx(struct adapter *);
276 static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *,
278 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
281 static bool em_tso_setup(struct adapter *, struct mbuf *,
284 static void em_set_promisc(struct adapter *);
285 static void em_disable_promisc(struct adapter *);
286 static void em_set_multi(struct adapter *);
287 static void em_print_hw_stats(struct adapter *);
288 static void em_update_link_status(struct adapter *);
289 static int em_get_buf(struct adapter *, int);
291 static void em_register_vlan(void *, struct ifnet *, u16);
292 static void em_unregister_vlan(void *, struct ifnet *, u16);
293 static void em_setup_vlan_hw_support(struct adapter *);
295 static int em_xmit(struct adapter *, struct mbuf **);
296 static void em_smartspeed(struct adapter *);
297 static int em_82547_fifo_workaround(struct adapter *, int);
298 static void em_82547_update_fifo_head(struct adapter *, int);
299 static int em_82547_tx_fifo_reset(struct adapter *);
300 static void em_82547_move_tail(void *);
301 static int em_dma_malloc(struct adapter *, bus_size_t,
302 struct em_dma_alloc *, int);
303 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
304 static void em_print_debug_info(struct adapter *);
305 static void em_print_nvm_info(struct adapter *);
306 static int em_is_valid_ether_addr(u8 *);
307 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
308 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
309 static u32 em_fill_descriptors (bus_addr_t address, u32 length,
310 PDESC_ARRAY desc_array);
311 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
312 static void em_add_int_delay_sysctl(struct adapter *, const char *,
313 const char *, struct em_int_delay_info *, int, int);
314 /* Management and WOL Support */
315 static void em_init_manageability(struct adapter *);
316 static void em_release_manageability(struct adapter *);
317 static void em_get_hw_control(struct adapter *);
318 static void em_release_hw_control(struct adapter *);
319 static void em_get_wakeup(device_t);
320 static void em_enable_wakeup(device_t);
321 static int em_enable_phy_wakeup(struct adapter *);
324 static void em_intr(void *);
326 static void em_irq_fast(void *);
329 static void em_msix_tx(void *);
330 static void em_msix_rx(void *);
331 static void em_msix_link(void *);
332 static void em_handle_rx(void *context, int pending);
333 static void em_handle_tx(void *context, int pending);
335 static void em_handle_rxtx(void *context, int pending);
336 static void em_handle_link(void *context, int pending);
337 static void em_add_rx_process_limit(struct adapter *, const char *,
338 const char *, int *, int);
339 #endif /* ~EM_LEGACY_IRQ */
341 #ifdef DEVICE_POLLING
342 static poll_handler_t em_poll;
345 /*********************************************************************
346 * FreeBSD Device Interface Entry Points
347 *********************************************************************/
/*
 * newbus glue: device method table, driver descriptor and module
 * registration for the "em" driver on the pci bus.  NOTE(review): the
 * method-table terminator and the closing "};" of em_methods/em_driver
 * are not visible in this excerpt (original lines 357-363 missing).
 */
349 static device_method_t em_methods[] = {
350 /* Device interface */
351 DEVMETHOD(device_probe, em_probe),
352 DEVMETHOD(device_attach, em_attach),
353 DEVMETHOD(device_detach, em_detach),
354 DEVMETHOD(device_shutdown, em_shutdown),
355 DEVMETHOD(device_suspend, em_suspend),
356 DEVMETHOD(device_resume, em_resume),
360 static driver_t em_driver = {
361 "em", em_methods, sizeof(struct adapter),
364 static devclass_t em_devclass;
365 DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0);
/* The driver needs pci and ether support loaded first. */
366 MODULE_DEPEND(em, pci, 1, 1, 1);
367 MODULE_DEPEND(em, ether, 1, 1, 1);
369 /*********************************************************************
370 * Tunable default values.
371 *********************************************************************/
/*
 * Interrupt-delay registers tick in ~1.024us units; these macros convert
 * between microseconds and ticks with rounding.
 */
373 #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
374 #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
377 /* Allow common code without TSO */
/* Defaults for the per-adapter interrupt-delay sysctls set up in attach. */
382 static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
383 static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
384 static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
385 static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
/* Requested descriptor-ring sizes; validated against HW limits in attach. */
386 static int em_rxd = EM_DEFAULT_RXD;
387 static int em_txd = EM_DEFAULT_TXD;
388 static int em_smart_pwr_down = FALSE;
389 /* Controls whether promiscuous also shows bad packets */
390 static int em_debug_sbp = FALSE;
391 /* Local switch for MSI/MSIX */
392 static int em_enable_msi = TRUE;
/* Loader tunables mirroring the statics above (hw.em.* namespace). */
394 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
395 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
396 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
397 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
398 TUNABLE_INT("hw.em.rxd", &em_rxd);
399 TUNABLE_INT("hw.em.txd", &em_txd);
400 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
401 TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
402 TUNABLE_INT("hw.em.enable_msi", &em_enable_msi);
404 #ifndef EM_LEGACY_IRQ
405 /* How many packets rxeof tries to clean at a time */
406 static int em_rx_process_limit = 100;
407 TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit);
410 /* Flow control setting - default to FULL */
411 static int em_fc_setting = e1000_fc_full;
412 TUNABLE_INT("hw.em.fc_setting", &em_fc_setting);
415 ** Shadow VFTA table, this is needed because
416 ** the real vlan filter table gets cleared during
417 ** a soft reset and the driver needs to be able
420 static u32 em_shadow_vfta[EM_VFTA_SIZE];
422 /* Global used in WOL setup with multiport cards */
423 static int global_quad_port_a = 0;
425 /*********************************************************************
426 * Device identification routine
428 * em_probe determines if the driver should be loaded on
429 * adapter based on PCI vendor/device id of the adapter.
431 * return BUS_PROBE_DEFAULT on success, positive on failure
432 *********************************************************************/
/*
 * NOTE(review): the "static int" storage line, the early-return for a
 * non-Intel vendor, the no-match return (presumably ENXIO) and the
 * closing brace are missing from this excerpt — verify against the
 * full file before editing.
 */
435 em_probe(device_t dev)
437 	char adapter_name[60];
438 	u16 pci_vendor_id = 0;
439 	u16 pci_device_id = 0;
440 	u16 pci_subvendor_id = 0;
441 	u16 pci_subdevice_id = 0;
442 	em_vendor_info_t *ent;
444 	INIT_DEBUGOUT("em_probe: begin");
/* Reject anything that is not an Intel device up front. */
446 	pci_vendor_id = pci_get_vendor(dev);
447 	if (pci_vendor_id != EM_VENDOR_ID)
450 	pci_device_id = pci_get_device(dev);
451 	pci_subvendor_id = pci_get_subvendor(dev);
452 	pci_subdevice_id = pci_get_subdevice(dev);
/* Linear scan of the match table; vendor_id == 0 is the sentinel. */
454 	ent = em_vendor_info_array;
455 	while (ent->vendor_id != 0) {
456 		if ((pci_vendor_id == ent->vendor_id) &&
457 		    (pci_device_id == ent->device_id) &&
459 		    ((pci_subvendor_id == ent->subvendor_id) ||
460 		    (ent->subvendor_id == PCI_ANY_ID)) &&
462 		    ((pci_subdevice_id == ent->subdevice_id) ||
463 		    (ent->subdevice_id == PCI_ANY_ID))) {
/* Build "<branding string> <version>" as the device description. */
464 			ksprintf(adapter_name, "%s %s",
465 			    em_strings[ent->index],
467 			device_set_desc_copy(dev, adapter_name);
468 			return (BUS_PROBE_DEFAULT);
476 /*********************************************************************
477 * Device initialization routine
479 * The attach entry point is called when the driver is being loaded.
480 * This routine identifies the type of hardware, allocates all resources
481 * and initializes the hardware.
483 * return 0 on success, positive on failure
484 *********************************************************************/
/*
 * NOTE(review): this excerpt is missing many original lines — the
 * "static int" line, local declarations (tsize/rsize/error), the goto
 * targets of the error-unwind ladder (only the cleanup calls near the
 * end are visible), and all "goto err_*"/"return" statements.  Do not
 * reason about the unwind ordering from this view alone.
 */
487 em_attach(device_t dev)
489 	struct adapter *adapter;
493 	INIT_DEBUGOUT("em_attach: begin");
495 	adapter = device_get_softc(dev);
496 	adapter->dev = adapter->osdep.dev = dev;
/* Per-adapter core/TX/RX locks, named after the device unit. */
498 	EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
499 	EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev));
500 	EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev));
/* Per-device sysctl tree under hw.<unit> with debug/stats handlers. */
503 	sysctl_ctx_init(&adapter->sysctl_ctx);
504 	adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
505 	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
506 	    device_get_nameunit(adapter->dev),
508 	if (adapter->sysctl_tree == NULL) {
509 		device_printf(adapter->dev, "can't add sysctl node\n");
514 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
515 	    SYSCTL_CHILDREN(adapter->sysctl_tree),
516 	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
517 	    em_sysctl_debug_info, "I", "Debug Information");
519 	SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
520 	    SYSCTL_CHILDREN(adapter->sysctl_tree),
521 	    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
522 	    em_sysctl_stats, "I", "Statistics");
524 	callout_init(&adapter->timer);
525 	callout_init(&adapter->tx_fifo_timer);
527 	/* Determine hardware and mac info */
528 	em_identify_hardware(adapter);
530 	/* Setup PCI resources */
531 	if (em_allocate_pci_resources(adapter)) {
532 		device_printf(dev, "Allocation of PCI resources failed\n");
538 	** For ICH8 and family we need to
539 	** map the flash memory, and this
540 	** must happen after the MAC is
543 	if ((adapter->hw.mac.type == e1000_ich8lan) ||
544 	    (adapter->hw.mac.type == e1000_pchlan) ||
545 	    (adapter->hw.mac.type == e1000_ich9lan) ||
546 	    (adapter->hw.mac.type == e1000_ich10lan)) {
547 		int rid = EM_BAR_TYPE_FLASH;
548 		adapter->flash = bus_alloc_resource_any(dev,
549 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
550 		if (adapter->flash == NULL) {
551 			device_printf(dev, "Mapping of Flash failed\n");
555 		/* This is used in the shared code */
556 		adapter->hw.flash_address = (u8 *)adapter->flash;
557 		adapter->osdep.flash_bus_space_tag =
558 		    rman_get_bustag(adapter->flash);
559 		adapter->osdep.flash_bus_space_handle =
560 		    rman_get_bushandle(adapter->flash);
563 	/* Do Shared Code initialization */
564 	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
565 		device_printf(dev, "Setup of Shared code failed\n");
570 	e1000_get_bus_info(&adapter->hw);
572 	/* Set up some sysctls for the tunable interrupt delays */
573 	em_add_int_delay_sysctl(adapter, "rx_int_delay",
574 	    "receive interrupt delay in usecs", &adapter->rx_int_delay,
575 	    E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt);
576 	em_add_int_delay_sysctl(adapter, "tx_int_delay",
577 	    "transmit interrupt delay in usecs", &adapter->tx_int_delay,
578 	    E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt);
/* Absolute-delay registers (RADV/TADV) exist only on 82540 and later. */
579 	if (adapter->hw.mac.type >= e1000_82540) {
580 		em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
581 		    "receive interrupt delay limit in usecs",
582 		    &adapter->rx_abs_int_delay,
583 		    E1000_REGISTER(&adapter->hw, E1000_RADV),
584 		    em_rx_abs_int_delay_dflt);
585 		em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
586 		    "transmit interrupt delay limit in usecs",
587 		    &adapter->tx_abs_int_delay,
588 		    E1000_REGISTER(&adapter->hw, E1000_TADV),
589 		    em_tx_abs_int_delay_dflt);
592 #ifndef EM_LEGACY_IRQ
593 	/* Sysctls for limiting the amount of work done in the taskqueue */
594 	em_add_rx_process_limit(adapter, "rx_processing_limit",
595 	    "max number of rx packets to process", &adapter->rx_process_limit,
596 	    em_rx_process_limit);
600 	* Validate number of transmit and receive descriptors. It
601 	* must not exceed hardware maximum, and must be multiple
602 	* of E1000_DBA_ALIGN.
/* Bad tunable values fall back to EM_DEFAULT_TXD/RXD with a console note. */
604 	if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 ||
605 	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
606 	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
607 	    (em_txd < EM_MIN_TXD)) {
608 		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
609 		    EM_DEFAULT_TXD, em_txd);
610 		adapter->num_tx_desc = EM_DEFAULT_TXD;
612 		adapter->num_tx_desc = em_txd;
613 	if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 ||
614 	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
615 	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
616 	    (em_rxd < EM_MIN_RXD)) {
617 		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
618 		    EM_DEFAULT_RXD, em_rxd);
619 		adapter->num_rx_desc = EM_DEFAULT_RXD;
621 		adapter->num_rx_desc = em_rxd;
/* Autonegotiation defaults and initial RX buffer size. */
623 	adapter->hw.mac.autoneg = DO_AUTO_NEG;
624 	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
625 	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
626 	adapter->rx_buffer_len = 2048;
628 	e1000_init_script_state_82541(&adapter->hw, TRUE);
629 	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
632 	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
633 		adapter->hw.phy.mdix = AUTO_ALL_MODES;
634 		adapter->hw.phy.disable_polarity_correction = FALSE;
635 		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
639 	* Set the frame limits assuming
640 	* standard ethernet sized frames.
642 	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
643 	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
646 	* This controls when hardware reports transmit completion
649 	adapter->hw.mac.report_tx_early = 1;
651 	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
654 	/* Allocate Transmit Descriptor ring */
655 	if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) {
656 		device_printf(dev, "Unable to allocate tx_desc memory\n");
660 	adapter->tx_desc_base =
661 	    (struct e1000_tx_desc *)adapter->txdma.dma_vaddr;
663 	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
666 	/* Allocate Receive Descriptor ring */
667 	if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) {
668 		device_printf(dev, "Unable to allocate rx_desc memory\n");
672 	adapter->rx_desc_base =
673 	    (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
676 	** Start from a known state, this is
677 	** important in reading the nvm and
680 	e1000_reset_hw(&adapter->hw);
682 	/* Make sure we have a good EEPROM before we read from it */
683 	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
685 		** Some PCI-E parts fail the first check due to
686 		** the link being in sleep state, call it again,
687 		** if it fails a second time its a real issue.
689 		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
691 			    "The EEPROM Checksum Is Not Valid\n");
697 	/* Copy the permanent MAC address out of the EEPROM */
698 	if (e1000_read_mac_addr(&adapter->hw) < 0) {
699 		device_printf(dev, "EEPROM read error while reading MAC"
705 	if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) {
706 		device_printf(dev, "Invalid MAC address\n");
711 	/* Initialize the hardware */
712 	if (em_hardware_init(adapter)) {
713 		device_printf(dev, "Unable to initialize the hardware\n");
718 	/* Allocate transmit descriptors and buffers */
719 	if (em_allocate_transmit_structures(adapter)) {
720 		device_printf(dev, "Could not setup transmit structures\n");
725 	/* Allocate receive descriptors and buffers */
726 	if (em_allocate_receive_structures(adapter)) {
727 		device_printf(dev, "Could not setup receive structures\n");
733 	** Do interrupt configuration
/* adapter->msi > 1 marks MSI-X; otherwise MSI or legacy INTx. */
735 	if (adapter->msi > 1) /* Do MSI/X */
736 		error = em_allocate_msix(adapter);
737 	else /* MSI or Legacy */
738 		error = em_allocate_legacy(adapter);
743 	* Get Wake-on-Lan and Management info for later use
747 	/* Setup OS specific network interface */
748 	em_setup_interface(dev, adapter);
750 	/* Initialize statistics */
751 	em_update_stats_counters(adapter);
753 	adapter->hw.mac.get_link_status = 1;
754 	em_update_link_status(adapter);
756 	/* Indicate SOL/IDER usage */
757 	if (e1000_check_reset_block(&adapter->hw))
759 		    "PHY reset is blocked due to SOL/IDER session.\n");
761 	/* Do we need workaround for 82544 PCI-X adapter? */
762 	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
763 	    adapter->hw.mac.type == e1000_82544)
764 		adapter->pcix_82544 = TRUE;
766 		adapter->pcix_82544 = FALSE;
768 	/* Register for VLAN events */
769 	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
770 	    em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
771 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
772 	    em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
774 	/* Non-AMT based hardware can now take control from firmware */
775 	if (adapter->has_manage && !adapter->has_amt)
776 		em_get_hw_control(adapter);
778 	/* Tell the stack that the interface is not active */
779 	adapter->ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
781 	INIT_DEBUGOUT("em_attach: end");
/*
 * Error-unwind cleanup calls; the "err_*:" labels between them are
 * missing from this excerpt (original lines 783-804 gap).
 */
786 	em_free_transmit_structures(adapter);
789 	em_release_hw_control(adapter);
790 	em_dma_free(adapter, &adapter->rxdma);
792 	em_dma_free(adapter, &adapter->txdma);
795 	em_free_pci_resources(adapter);
797 	sysctl_ctx_free(&adapter->sysctl_ctx);
798 	EM_TX_LOCK_DESTROY(adapter);
799 	EM_RX_LOCK_DESTROY(adapter);
800 	EM_CORE_LOCK_DESTROY(adapter);
805 /*********************************************************************
806 * Device removal routine
808 * The detach entry point is called when the driver is being removed.
809 * This routine stops the adapter and deallocates all the resources
810 * that were allocated for driver operation.
812 * return 0 on success, positive on failure
813 *********************************************************************/
/*
 * NOTE(review): missing from this excerpt: the "static int" line, the
 * local "struct ifnet *ifp" declaration (ifp is used bare in the
 * DEVICE_POLLING block below), the EM_TX_LOCK that pairs with the
 * EM_TX_UNLOCK at original line 841, em_stop(), and the final return.
 */
816 em_detach(device_t dev)
818 	struct adapter *adapter = device_get_softc(dev);
820 	INIT_DEBUGOUT("em_detach: begin");
822 	/* Make sure VLANS are not using driver */
/* Refuse to detach while a vlan trunk is layered on this interface. */
823 	if (adapter->ifp->if_vlantrunks != NULL) {
824 		device_printf(dev,"Vlan in use, detach first\n");
828 #ifdef DEVICE_POLLING
829 	if (ifp->if_capenable & IFCAP_POLLING)
830 		ether_poll_deregister(ifp);
833 	EM_CORE_LOCK(adapter);
/* Flag consulted by the ioctl/intr paths to short-circuit during teardown. */
835 	adapter->in_detach = 1;
837 	e1000_phy_hw_reset(&adapter->hw);
839 	em_release_manageability(adapter);
841 	EM_TX_UNLOCK(adapter);
842 	EM_CORE_UNLOCK(adapter);
844 	/* Unregister VLAN events */
845 	if (adapter->vlan_attach != NULL)
846 		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
847 	if (adapter->vlan_detach != NULL)
848 		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
850 	ether_ifdetach(adapter->ifp);
851 	callout_stop(&adapter->timer);
852 	callout_stop(&adapter->tx_fifo_timer);
854 	em_free_pci_resources(adapter);
855 	bus_generic_detach(dev);
857 	em_free_transmit_structures(adapter);
858 	em_free_receive_structures(adapter);
860 	/* Free Transmit Descriptor ring */
861 	if (adapter->tx_desc_base) {
862 		em_dma_free(adapter, &adapter->txdma);
863 		adapter->tx_desc_base = NULL;
866 	/* Free Receive Descriptor ring */
867 	if (adapter->rx_desc_base) {
868 		em_dma_free(adapter, &adapter->rxdma);
869 		adapter->rx_desc_base = NULL;
872 	em_release_hw_control(adapter);
873 	sysctl_ctx_free(&adapter->sysctl_ctx);
874 	EM_TX_LOCK_DESTROY(adapter);
875 	EM_RX_LOCK_DESTROY(adapter);
876 	EM_CORE_LOCK_DESTROY(adapter);
881 /*********************************************************************
883 * Shutdown entry point
885 **********************************************************************/
/* System shutdown is handled identically to suspend. */
888 em_shutdown(device_t dev)
890 	return em_suspend(dev);
894 * Suspend/resume device methods.
/*
 * Quiesce the adapter under the core lock: hand manageability/hardware
 * control back to firmware, arm wake-up, then let the bus layer suspend
 * children.  NOTE(review): em_stop() call (original line 902) is not
 * visible in this excerpt.
 */
897 em_suspend(device_t dev)
899 	struct adapter *adapter = device_get_softc(dev);
901 	EM_CORE_LOCK(adapter);
903 	em_release_manageability(adapter);
904 	em_release_hw_control(adapter);
905 	em_enable_wakeup(dev);
907 	EM_CORE_UNLOCK(adapter);
909 	return bus_generic_suspend(dev);
/*
 * Re-initialize the adapter after a suspend and resume children.
 * NOTE(review): "ifp" is unused in the visible lines — the original
 * presumably kicks the transmit path with it (lines 922-923 missing).
 */
913 em_resume(device_t dev)
915 	struct adapter *adapter = device_get_softc(dev);
916 	struct ifnet *ifp = adapter->ifp;
918 	EM_CORE_LOCK(adapter);
919 	em_init_locked(adapter);
920 	em_init_manageability(adapter);
921 	EM_CORE_UNLOCK(adapter);
924 	return bus_generic_resume(dev);
928 /*********************************************************************
929 * Transmit entry point
931 * em_start is called by the stack to initiate a transmit.
932 * The driver will remain in this routine as long as there are
933 * packets to transmit and transmit resources are available.
934 * In case resources are not available stack is notified and
935 * the packet is requeued.
936 **********************************************************************/
/*
 * Buf-ring transmit path; caller must hold the TX lock.  Tries to send
 * the mbuf directly when the ring is empty and descriptors are plentiful,
 * otherwise enqueues it on adapter->br and drains the ring.
 * NOTE(review): this excerpt is missing the "static int" line, the
 * "struct mbuf *next" declaration, several closing braces/returns, and
 * the drain loop's "while" header — the control-flow shape here cannot
 * be fully judged from the visible lines.
 */
940 em_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
942 	struct adapter *adapter = ifp->if_softc;
944 	int error = E1000_SUCCESS;
946 	EM_TX_LOCK_ASSERT(adapter);
947 	/* To allow being called from a tasklet */
/* Not RUNNING, OACTIVE, or no link: just park the frame on the ring. */
951 	if (((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
953 	    || (!adapter->link_active)) {
954 		error = drbr_enqueue(ifp, adapter->br, m);
956 	} else if (!drbr_needs_enqueue(ifp, adapter->br) &&
957 	    (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
/* Fast path: ring empty and room in the descriptor ring — send now. */
958 		if ((error = em_xmit(adapter, &m)) != 0) {
960 			error = drbr_enqueue(ifp, adapter->br, m);
964 		* We've bypassed the buf ring so we need to update
967 		drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
969 		** Send a copy of the frame to the BPF
970 		** listener and set the watchdog on.
972 		ETHER_BPF_MTAP(ifp, m);
973 		adapter->watchdog_check = TRUE;
975 	} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
979 	if (drbr_empty(ifp, adapter->br))
981 	/* Process the queue */
983 	if ((ifp->if_flags & IFF_RUNNING) == 0)
985 		next = drbr_dequeue(ifp, adapter->br);
988 		if ((error = em_xmit(adapter, &next)) != 0) {
990 			error = drbr_enqueue(ifp, adapter->br, next);
993 		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
994 		ETHER_BPF_MTAP(ifp, next);
995 		/* Set the watchdog */
996 		adapter->watchdog_check = TRUE;
/* Ring nearly full: tell the stack to back off. */
999 	if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1000 		ifp->if_flags |= IFF_OACTIVE;
1006 ** Multiqueue capable stack interface, this is not
1007 ** yet truely multiqueue, but that is coming...
/*
 * Stack-facing if_transmit entry.  Try the TX lock: if we get it and
 * the interface is running, transmit inline via em_mq_start_locked();
 * otherwise just enqueue on the buf_ring for a later drain.
 * NOTE(review): the declaration of `error` and the return statement are
 * outside this extract.
 */
1010 em_mq_start(struct ifnet *ifp, struct mbuf *m)
1013 struct adapter *adapter = ifp->if_softc;
1016 if (EM_TX_TRYLOCK(adapter)) {
1017 if (ifp->if_flags & IFF_RUNNING)
1018 error = em_mq_start_locked(ifp, m);
1019 EM_TX_UNLOCK(adapter);
1021 error = drbr_enqueue(ifp, adapter->br, m);
/*
 * Flush the multiqueue buf_ring: drain and free every queued mbuf
 * under the TX lock.
 * NOTE(review): the line freeing each dequeued mbuf (presumably
 * m_freem) is missing from this extract.
 */
1027 em_qflush(struct ifnet *ifp)
1030 struct adapter *adapter = (struct adapter *)ifp->if_softc;
1032 EM_TX_LOCK(adapter);
1033 while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
1036 EM_TX_UNLOCK(adapter);
/*
 * Legacy (non-multiqueue) transmit loop, TX lock held.  Pulls packets
 * off the ifnet send queue and maps them to descriptors via em_xmit()
 * until the queue is empty or descriptors run out.
 */
1041 em_start_locked(struct ifnet *ifp)
1043 struct adapter *adapter = ifp->if_softc;
1044 struct mbuf *m_head;
1046 EM_TX_LOCK_ASSERT(adapter);
/* Bail unless the interface is up and not flow-controlled */
1048 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) !=
1051 if (!adapter->link_active)
1054 while (!ifq_is_empty(&ifp->if_snd)) {
1056 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1060 * Encapsulation can modify our pointer, and or make it
1061 * NULL on failure. In that event, we can't requeue.
1063 if (em_xmit(adapter, &m_head)) {
/* Out of resources: mark busy and put the packet back */
1066 ifp->if_flags |= IFF_OACTIVE;
1067 ifq_prepend(&ifp->if_snd, m_head);
1071 /* Send a copy of the frame to the BPF listener */
1072 ETHER_BPF_MTAP(ifp, m_head);
1074 /* Set timeout in case hardware has problems transmitting. */
1075 adapter->watchdog_check = TRUE;
1077 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
1078 ifp->if_flags |= IFF_OACTIVE;
/*
 * Stack-facing if_start entry: take the TX lock and run the legacy
 * transmit loop if the interface is running.
 */
1084 em_start(struct ifnet *ifp)
1086 struct adapter *adapter = ifp->if_softc;
1088 EM_TX_LOCK(adapter);
1089 if (ifp->if_flags & IFF_RUNNING)
1090 em_start_locked(ifp);
1091 EM_TX_UNLOCK(adapter);
1094 /*********************************************************************
1097  * em_ioctl is called when the user wants to configure the
1100  * return 0 on success, positive on failure
1101  **********************************************************************/
/*
 * Interface ioctl handler.  Dispatches on `command` for address
 * assignment, MTU, flags, multicast, media and capability changes;
 * anything unrecognized falls through to ether_ioctl().
 * NOTE(review): this extract drops many lines (the `switch(command)`
 * line, most `case`/`break` labels, and several closing braces), so
 * the visible structure is incomplete.
 */
1104 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred * uc)
1106 struct adapter *adapter = ifp->if_softc;
1107 struct ifreq *ifr = (struct ifreq *)data;
1109 struct ifaddr *ifa = (struct ifaddr *)data;
/* Refuse configuration while detach is in progress */
1113 if (adapter->in_detach)
/* SIOCSIFADDR for AF_INET: bring up without a full reset when possible */
1119 if (ifa->ifa_addr->sa_family == AF_INET) {
1122 * Since resetting hardware takes a very long time
1123 * and results in link renegotiation we only
1124 * initialize the hardware only when it is absolutely
1127 ifp->if_flags |= IFF_UP;
1128 if (!(ifp->if_flags & IFF_RUNNING)) {
1129 EM_CORE_LOCK(adapter);
1130 em_init_locked(adapter);
1131 EM_CORE_UNLOCK(adapter);
1133 arp_ifinit(ifp, ifa);
1136 error = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: per-MAC jumbo-frame limits decide the max MTU */
1141 u16 eeprom_data = 0;
1143 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
1145 EM_CORE_LOCK(adapter);
1146 switch (adapter->hw.mac.type) {
1149 * 82573 only supports jumbo frames
1150 * if ASPM is disabled.
1152 e1000_read_nvm(&adapter->hw,
1153 NVM_INIT_3GIO_3, 1, &eeprom_data);
1154 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1155 max_frame_size = ETHER_MAX_LEN;
1158 /* Allow Jumbo frames - fall thru */
1162 case e1000_ich10lan:
1164 case e1000_80003es2lan: /* Limit Jumbo Frame size */
1165 max_frame_size = 9234;
1168 max_frame_size = 4096;
1170 /* Adapters that do not support jumbo frames */
1174 max_frame_size = ETHER_MAX_LEN;
1177 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1179 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1181 EM_CORE_UNLOCK(adapter);
1186 ifp->if_mtu = ifr->ifr_mtu;
1187 adapter->max_frame_size =
1188 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
/* MTU changed: reinitialize so RX buffers match the new frame size */
1189 em_init_locked(adapter);
1190 EM_CORE_UNLOCK(adapter);
/* SIOCSIFFLAGS: handle up/down and promisc/allmulti toggles */
1194 IOCTL_DEBUGOUT("ioctl rcv'd:\
1195 SIOCSIFFLAGS (Set Interface Flags)");
1196 EM_CORE_LOCK(adapter);
1197 if (ifp->if_flags & IFF_UP) {
1198 if ((ifp->if_flags & IFF_RUNNING)) {
1199 if ((ifp->if_flags ^ adapter->if_flags) &
1200 (IFF_PROMISC | IFF_ALLMULTI)) {
1201 em_disable_promisc(adapter);
1202 em_set_promisc(adapter);
1205 em_init_locked(adapter);
1207 if (ifp->if_flags & IFF_RUNNING) {
1208 EM_TX_LOCK(adapter);
1210 EM_TX_UNLOCK(adapter);
/* Remember flags so the next ioctl can detect promisc changes */
1212 adapter->if_flags = ifp->if_flags;
1213 EM_CORE_UNLOCK(adapter);
/* SIOC(ADD|DEL)MULTI: reload multicast filter with interrupts masked */
1217 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
1218 if (ifp->if_flags & IFF_RUNNING) {
1219 EM_CORE_LOCK(adapter);
1220 em_disable_intr(adapter);
1221 em_set_multi(adapter);
1222 if (adapter->hw.mac.type == e1000_82542 &&
1223 adapter->hw.revision_id == E1000_REVISION_2) {
1224 em_initialize_receive_unit(adapter);
1226 #ifdef DEVICE_POLLING
1227 if (!(ifp->if_capenable & IFCAP_POLLING))
1229 em_enable_intr(adapter);
1230 EM_CORE_UNLOCK(adapter);
/* SIOCxIFMEDIA: reject media changes while SOL/IDER session active */
1234 /* Check SOL/IDER usage */
1235 EM_CORE_LOCK(adapter);
1236 if (e1000_check_reset_block(&adapter->hw)) {
1237 EM_CORE_UNLOCK(adapter);
1238 device_printf(adapter->dev, "Media change is"
1239 " blocked due to SOL/IDER session.\n");
1242 EM_CORE_UNLOCK(adapter);
1244 IOCTL_DEBUGOUT("ioctl rcv'd: \
1245 SIOCxIFMEDIA (Get/Set Interface Media)");
1246 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
/* SIOCSIFCAP: toggle offload/polling capabilities bit-by-bit */
1252 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
1254 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1255 #ifdef DEVICE_POLLING
1256 if (mask & IFCAP_POLLING) {
1257 if (ifr->ifr_reqcap & IFCAP_POLLING) {
1258 error = ether_poll_register(em_poll, ifp);
1261 EM_CORE_LOCK(adapter);
1262 em_disable_intr(adapter);
1263 ifp->if_capenable |= IFCAP_POLLING;
1264 EM_CORE_UNLOCK(adapter);
1266 error = ether_poll_deregister(ifp);
1267 /* Enable interrupt even in error case */
1268 EM_CORE_LOCK(adapter);
1269 em_enable_intr(adapter);
1270 ifp->if_capenable &= ~IFCAP_POLLING;
1271 EM_CORE_UNLOCK(adapter);
1275 if (mask & IFCAP_HWCSUM) {
1276 ifp->if_capenable ^= IFCAP_HWCSUM;
1280 if (mask & IFCAP_TSO4) {
1281 ifp->if_capenable ^= IFCAP_TSO4;
1285 if (mask & IFCAP_VLAN_HWTAGGING) {
1286 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1290 if (mask & IFCAP_VLAN_HWFILTER) {
1291 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
1295 if ((mask & IFCAP_WOL) &&
1296 (ifp->if_capabilities & IFCAP_WOL) != 0) {
1297 if (mask & IFCAP_WOL_MCAST)
1298 ifp->if_capenable ^= IFCAP_WOL_MCAST;
1299 if (mask & IFCAP_WOL_MAGIC)
1300 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
/* Any capability change that needs it triggers a reinit */
1303 if (reinit && (ifp->if_flags & IFF_RUNNING))
1306 VLAN_CAPABILITIES(ifp);
/* default: hand anything else to the generic ethernet ioctl */
1312 error = ether_ioctl(ifp, command, data);
1320 /*********************************************************************
1323  * This routine is used in two ways. It is used by the stack as
1324  * init entry point in network interface structure. It is also used
1325  * by the driver as a hw/sw initialization routine to get to a
1328  * return 0 on success, positive on failure
1329  **********************************************************************/
/*
 * Bring the adapter to a running state: program packet-buffer split,
 * MAC address, VLAN/offload capabilities, TX/RX rings, multicast,
 * interrupts, and the watchdog timer.  Core lock must be held.
 * NOTE(review): lines are missing from this extract (declarations of
 * `pba`/`ctrl`/`tmp`, em_stop() call inside the TX lock, several
 * braces); embedded original line numbers are non-contiguous.
 */
1332 em_init_locked(struct adapter *adapter)
1334 struct ifnet *ifp = adapter->ifp;
1335 device_t dev = adapter->dev;
1338 INIT_DEBUGOUT("em_init: begin");
1340 EM_CORE_LOCK_ASSERT(adapter);
1342 EM_TX_LOCK(adapter);
1344 EM_TX_UNLOCK(adapter);
1347 * Packet Buffer Allocation (PBA)
1348 * Writing PBA sets the receive portion of the buffer
1349 * the remainder is used for the transmit buffer.
1351 * Devices before the 82547 had a Packet Buffer of 64K.
1352 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1353 * After the 82547 the buffer was reduced to 40K.
1354 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1355 * Note: default does not leave enough room for Jumbo Frame >10k.
1357 switch (adapter->hw.mac.type) {
1359 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1360 if (adapter->max_frame_size > 8192)
1361 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1363 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 FIFO-workaround bookkeeping derives from the PBA split */
1364 adapter->tx_fifo_head = 0;
1365 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1366 adapter->tx_fifo_size =
1367 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1369 /* Total Packet Buffer on these is 48K */
1372 case e1000_80003es2lan:
1373 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1375 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1376 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1380 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1383 case e1000_ich10lan:
1385 pba = E1000_PBA_10K;
1391 /* Devices before 82547 had a Packet Buffer of 64K. */
1392 if (adapter->max_frame_size > 8192)
1393 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1395 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1398 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1399 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1401 /* Get the latest mac address, User can use a LAA */
1402 bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
1405 /* Put the address into the Receive Address Array */
1406 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1409 * With the 82571 adapter, RAR[0] may be overwritten
1410 * when the other port is reset, we make a duplicate
1411 * in RAR[14] for that eventuality, this assures
1412 * the interface continues to function.
1414 if (adapter->hw.mac.type == e1000_82571) {
1415 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1416 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1417 E1000_RAR_ENTRIES - 1);
1420 /* Initialize the hardware */
1421 if (em_hardware_init(adapter)) {
1422 device_printf(dev, "Unable to initialize the hardware\n");
1425 em_update_link_status(adapter);
1427 /* Setup VLAN support, basic and offload if available */
1428 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
1429 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1430 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
1431 /* Use real VLAN Filter support */
1432 em_setup_vlan_hw_support(adapter);
/* Otherwise just enable VLAN tag stripping (CTRL.VME) */
1435 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1436 ctrl |= E1000_CTRL_VME;
1437 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
1441 /* Set hardware offload abilities */
1442 ifp->if_hwassist = 0;
1443 if (adapter->hw.mac.type >= e1000_82543) {
1444 if (ifp->if_capenable & IFCAP_TXCSUM)
1445 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
1447 if (ifp->if_capenable & IFCAP_TSO4)
1448 ifp->if_hwassist |= CSUM_TSO;
1452 /* Configure for OS presence */
1453 em_init_manageability(adapter);
1455 /* Prepare transmit descriptors and buffers */
1456 em_setup_transmit_structures(adapter);
1457 em_initialize_transmit_unit(adapter);
1459 /* Setup Multicast table */
1460 em_set_multi(adapter);
1462 /* Prepare receive descriptors and buffers */
1463 if (em_setup_receive_structures(adapter)) {
1464 device_printf(dev, "Could not setup receive structures\n");
1465 EM_TX_LOCK(adapter);
1467 EM_TX_UNLOCK(adapter);
1470 em_initialize_receive_unit(adapter);
1472 /* Don't lose promiscuous settings */
1473 em_set_promisc(adapter);
1475 ifp->if_flags |= IFF_RUNNING;
1476 ifp->if_flags &= ~IFF_OACTIVE;
/* Start the 1 Hz housekeeping/watchdog timer */
1478 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1479 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1481 /* MSI/X configuration for 82574 */
1482 if (adapter->hw.mac.type == e1000_82574) {
1484 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1485 tmp |= E1000_CTRL_EXT_PBA_CLR;
1486 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1488 ** Set the IVAR - interrupt vector routing.
1489 ** Each nibble represents a vector, high bit
1490 ** is enable, other 3 bits are the MSIX table
1491 ** entry, we map RXQ0 to 0, TXQ0 to 1, and
1492 ** Link (other) to 2, hence the magic number.
1494 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1497 #ifdef DEVICE_POLLING
1499 * Only enable interrupts if we are not polling, make sure
1500 * they are off otherwise.
1502 if (ifp->if_capenable & IFCAP_POLLING)
1503 em_disable_intr(adapter);
1505 #endif /* DEVICE_POLLING */
1506 em_enable_intr(adapter);
1508 /* AMT based hardware can now take control from firmware */
1509 if (adapter->has_manage && adapter->has_amt)
1510 em_get_hw_control(adapter);
1512 /* Don't reset the phy next time init gets called */
1513 adapter->hw.phy.reset_disable = TRUE;
/*
 * if_init callback: acquires the core lock and defers to
 * em_init_locked().
 * NOTE(review): the function's signature line (presumably
 * `em_init(void *arg)`) was dropped from this extract.
 */
1519 struct adapter *adapter = arg;
1521 EM_CORE_LOCK(adapter);
1522 em_init_locked(adapter);
1523 EM_CORE_UNLOCK(adapter);
#ifdef DEVICE_POLLING
1528 /*********************************************************************
1530  * Legacy polling routine
1532  *********************************************************************/
/*
 * DEVICE_POLLING handler: optionally checks ICR for link/overrun
 * status (POLL_AND_CHECK_STATUS), then processes up to `count` RX
 * descriptors and drains the transmit queue.
 * NOTE(review): the txeof call and return statement fall outside this
 * extract; the __FreeBSD_version conditionals around the TX restart
 * are also truncated.
 */
1534 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1536 struct adapter *adapter = ifp->if_softc;
1537 u32 reg_icr, rx_done = 0;
1539 EM_CORE_LOCK(adapter);
1540 if ((ifp->if_flags & IFF_RUNNING) == 0) {
1541 EM_CORE_UNLOCK(adapter);
1545 if (cmd == POLL_AND_CHECK_STATUS) {
1546 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1547 /* Link status change */
1548 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1549 adapter->hw.mac.get_link_status = 1;
1550 em_update_link_status(adapter);
1552 if (reg_icr & E1000_ICR_RXO)
1553 adapter->rx_overruns++;
1555 EM_CORE_UNLOCK(adapter);
1557 rx_done = em_rxeof(adapter, count);
1559 EM_TX_LOCK(adapter);
/* Restart TX: multiqueue or legacy path, whichever is compiled in */
1562 if (!drbr_empty(ifp, adapter->br))
1563 em_mq_start_locked(ifp, NULL);
1565 if (!ifq_is_empty(&ifp->if_snd))
1566 em_start_locked(ifp);
1568 EM_TX_UNLOCK(adapter);
#endif /* DEVICE_POLLING */
#ifdef EM_LEGACY_IRQ
1574 /*********************************************************************
1576  * Legacy Interrupt Service routine
1578  *********************************************************************/
/*
 * Non-fast legacy ISR: reads ICR, filters stray/shared interrupts,
 * handles link changes (including TX purge on link loss), then does
 * RX/TX cleanup and restarts the transmit queue.
 * NOTE(review): the function signature line and txeof call were
 * dropped from this extract.
 */
1583 struct adapter *adapter = arg;
1584 struct ifnet *ifp = adapter->ifp;
/* Polling owns the device: ignore the interrupt */
1588 if (ifp->if_capenable & IFCAP_POLLING)
1591 EM_CORE_LOCK(adapter);
1592 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1593 if (reg_icr & E1000_ICR_RXO)
1594 adapter->rx_overruns++;
/* 0xffffffff = device gone; INT_ASSERTED clear on 82571+ = not ours */
1595 if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
1596 (adapter->hw.mac.type >= e1000_82571 &&
1597 (reg_icr & E1000_ICR_INT_ASSERTED) == 0))
1600 if ((ifp->if_flags & IFF_RUNNING) == 0)
1603 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1604 callout_stop(&adapter->timer);
1605 adapter->hw.mac.get_link_status = 1;
1606 em_update_link_status(adapter);
1607 /* Deal with TX cruft when link lost */
1608 em_tx_purge(adapter);
1609 callout_reset(&adapter->timer, hz,
1610 em_local_timer, adapter);
1614 EM_TX_LOCK(adapter);
1616 em_rxeof(adapter, -1);
1618 if (ifp->if_flags & IFF_RUNNING &&
1619 !ifq_is_empty(&ifp->if_snd))
1620 em_start_locked(ifp);
1621 EM_TX_UNLOCK(adapter);
1624 EM_CORE_UNLOCK(adapter);
1628 #else /* EM_FAST_IRQ, then fast interrupt routines only */
/*
 * Taskqueue handler for link-status interrupts (fast-IRQ path):
 * refreshes link state, purges stale TX work on link loss, and
 * restarts the 1 Hz timer.
 */
1631 em_handle_link(void *context, int pending)
1633 struct adapter *adapter = context;
1634 struct ifnet *ifp = adapter->ifp;
1636 if (!(ifp->if_flags & IFF_RUNNING))
1639 EM_CORE_LOCK(adapter);
1640 callout_stop(&adapter->timer);
1641 em_update_link_status(adapter);
1642 /* Deal with TX cruft when link lost */
1643 em_tx_purge(adapter);
1644 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1645 EM_CORE_UNLOCK(adapter);
1649 /* Combined RX/TX handler, used by Legacy and MSI */
/*
 * Deferred RX/TX processing: clean RX (re-queueing itself if more work
 * remains), clean TX and restart the transmit queue, then unmask
 * interrupts (they were masked in em_irq_fast()).
 * NOTE(review): the txeof call and #if conditionals around the TX
 * restart are truncated in this extract.
 */
1651 em_handle_rxtx(void *context, int pending)
1653 struct adapter *adapter = context;
1654 struct ifnet *ifp = adapter->ifp;
1657 if (ifp->if_flags & IFF_RUNNING) {
/* More RX work than the process limit: reschedule ourselves */
1658 if (em_rxeof(adapter, adapter->rx_process_limit) != 0)
1659 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1660 EM_TX_LOCK(adapter);
1663 if (!drbr_empty(ifp, adapter->br))
1664 em_mq_start_locked(ifp, NULL);
1666 if (!ifq_is_empty(&ifp->if_snd))
1667 em_start_locked(ifp);
1669 EM_TX_UNLOCK(adapter);
1672 em_enable_intr(adapter);
1675 /*********************************************************************
1677  * Fast Legacy/MSI Combined Interrupt Service routine
1679  *********************************************************************/
/* NOTE(review): these FILTER_* defines appear to be empty-body compat
 * shims for this platform; their original values are not visible. */
1680 #define FILTER_STRAY
1681 #define FILTER_HANDLED
/*
 * Fast interrupt filter: reads ICR, rejects stray interrupts, masks
 * the device and defers real work to the rxtx/link taskqueues (also a
 * workaround for MSI message-reordering errata).
 */
1683 em_irq_fast(void *arg)
1685 struct adapter *adapter = arg;
1691 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
/* All-ones read means the device is gone (e.g. surprise removal) */
1694 if (reg_icr == 0xffffffff)
1695 return FILTER_STRAY;
1697 /* Definitely not our interrupt. */
1699 return FILTER_STRAY;
1702 * Starting with the 82571 chip, bit 31 should be used to
1703 * determine whether the interrupt belongs to us.
1705 if (adapter->hw.mac.type >= e1000_82571 &&
1706 (reg_icr & E1000_ICR_INT_ASSERTED) == 0)
1707 return FILTER_STRAY;
1710 * Mask interrupts until the taskqueue is finished running. This is
1711 * cheap, just assume that it is needed. This also works around the
1712 * MSI message reordering errata on certain systems.
1714 em_disable_intr(adapter);
1715 taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
1717 /* Link status change */
1718 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1719 adapter->hw.mac.get_link_status = 1;
1720 taskqueue_enqueue(taskqueue_swi, &adapter->link_task);
1723 if (reg_icr & E1000_ICR_RXO)
1724 adapter->rx_overruns++;
1725 return FILTER_HANDLED;
1728 /*********************************************************************
1730  * MSIX Interrupt Service Routines
1732  **********************************************************************/
/* IMS bits used to re-arm the individual MSI-X vectors below */
1733 #define EM_MSIX_TX 0x00040000
1734 #define EM_MSIX_RX 0x00010000
1735 #define EM_MSIX_LINK 0x00100000
/*
 * MSI-X TX vector: clean the TX ring, kick the tx taskqueue, then
 * re-arm this vector via IMS.
 * NOTE(review): the txeof call inside the TX lock is not visible in
 * this extract.
 */
1738 em_msix_tx(void *arg)
1740 struct adapter *adapter = arg;
1741 struct ifnet *ifp = adapter->ifp;
1744 if (ifp->if_flags & IFF_RUNNING) {
1745 EM_TX_LOCK(adapter);
1747 EM_TX_UNLOCK(adapter);
1748 taskqueue_enqueue(adapter->tq, &adapter->tx_task);
1750 /* Reenable this interrupt */
1751 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX);
1755 /*********************************************************************
1757  * MSIX RX Interrupt Service routine
1759  **********************************************************************/
/*
 * MSI-X RX vector: clean up to rx_process_limit descriptors; if more
 * work remains, defer it to the rx taskqueue.  Always re-arm this
 * vector via IMS.
 */
1762 em_msix_rx(void *arg)
1764 struct adapter *adapter = arg;
1765 struct ifnet *ifp = adapter->ifp;
1768 if ((ifp->if_flags & IFF_RUNNING) &&
1769 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1770 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
1771 /* Reenable this interrupt */
1772 E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX);
1776 /*********************************************************************
1778  * MSIX Link Fast Interrupt Service routine
1780  **********************************************************************/
/*
 * MSI-X link vector: count the IRQ, check ICR for a link change, and
 * defer handling to the link task; re-arm the vector plus LSC.
 */
1783 em_msix_link(void *arg)
1785 struct adapter *adapter = arg;
1788 ++adapter->link_irq;
1789 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1791 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1792 adapter->hw.mac.get_link_status = 1;
1793 taskqueue_enqueue(taskqueue_swi, &adapter->link_task);
1795 E1000_WRITE_REG(&adapter->hw, E1000_IMS,
1796 EM_MSIX_LINK | E1000_IMS_LSC);
/*
 * Taskqueue handler for deferred RX cleanup (MSI-X path); reschedules
 * itself while em_rxeof() reports remaining work.
 */
1801 em_handle_rx(void *context, int pending)
1803 struct adapter *adapter = context;
1804 struct ifnet *ifp = adapter->ifp;
1806 if ((ifp->if_flags & IFF_RUNNING) &&
1807 (em_rxeof(adapter, adapter->rx_process_limit) != 0))
1808 taskqueue_enqueue(adapter->tq, &adapter->rx_task);
/*
 * Taskqueue handler for deferred TX restart (MSI-X path).  Uses
 * trylock so it never blocks the taskqueue thread; if the lock is
 * busy the TX path holder will restart the queue itself.
 */
1813 em_handle_tx(void *context, int pending)
1815 struct adapter *adapter = context;
1816 struct ifnet *ifp = adapter->ifp;
1818 if (ifp->if_flags & IFF_RUNNING) {
1819 if (!EM_TX_TRYLOCK(adapter))
1823 if (!drbr_empty(ifp, adapter->br))
1824 em_mq_start_locked(ifp, NULL);
1826 if (!ifq_is_empty(&ifp->if_snd))
1827 em_start_locked(ifp);
1829 EM_TX_UNLOCK(adapter);
1832 #endif /* EM_FAST_IRQ */
1834 /*********************************************************************
1836  * Media Ioctl callback
1838  * This routine is called whenever the user queries the status of
1839  * the interface using ifconfig.
1841  **********************************************************************/
/*
 * Report current media to the ifmedia layer: fiber/serdes adapters
 * always report full-duplex gigabit (LX on 82545, SX otherwise);
 * copper reports the negotiated speed and duplex.
 */
1843 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1845 struct adapter *adapter = ifp->if_softc;
1846 u_char fiber_type = IFM_1000_SX;
1848 INIT_DEBUGOUT("em_media_status: begin");
1850 EM_CORE_LOCK(adapter);
1851 em_update_link_status(adapter);
1853 ifmr->ifm_status = IFM_AVALID;
1854 ifmr->ifm_active = IFM_ETHER;
/* No link: report valid-but-inactive and bail */
1856 if (!adapter->link_active) {
1857 EM_CORE_UNLOCK(adapter);
1861 ifmr->ifm_status |= IFM_ACTIVE;
1863 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
1864 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
1865 if (adapter->hw.mac.type == e1000_82545)
1866 fiber_type = IFM_1000_LX;
1867 ifmr->ifm_active |= fiber_type | IFM_FDX;
/* Copper: map link_speed to the matching IFM subtype */
1869 switch (adapter->link_speed) {
1871 ifmr->ifm_active |= IFM_10_T;
1874 ifmr->ifm_active |= IFM_100_TX;
1877 ifmr->ifm_active |= IFM_1000_T;
1880 if (adapter->link_duplex == FULL_DUPLEX)
1881 ifmr->ifm_active |= IFM_FDX;
1883 ifmr->ifm_active |= IFM_HDX;
1885 EM_CORE_UNLOCK(adapter);
1888 /*********************************************************************
1890  * Media Ioctl callback
1892  * This routine is called when the user changes speed/duplex using
1893  * media/mediopt option with ifconfig.
1895  **********************************************************************/
/*
 * Apply a user-requested media selection: program autoneg or forced
 * speed/duplex into the shared-code mac/phy structures, then reinit
 * so the PHY renegotiates.
 * NOTE(review): several case labels (IFM_AUTO/IFM_1000_T etc.) were
 * dropped from this extract.
 */
1897 em_media_change(struct ifnet *ifp)
1899 struct adapter *adapter = ifp->if_softc;
1900 struct ifmedia *ifm = &adapter->media;
1902 INIT_DEBUGOUT("em_media_change: begin");
1904 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1907 EM_CORE_LOCK(adapter);
1908 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1910 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1911 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1916 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1917 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
/* Forced 100 Mb: duplex taken from the media word's GMASK bits */
1920 adapter->hw.mac.autoneg = FALSE;
1921 adapter->hw.phy.autoneg_advertised = 0;
1922 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1923 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1925 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
/* Forced 10 Mb */
1928 adapter->hw.mac.autoneg = FALSE;
1929 adapter->hw.phy.autoneg_advertised = 0;
1930 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1931 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1933 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1936 device_printf(adapter->dev, "Unsupported media type\n");
1939 /* As the speed/duplex settings my have changed we need to
1942 adapter->hw.phy.reset_disable = FALSE;
1944 em_init_locked(adapter);
1945 EM_CORE_UNLOCK(adapter);
1950 /*********************************************************************
1952  * This routine is used in two ways. It maps the mbufs to tx descriptors.
1954  * return 0 on success, positive on failure
1955  **********************************************************************/
/*
 * Map one mbuf chain onto TX descriptors.  Handles TSO pullup, DMA
 * mapping with EFBIG defrag retry, checksum/TSO context setup, the
 * 82544-on-PCI-X address/length split workaround, the TSO sentinel
 * descriptor workaround, VLAN tagging, and finally advances TDT.
 * *m_headp may be modified or set to NULL on failure (caller must not
 * requeue a NULL).
 * NOTE(review): this extract is missing many interleaved lines
 * (declarations such as `struct mbuf *m; bus_dmamap_t map;`, several
 * braces, error returns); treat the visible flow as a skeleton only.
 */
1958 em_xmit(struct adapter *adapter, struct mbuf **m_headp)
1960 bus_dma_segment_t segs[EM_MAX_SCATTER];
1962 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1963 struct e1000_tx_desc *ctxd = NULL;
1964 struct mbuf *m_head;
1965 u32 txd_upper, txd_lower, txd_used, txd_saved;
1966 int nsegs, i, j, first, last = 0;
1967 int error, do_tso, tso_desc = 0;
1970 txd_upper = txd_lower = txd_used = txd_saved = 0;
1973 do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);
1979 * Force a cleanup if number of TX descriptors
1980 * available hits the threshold
1982 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1984 /* Now do we at least have a minimal? */
1985 if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) {
1986 adapter->no_tx_desc_avail1++;
1994 * If an mbuf is only header we need
1995 * to pull 4 bytes of data into it.
1997 if (do_tso && (m_head->m_len <= M_TSO_LEN)) {
1998 m_head = m_pullup(m_head, M_TSO_LEN + 4);
2005 * Map the packet for DMA
2007 * Capture the first descriptor index,
2008 * this descriptor will have the index
2009 * of the EOP which is the only one that
2010 * now gets a DONE bit writeback.
2012 first = adapter->next_avail_tx_desc;
2013 tx_buffer = &adapter->tx_buffer_area[first];
2014 tx_buffer_mapped = tx_buffer;
2015 map = tx_buffer->map;
2017 error = bus_dmamap_load_mbuf_segment(adapter->txtag, map,
2018 *m_headp, segs, EM_MAX_SCATTER, &nsegs, BUS_DMA_NOWAIT);
2021 * There are two types of errors we can (try) to handle:
2022 * - EFBIG means the mbuf chain was too long and bus_dma ran
2023 *   out of segments. Defragment the mbuf chain and try again.
2024 * - ENOMEM means bus_dma could not obtain enough bounce buffers
2025 *   at this point in time. Defer sending and try again later.
2026 * All other errors, in particular EINVAL, are fatal and prevent the
2027 * mbuf chain from ever going through. Drop it and report error.
2029 if (error == EFBIG) {
2032 m = m_defrag(*m_headp, MB_DONTWAIT);
2034 adapter->mbuf_alloc_failed++;
2042 error = bus_dmamap_load_mbuf_segment(adapter->txtag, map,
2043 *m_headp, segs, EM_MAX_SCATTER, &nsegs, BUS_DMA_NOWAIT);
2046 adapter->no_tx_dma_setup++;
2051 } else if (error != 0) {
2052 adapter->no_tx_dma_setup++;
2057 * TSO Hardware workaround, if this packet is not
2058 * TSO, and is only a single descriptor long, and
2059 * it follows a TSO burst, then we need to add a
2060 * sentinel descriptor to prevent premature writeback.
2062 if ((do_tso == 0) && (adapter->tx_tso == TRUE)) {
2065 adapter->tx_tso = FALSE;
/* Need nsegs descriptors plus slack; otherwise give up for now */
2068 if (nsegs > (adapter->num_tx_desc_avail - 2)) {
2069 adapter->no_tx_desc_avail2++;
2070 bus_dmamap_unload(adapter->txtag, map);
2075 /* Do hardware assists */
2077 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
2078 error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
2080 return (ENXIO); /* something foobar */
2081 /* we need to make a final sentinel transmit desc */
2085 if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)
2086 em_transmit_checksum_setup(adapter, m_head,
2087 &txd_upper, &txd_lower);
2089 i = adapter->next_avail_tx_desc;
2090 if (adapter->pcix_82544)
2093 /* Set up our transmit descriptors */
2094 for (j = 0; j < nsegs; j++) {
2096 bus_addr_t seg_addr;
2097 /* If adapter is 82544 and on PCIX bus */
2098 if(adapter->pcix_82544) {
2099 DESC_ARRAY desc_array;
2100 u32 array_elements, counter;
2102 * Check the Address and Length combination and
2103 * split the data accordingly
2105 array_elements = em_fill_descriptors(segs[j].ds_addr,
2106 segs[j].ds_len, &desc_array);
2107 for (counter = 0; counter < array_elements; counter++) {
2108 if (txd_used == adapter->num_tx_desc_avail) {
2109 adapter->next_avail_tx_desc = txd_saved;
2110 adapter->no_tx_desc_avail2++;
2111 bus_dmamap_unload(adapter->txtag, map);
2114 tx_buffer = &adapter->tx_buffer_area[i];
2115 ctxd = &adapter->tx_desc_base[i];
2116 ctxd->buffer_addr = htole64(
2117 desc_array.descriptor[counter].address);
2118 ctxd->lower.data = htole32(
2119 (adapter->txd_cmd | txd_lower | (u16)
2120 desc_array.descriptor[counter].length));
2122 htole32((txd_upper));
/* Wrap the ring index */
2124 if (++i == adapter->num_tx_desc)
2126 tx_buffer->m_head = NULL;
2127 tx_buffer->next_eop = -1;
2131 tx_buffer = &adapter->tx_buffer_area[i];
2132 ctxd = &adapter->tx_desc_base[i];
2133 seg_addr = segs[j].ds_addr;
2134 seg_len = segs[j].ds_len;
2137 ** If this is the last descriptor, we want to
2138 ** split it so we have a small final sentinel
2140 if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) {
2142 ctxd->buffer_addr = htole64(seg_addr);
2143 ctxd->lower.data = htole32(
2144 adapter->txd_cmd | txd_lower | seg_len);
2147 if (++i == adapter->num_tx_desc)
2149 /* Now make the sentinel */
2150 ++txd_used; /* using an extra txd */
2151 ctxd = &adapter->tx_desc_base[i];
2152 tx_buffer = &adapter->tx_buffer_area[i];
2154 htole64(seg_addr + seg_len);
2155 ctxd->lower.data = htole32(
2156 adapter->txd_cmd | txd_lower | 4);
2160 if (++i == adapter->num_tx_desc)
2163 ctxd->buffer_addr = htole64(seg_addr);
2164 ctxd->lower.data = htole32(
2165 adapter->txd_cmd | txd_lower | seg_len);
2169 if (++i == adapter->num_tx_desc)
2172 tx_buffer->m_head = NULL;
2173 tx_buffer->next_eop = -1;
/* Commit: advance the software ring state */
2177 adapter->next_avail_tx_desc = i;
2178 if (adapter->pcix_82544)
2179 adapter->num_tx_desc_avail -= txd_used;
2181 adapter->num_tx_desc_avail -= nsegs;
2182 if (tso_desc) /* TSO used an extra for sentinel */
2183 adapter->num_tx_desc_avail -= txd_used;
2189 if (m_head->m_flags & M_VLANTAG) {
2190 /* Set the vlan id. */
2191 ctxd->upper.fields.special =
2192 htole16(m_head->m_pkthdr.ether_vlantag);
2193 /* Tell hardware to add tag */
2194 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
2197 tx_buffer->m_head = m_head;
2198 tx_buffer_mapped->map = tx_buffer->map;
2199 tx_buffer->map = map;
2200 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
2203 * Last Descriptor of Packet
2204 * needs End Of Packet (EOP)
2205 * and Report Status (RS)
2208 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
2210 * Keep track in the first buffer which
2211 * descriptor will be written back
2213 tx_buffer = &adapter->tx_buffer_area[first];
2214 tx_buffer->next_eop = last;
2215 adapter->watchdog_time = ticks;
2218 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
2219 * that this frame is available to transmit.
2221 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2222 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* 82547 half-duplex needs the FIFO-hang workaround tail move */
2223 if (adapter->hw.mac.type == e1000_82547 &&
2224 adapter->link_duplex == HALF_DUPLEX)
2225 em_82547_move_tail(adapter);
2227 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
2228 if (adapter->hw.mac.type == e1000_82547)
2229 em_82547_update_fifo_head(adapter,
2230 m_head->m_pkthdr.len);
2236 /*********************************************************************
2238  * 82547 workaround to avoid controller hang in half-duplex environment.
2239  * The workaround is to avoid queuing a large packet that would span
2240  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
2241  * in this case. We do that only when FIFO is quiescent.
2243  **********************************************************************/
/*
 * Walk descriptors between hardware TDT and software TDT, summing
 * their lengths; if that packet would trip the FIFO-wrap hazard,
 * retry via a 1-tick callout instead of advancing the tail now.
 * TX lock must be held.
 */
2245 em_82547_move_tail_locked(void *arg)
2247 struct adapter *adapter = arg;
2249 struct e1000_tx_desc *tx_desc;
2250 u16 hw_tdt, sw_tdt, length = 0;
2253 EM_TX_LOCK_ASSERT(adapter);
2255 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
2256 sw_tdt = adapter->next_avail_tx_desc;
2258 while (hw_tdt != sw_tdt) {
2259 tx_desc = &adapter->tx_desc_base[hw_tdt];
2260 length += tx_desc->lower.flags.length;
2261 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
2262 if (++hw_tdt == adapter->num_tx_desc)
/* Hazard detected: defer the tail move one tick and retry */
2266 if (em_82547_fifo_workaround(adapter, length)) {
2267 adapter->tx_fifo_wrk_cnt++;
2268 callout_reset(&adapter->tx_fifo_timer, 1,
2269 em_82547_move_tail, adapter);
2272 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
2273 em_82547_update_fifo_head(adapter, length);
/*
 * Callout-safe wrapper: take the TX lock and run the 82547 tail-move
 * workaround.
 */
2280 em_82547_move_tail(void *arg)
2282 struct adapter *adapter = arg;
2283 EM_TX_LOCK(adapter);
2284 em_82547_move_tail_locked(arg);
2285 EM_TX_UNLOCK(adapter);
/*
 * Decide whether queuing `len` bytes would span the 82547 TX FIFO
 * boundary in half-duplex; if so, try to reset the FIFO pointers.
 * NOTE(review): the tail of this function (its return statements) is
 * missing from this extract.
 */
2289 em_82547_fifo_workaround(struct adapter *adapter, int len)
2291 int fifo_space, fifo_pkt_len;
/* FIFO accounting is done in EM_FIFO_HDR-sized units */
2293 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2295 if (adapter->link_duplex == HALF_DUPLEX) {
2296 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2298 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
2299 if (em_82547_tx_fifo_reset(adapter))
/*
 * Advance the software model of the 82547 TX FIFO head by the rounded
 * packet length, wrapping at the FIFO size.
 */
2310 em_82547_update_fifo_head(struct adapter *adapter, int len)
2312 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
2314 /* tx_fifo_head is always 16 byte aligned */
2315 adapter->tx_fifo_head += fifo_pkt_len;
2316 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
2317 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * Reset the 82547 Tx FIFO pointers, but only when transmit is fully
 * quiescent: descriptor head == tail, FIFO head == tail, FIFO saved
 * head == tail, and the FIFO packet count is zero.  The TX unit is
 * briefly disabled while the pointers are rewritten.
 * NOTE(review): the return paths are elided in this excerpt -- confirm
 * the success/failure values against the complete file.
 */
2323 em_82547_tx_fifo_reset(struct adapter *adapter)
/* All four quiescence conditions must hold before touching the FIFO. */
2327 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
2328 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
2329 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
2330 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
2331 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
2332 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
2333 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
2334 /* Disable TX unit */
2335 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2336 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
2337 tctl & ~E1000_TCTL_EN);
2339 /* Reset FIFO pointers */
2340 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
2341 adapter->tx_head_addr);
2342 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
2343 adapter->tx_head_addr);
2344 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
2345 adapter->tx_head_addr);
2346 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
2347 adapter->tx_head_addr);
2349 /* Re-enable TX unit */
2350 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2351 E1000_WRITE_FLUSH(&adapter->hw);
/* Software FIFO model restarts at offset 0; count resets for stats. */
2353 adapter->tx_fifo_head = 0;
2354 adapter->tx_fifo_reset_cnt++;
/*
 * Program RCTL promiscuous bits from the interface flags:
 * IFF_PROMISC enables unicast+multicast promiscuous (and bad-packet
 * reception), IFF_ALLMULTI enables multicast promiscuous only.
 */
2364 em_set_promisc(struct adapter *adapter)
2366 struct ifnet *ifp = adapter->ifp;
2369 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2371 if (ifp->if_flags & IFF_PROMISC) {
2372 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2373 /* Turn this on if you want to see bad packets */
/* NOTE(review): the SBP enable was likely guarded by an elided
 * conditional (line 2374 missing in this excerpt) -- confirm. */
2375 reg_rctl |= E1000_RCTL_SBP;
2376 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2377 } else if (ifp->if_flags & IFF_ALLMULTI) {
2378 reg_rctl |= E1000_RCTL_MPE;
2379 reg_rctl &= ~E1000_RCTL_UPE;
2380 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
/*
 * Clear all promiscuous-mode bits (unicast, multicast, bad packets)
 * in RCTL, returning the receiver to normal filtering.
 */
2385 em_disable_promisc(struct adapter *adapter)
2389 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2391 reg_rctl &= (~E1000_RCTL_UPE);
2392 reg_rctl &= (~E1000_RCTL_MPE);
2393 reg_rctl &= (~E1000_RCTL_SBP);
2394 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2398 /*********************************************************************
2401 * This routine is called whenever multicast address list is updated.
2403 **********************************************************************/
/*
 * Rebuild the hardware multicast address table from the interface's
 * multicast list.  On 82542 rev 2 the receiver must be held in reset
 * (and MWI disabled) while the table is rewritten.  If the list exceeds
 * MAX_NUM_MULTICAST_ADDRESSES, fall back to multicast-promiscuous mode.
 */
2406 em_set_multi(struct adapter *adapter)
2408 struct ifnet *ifp = adapter->ifp;
2409 struct ifmultiaddr *ifma;
2411 u8 *mta; /* Multicast array memory */
2414 IOCTL_DEBUGOUT("em_set_multi: begin");
/* 82542 rev 2 errata: put the receiver in reset before updating. */
2416 if (adapter->hw.mac.type == e1000_82542 &&
2417 adapter->hw.revision_id == E1000_REVISION_2) {
2418 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2419 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2420 e1000_pci_clear_mwi(&adapter->hw);
2421 reg_rctl |= E1000_RCTL_RST;
2422 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2426 /* Allocate temporary memory to setup array */
2427 mta = kmalloc(sizeof(u8) *
2428 (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
2429 M_DEVBUF, M_NOWAIT | M_ZERO);
/* Allocation failure is fatal here (panics the kernel). */
2431 panic("em_set_multi memory failure\n");
2434 #if __FreeBSD_version < 800000
2437 if_maddr_rlock(ifp);
/* Copy each AF_LINK multicast address into the flat mta array. */
2440 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2441 if (ifma->ifma_addr->sa_family != AF_LINK)
2444 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
2447 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2448 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
2452 #if __FreeBSD_version < 800000
2453 IF_ADDR_UNLOCK(ifp);
2455 if_maddr_runlock(ifp);
/* Too many groups for the hardware table: accept all multicast. */
2458 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
2459 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2460 reg_rctl |= E1000_RCTL_MPE;
2461 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2463 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
/* 82542 rev 2: take the receiver back out of reset, restore MWI. */
2465 if (adapter->hw.mac.type == e1000_82542 &&
2466 adapter->hw.revision_id == E1000_REVISION_2) {
2467 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
2468 reg_rctl &= ~E1000_RCTL_RST;
2469 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
2471 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2472 e1000_pci_set_mwi(&adapter->hw);
2474 kfree(mta, M_DEVBUF);
2478 /*********************************************************************
2481 * This routine checks for link status and updates statistics.
2483 **********************************************************************/
/*
 * Once-per-second housekeeping; caller holds the core lock.
 * Refreshes link state and statistics, re-arms itself via the timer
 * callout, and checks the TX watchdog -- if the watchdog fires the
 * interface is reinitialized.
 * NOTE(review): the goto/label structure around the watchdog reset is
 * elided in this excerpt (lines 2516-2520) -- the printf/reinit tail
 * below presumably sits under a "hung:" label; confirm against the
 * complete file.
 */
2486 em_local_timer_locked(void *arg)
2488 struct adapter *adapter = arg;
2489 struct ifnet *ifp = adapter->ifp;
2491 EM_CORE_LOCK_ASSERT(adapter);
/* Without DEVICE_POLLING, kick deferred RX/TX processing each tick. */
2493 #ifndef DEVICE_POLLING
2494 taskqueue_enqueue(adapter->tq,
2495 &adapter->rxtx_task);
2497 em_update_link_status(adapter);
2498 em_update_stats_counters(adapter);
2500 /* Reset LAA into RAR[0] on 82571 */
2501 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2502 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2504 if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
2505 em_print_hw_stats(adapter);
2507 em_smartspeed(adapter);
2510 * We check the watchdog: the time since
2511 * the last TX descriptor was cleaned.
2512 * This implies a functional TX engine.
2514 if ((adapter->watchdog_check == TRUE) &&
2515 (ticks - adapter->watchdog_time > EM_WATCHDOG))
/* Re-arm the per-second callout. */
2518 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
/* Watchdog recovery: mark down, count the event, reinitialize. */
2521 device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
2522 adapter->ifp->if_flags &= ~IFF_RUNNING;
2523 adapter->watchdog_events++;
2524 em_init_locked(adapter);
/*
 * Callout entry point: take the core lock around
 * em_local_timer_locked().
 */
2528 em_local_timer(void *arg)
2530 struct adapter *adapter = arg;
2532 EM_CORE_LOCK(adapter);
2533 em_local_timer_locked(arg);
2534 EM_CORE_UNLOCK(adapter);
/*
 * Poll (or read the cached) link state per media type, then handle
 * up/down transitions: on link-up, record speed/duplex, apply the
 * 82571/82572 SPEED_MODE workaround for sub-gigabit PCI-E links, and
 * notify the stack; on link-down, clear state and disable the TX
 * watchdog.
 */
2539 em_update_link_status(struct adapter *adapter)
2541 struct e1000_hw *hw = &adapter->hw;
2542 struct ifnet *ifp = adapter->ifp;
2543 device_t dev = adapter->dev;
2546 /* Get the cached link value or read phy for real */
2547 switch (hw->phy.media_type) {
2548 case e1000_media_type_copper:
2549 if (hw->mac.get_link_status) {
2550 /* Do the work to read phy */
2551 e1000_check_for_link(hw);
/* get_link_status is cleared by the shared code once link is read. */
2552 link_check = !hw->mac.get_link_status;
2553 if (link_check) /* ESB2 fix */
2554 e1000_cfg_on_link_up(hw);
2558 case e1000_media_type_fiber:
2559 e1000_check_for_link(hw);
/* NOTE(review): the STATUS bit mask tested here is elided (line 2561). */
2560 link_check = (E1000_READ_REG(hw, E1000_STATUS) &
2563 case e1000_media_type_internal_serdes:
2564 e1000_check_for_link(hw);
2565 link_check = adapter->hw.mac.serdes_has_link;
2568 case e1000_media_type_unknown:
2572 /* Now check for a transition */
2573 if (link_check && (adapter->link_active == 0)) {
2574 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2575 &adapter->link_duplex);
2576 /* Check if we must disable SPEED_MODE bit on PCI-E */
2577 if ((adapter->link_speed != SPEED_1000) &&
2578 ((hw->mac.type == e1000_82571) ||
2579 (hw->mac.type == e1000_82572))) {
2581 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2582 tarc0 &= ~SPEED_MODE_BIT;
2583 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
2586 device_printf(dev, "Link is up %d Mbps %s\n",
2587 adapter->link_speed,
2588 ((adapter->link_duplex == FULL_DUPLEX) ?
2589 "Full Duplex" : "Half Duplex"));
2590 adapter->link_active = 1;
/* Reset the SmartSpeed state machine on any fresh link-up. */
2591 adapter->smartspeed = 0;
2592 ifp->if_baudrate = adapter->link_speed * 1000000;
2593 ifp->if_link_state = LINK_STATE_UP;
2594 if_link_state_change(ifp);
2595 } else if (!link_check && (adapter->link_active == 1)) {
2596 ifp->if_baudrate = adapter->link_speed = 0;
2597 adapter->link_duplex = 0;
2599 device_printf(dev, "Link is Down\n");
2600 adapter->link_active = 0;
2601 /* Link down, disable watchdog */
2602 adapter->watchdog_check = FALSE;
2603 ifp->if_link_state = LINK_STATE_DOWN;
2604 if_link_state_change(ifp);
2608 /*********************************************************************
2610 * This routine disables all traffic on the adapter by issuing a
2611 * global reset on the MAC and deallocates TX/RX buffers.
2613 * This routine should always be called with BOTH the CORE
2615 **********************************************************************/
/*
 * NOTE(review): the function signature line is elided in this excerpt;
 * from the preceding header comment and the body this is em_stop(arg).
 * Quiesce the adapter: disable interrupts, stop callouts, mark the
 * interface down, and issue a global MAC reset.  Per the header
 * comment, both the core and TX locks must be held.
 */
2620 struct adapter *adapter = arg;
2621 struct ifnet *ifp = adapter->ifp;
2623 EM_CORE_LOCK_ASSERT(adapter);
2624 EM_TX_LOCK_ASSERT(adapter);
2626 INIT_DEBUGOUT("em_stop: begin");
2628 em_disable_intr(adapter);
2629 callout_stop(&adapter->timer);
2630 callout_stop(&adapter->tx_fifo_timer);
2632 /* Tell the stack that the interface is no longer active */
2633 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2635 e1000_reset_hw(&adapter->hw);
/* Clear wake-up control on adapters that have the WUC register. */
2636 if (adapter->hw.mac.type >= e1000_82544)
2637 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2641 /*********************************************************************
2643 * Determine hardware revision.
2645 **********************************************************************/
/*
 * Read PCI config space to identify the adapter: ensure bus-master and
 * memory-access are enabled, capture vendor/device/revision/subsystem
 * IDs, and let the shared code derive the MAC type.
 */
2647 em_identify_hardware(struct adapter *adapter)
2649 device_t dev = adapter->dev;
2651 /* Make sure our PCI config space has the necessary stuff set */
2652 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2653 if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
2654 (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
2655 device_printf(dev, "Memory Access and/or Bus Master bits "
/* Force-enable the bits and write the command word back. */
2657 adapter->hw.bus.pci_cmd_word |=
2658 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
2659 pci_write_config(dev, PCIR_COMMAND,
2660 adapter->hw.bus.pci_cmd_word, 2);
2663 /* Save off the information about this board */
2664 adapter->hw.vendor_id = pci_get_vendor(dev);
2665 adapter->hw.device_id = pci_get_device(dev);
2666 adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
2667 adapter->hw.subsystem_vendor_id =
2668 pci_read_config(dev, PCIR_SUBVEND_0, 2);
2669 adapter->hw.subsystem_device_id =
2670 pci_read_config(dev, PCIR_SUBDEV_0, 2);
2672 /* Do Shared Code Init and Setup */
2673 if (e1000_set_mac_type(&adapter->hw)) {
2674 device_printf(dev, "Setup init failure\n");
/*
 * Map the register BAR, locate and map the IO BAR on the adapter
 * generations that use one (post-82543, pre-82571), initialize the
 * MSI-X resource arrays, and probe for MSI/MSI-X support.
 */
2680 em_allocate_pci_resources(struct adapter *adapter)
2682 device_t dev = adapter->dev;
2683 int val, rid, error = E1000_SUCCESS;
/* Memory-mapped register BAR. */
2686 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2688 if (adapter->memory == NULL) {
2689 device_printf(dev, "Unable to allocate bus resource: memory\n");
2692 adapter->osdep.mem_bus_space_tag =
2693 rman_get_bustag(adapter->memory);
2694 adapter->osdep.mem_bus_space_handle =
2695 rman_get_bushandle(adapter->memory);
2696 adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
2698 /* Only older adapters use IO mapping */
2699 if ((adapter->hw.mac.type > e1000_82543) &&
2700 (adapter->hw.mac.type < e1000_82571)) {
2701 /* Figure out where our IO BAR is */
2702 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
2703 val = pci_read_config(dev, rid, 4);
2704 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2705 adapter->io_rid = rid;
2709 /* check for 64bit BAR */
/* 64-bit memory BARs occupy two registers; skip the extra dword. */
2710 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2713 if (rid >= PCIR_CIS) {
2714 device_printf(dev, "Unable to locate IO BAR\n");
2717 adapter->ioport = bus_alloc_resource_any(dev,
2718 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
2719 if (adapter->ioport == NULL) {
2720 device_printf(dev, "Unable to allocate bus resource: "
2724 adapter->hw.io_base = 0;
2725 adapter->osdep.io_bus_space_tag =
2726 rman_get_bustag(adapter->ioport);
2727 adapter->osdep.io_bus_space_handle =
2728 rman_get_bushandle(adapter->ioport);
2732 ** Init the resource arrays
2733 ** used by MSIX setup
2735 for (int i = 0; i < 3; i++) {
2736 adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */
2737 adapter->tag[i] = NULL;
2738 adapter->res[i] = NULL;
2742 * Setup MSI/X or MSI if PCI Express
2745 adapter->msi = em_setup_msix(adapter);
2747 adapter->hw.back = &adapter->osdep;
2752 /*********************************************************************
2754 * Setup the Legacy or MSI Interrupt handler
2756 **********************************************************************/
/*
 * Allocate and wire up the single legacy/MSI interrupt.  With
 * EM_LEGACY_IRQ defined a normal MPSAFE handler is installed;
 * otherwise a fast interrupt plus taskqueue-deferred RX/TX and link
 * tasks are used.
 */
2758 em_allocate_legacy(struct adapter *adapter)
2760 device_t dev = adapter->dev;
2763 /* Manually turn off all interrupts */
2764 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2766 /* Legacy RID is 0 */
2767 if (adapter->msi == 0)
2768 adapter->rid[0] = 0;
2770 /* We allocate a single interrupt resource */
2771 adapter->res[0] = bus_alloc_resource_any(dev,
2772 SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE);
2773 if (adapter->res[0] == NULL) {
2774 device_printf(dev, "Unable to allocate bus resource: "
2779 #ifdef EM_LEGACY_IRQ
2780 /* We do Legacy setup */
2781 if ((error = bus_setup_intr(dev, adapter->res[0],
2782 /*INTR_TYPE_NET |*/ INTR_MPSAFE, em_intr, adapter,
2783 &adapter->tag[0], NULL)) != 0) {
2784 device_printf(dev, "Failed to register interrupt handler");
2788 #else /* FAST_IRQ */
2790 * Try allocating a fast interrupt and the associated deferred
2791 * processing contexts.
2793 TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter);
2794 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2795 adapter->tq = taskqueue_create("em_taskq", M_NOWAIT,
2796 taskqueue_thread_enqueue, &adapter->tq);
2797 taskqueue_start_threads(&adapter->tq, 1, TDPRI_KERN_DAEMON /*PI_NET*/, -1, "%s taskq",
2798 device_get_nameunit(adapter->dev));
2799 if ((error = bus_setup_intr(dev, adapter->res[0],
2800 /*INTR_TYPE_NET |*/ INTR_FAST, em_irq_fast, adapter,
2801 &adapter->tag[0], NULL)) != 0) {
2802 device_printf(dev, "Failed to register fast interrupt "
2803 "handler: %d\n", error);
/* Undo the taskqueue on failure; the irq itself is released elsewhere. */
2804 taskqueue_free(adapter->tq);
2808 #endif /* EM_LEGACY_IRQ */
2813 /*********************************************************************
2815 * Setup the MSIX Interrupt handlers
2816 * This is not really Multiqueue, rather
2817 * its just multiple interrupt vectors.
2819 **********************************************************************/
/*
 * Allocate the MSI-X interrupt resources and install one handler per
 * vector: slot 0 = RX, slot 1 = TX, slot 2 = link.  Deferred-processing
 * tasks and a shared taskqueue are created as well.
 * Fix: the link-vector failure message previously said "TX handler"
 * (copy-paste from the TX case); it now correctly says "Link handler".
 */
2821 em_allocate_msix(struct adapter *adapter)
2823 device_t dev = adapter->dev;
2826 /* Make sure all interrupts are disabled */
2827 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
2829 /* First get the resources */
2830 for (int i = 0; i < adapter->msi; i++) {
2831 adapter->res[i] = bus_alloc_resource_any(dev,
2832 SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE);
2833 if (adapter->res[i] == NULL) {
2835 "Unable to allocate bus resource: "
2836 "MSIX Interrupt\n");
2842 * Now allocate deferred processing contexts.
2844 TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter);
2845 TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter);
2847 * Handle compatibility for msi case for deferral due to
2850 TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter);
2851 TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter);
2852 adapter->tq = taskqueue_create("em_taskq", M_NOWAIT,
2853 taskqueue_thread_enqueue, &adapter->tq);
2854 taskqueue_start_threads(&adapter->tq, 1, TDPRI_KERN_DAEMON /*PI_NET*/, -1, "%s taskq",
2855 device_get_nameunit(adapter->dev));
2858 * And setup the interrupt handlers
2861 /* First slot to RX */
2862 if ((error = bus_setup_intr(dev, adapter->res[0],
2863 /*INTR_TYPE_NET |*/ INTR_MPSAFE, em_msix_rx, adapter,
2864 &adapter->tag[0], NULL)) != 0) {
2865 device_printf(dev, "Failed to register RX handler");
/* Second slot to TX */
2870 if ((error = bus_setup_intr(dev, adapter->res[1],
2871 /*INTR_TYPE_NET |*/ INTR_MPSAFE, em_msix_tx, adapter,
2872 &adapter->tag[1], NULL)) != 0) {
2873 device_printf(dev, "Failed to register TX handler");
/* Third slot to link-state changes */
2878 if ((error = bus_setup_intr(dev, adapter->res[2],
2879 /*INTR_TYPE_NET |*/ INTR_MPSAFE, em_msix_link, adapter,
2880 &adapter->tag[2], NULL)) != 0) {
2881 device_printf(dev, "Failed to register Link handler");
/*
 * Release everything em_allocate_pci_resources() and the interrupt
 * setup acquired: interrupt handlers and IRQ resources (same array
 * logic for legacy, MSI, and MSI-X), the MSI allocation itself, and
 * the MSI-X/memory/flash/ioport BAR mappings.
 */
2890 em_free_pci_resources(struct adapter *adapter)
2892 device_t dev = adapter->dev;
2894 /* Make sure the for loop below runs once */
/* NOTE(review): the assignment forcing msi to 1 is elided (line ~2896). */
2895 if (adapter->msi == 0)
2899 * First release all the interrupt resources:
2900 * notice that since these are just kept
2901 * in an array we can do the same logic
2902 * whether its MSIX or just legacy.
2904 for (int i = 0; i < adapter->msi; i++) {
2905 if (adapter->tag[i] != NULL) {
2906 bus_teardown_intr(dev, adapter->res[i],
2908 adapter->tag[i] = NULL;
2910 if (adapter->res[i] != NULL) {
2911 bus_release_resource(dev, SYS_RES_IRQ,
2912 adapter->rid[i], adapter->res[i]);
2917 pci_release_msi(dev);
2919 if (adapter->msix != NULL)
2920 bus_release_resource(dev, SYS_RES_MEMORY,
2921 PCIR_BAR(EM_MSIX_BAR), adapter->msix);
2923 if (adapter->memory != NULL)
2924 bus_release_resource(dev, SYS_RES_MEMORY,
2925 PCIR_BAR(0), adapter->memory);
2927 if (adapter->flash != NULL)
2928 bus_release_resource(dev, SYS_RES_MEMORY,
2929 EM_FLASH, adapter->flash);
2931 if (adapter->ioport != NULL)
2932 bus_release_resource(dev, SYS_RES_IOPORT,
2933 adapter->io_rid, adapter->ioport);
2937 * Setup MSI or MSI/X
/*
 * Probe and allocate MSI-X (82574 "Hartwell" only, capped at 3 vectors)
 * or plain MSI on PCI Express parts; older than 82571 gets neither.
 * NOTE(review): the return statements are elided in this excerpt --
 * presumably the vector count (0 for legacy); confirm against the
 * complete file.
 */
2940 em_setup_msix(struct adapter *adapter)
2942 device_t dev = adapter->dev;
/* Pre-82571 adapters support neither MSI nor MSI-X here. */
2945 if (adapter->hw.mac.type < e1000_82571)
2948 /* Setup MSI/X for Hartwell */
2949 if (adapter->hw.mac.type == e1000_82574) {
2950 /* Map the MSIX BAR */
2951 int rid = PCIR_BAR(EM_MSIX_BAR);
2952 adapter->msix = bus_alloc_resource_any(dev,
2953 SYS_RES_MEMORY, &rid, RF_ACTIVE);
2954 if (!adapter->msix) {
2955 /* May not be enabled */
2956 device_printf(adapter->dev,
2957 "Unable to map MSIX table \n");
2960 val = pci_msix_count(dev);
2962 ** 82574 can be configured for 5 but
2963 ** we limit use to 3.
2965 if (val > 3) val = 3;
2966 if ((val) && pci_alloc_msix(dev, &val) == 0) {
2967 device_printf(adapter->dev,"Using MSIX interrupts\n");
/* Fall back to single-vector MSI. */
2972 val = pci_msi_count(dev);
2973 if (val == 1 && pci_alloc_msi(dev, &val) == 0) {
2975 device_printf(adapter->dev,"Using MSI interrupt\n");
2981 /*********************************************************************
2983 * Initialize the hardware to a configuration
2984 * as specified by the adapter structure.
2986 **********************************************************************/
/*
 * Bring the MAC to the configuration described by the adapter struct:
 * global reset, optional smart-power-down disable (82571/82572),
 * flow-control watermark programming derived from the packet buffer
 * size, then shared-code e1000_init_hw().
 * Fix: the flow-control tunable sanity check used `||`
 * ((em_fc_setting >= 0) || (em_fc_setting < 4)), which is always true
 * and made the e1000_fc_none fallback unreachable; the comment says
 * "use the tunable location if sane", so the test is now the intended
 * range check with `&&`.
 */
2988 em_hardware_init(struct adapter *adapter)
2990 device_t dev = adapter->dev;
2993 INIT_DEBUGOUT("em_hardware_init: begin");
2995 /* Issue a global reset */
2996 e1000_reset_hw(&adapter->hw);
2998 /* When hardware is reset, fifo_head is also reset */
2999 adapter->tx_fifo_head = 0;
3001 /* Set up smart power down as default off on newer adapters. */
3002 if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 ||
3003 adapter->hw.mac.type == e1000_82572)) {
3006 /* Speed up time to link by disabling smart power down. */
3007 e1000_read_phy_reg(&adapter->hw,
3008 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
3009 phy_tmp &= ~IGP02E1000_PM_SPD;
3010 e1000_write_phy_reg(&adapter->hw,
3011 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
3015 * These parameters control the automatic generation (Tx) and
3016 * response (Rx) to Ethernet PAUSE frames.
3017 * - High water mark should allow for at least two frames to be
3018 * received after sending an XOFF.
3019 * - Low water mark works best when it is very near the high water mark.
3020 * This allows the receiver to restart by sending XON when it has
3021 * drained a bit. Here we use an arbitrary value of 1500 which will
3022 * restart after one full frame is pulled from the buffer. There
3023 * could be several smaller frames in the buffer and if so they will
3024 * not trigger the XON until their total number reduces the buffer
3026 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
3028 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) &
3031 adapter->hw.fc.high_water = rx_buffer_size -
3032 roundup2(adapter->max_frame_size, 1024);
3033 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
/* es2lan wants the maximum pause time; everything else a tuned value. */
3035 if (adapter->hw.mac.type == e1000_80003es2lan)
3036 adapter->hw.fc.pause_time = 0xFFFF;
3038 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
3039 adapter->hw.fc.send_xon = TRUE;
3041 /* Set Flow control, use the tunable location if sane */
3042 if ((em_fc_setting >= 0) && (em_fc_setting < 4))
3043 adapter->hw.fc.requested_mode = em_fc_setting;
3045 adapter->hw.fc.requested_mode = e1000_fc_none;
3047 /* Override - workaround for PCHLAN issue */
3048 if (adapter->hw.mac.type == e1000_pchlan)
3049 adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
3051 if (e1000_init_hw(&adapter->hw) < 0) {
3052 device_printf(dev, "Hardware Initialization Failed\n");
3056 e1000_check_for_link(&adapter->hw);
3061 /*********************************************************************
3063 * Setup networking device structure and register an interface.
3065 **********************************************************************/
/*
 * Populate the ifnet, attach to the ethernet layer, advertise
 * capabilities (checksum offload, TSO4, VLAN tagging/filtering,
 * polling, WOL), and register the supported media types with ifmedia.
 */
3067 em_setup_interface(device_t dev, struct adapter *adapter)
3071 INIT_DEBUGOUT("em_setup_interface: begin");
/* ifnet is embedded in arpcom (BSD ethernet softc convention). */
3073 ifp = adapter->ifp = &adapter->arpcom.ac_if;
3074 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
3075 ifp->if_mtu = ETHERMTU;
3076 ifp->if_init = em_init;
3077 ifp->if_softc = adapter;
3078 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
3079 ifp->if_ioctl = em_ioctl;
3080 ifp->if_start = em_start;
3081 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
3082 ifq_set_ready(&ifp->if_snd);
3084 ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
3086 ifp->if_capabilities = ifp->if_capenable = 0;
3088 #if __FreeBSD_version >= 800000
3089 /* Multiqueue tx functions */
3090 ifp->if_transmit = em_mq_start;
3091 ifp->if_qflush = em_qflush;
3092 adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
/* 82543 and newer can do hardware checksum offload. */
3094 if (adapter->hw.mac.type >= e1000_82543) {
3096 version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
3097 ifp->if_capabilities |= version_cap;
3098 ifp->if_capenable |= version_cap;
3102 /* Identify TSO capable adapters */
3103 if ((adapter->hw.mac.type > e1000_82544) &&
3104 (adapter->hw.mac.type != e1000_82547))
3105 ifp->if_capabilities |= IFCAP_TSO4;
3107 * By default only enable on PCI-E, this
3108 * can be overriden by ifconfig.
3110 if (adapter->hw.mac.type >= e1000_82571)
3111 ifp->if_capenable |= IFCAP_TSO4;
3114 * Tell the upper layer(s) we
3115 * support full VLAN capability
3117 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
3118 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
3119 ifp->if_capenable |= (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
3122 ** Dont turn this on by default, if vlans are
3123 ** created on another pseudo device (eg. lagg)
3124 ** then vlan events are not passed thru, breaking
3125 ** operation, but with HW FILTER off it works. If
3126 ** using vlans directly on the em driver you can
3127 ** enable this and get full hardware tag filtering.
3129 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
3131 #ifdef DEVICE_POLLING
3132 ifp->if_capabilities |= IFCAP_POLLING;
3135 /* Limit WOL to MAGIC, not clear others are used */
/* NOTE(review): likely guarded by an elided adapter->wol test (3136). */
3137 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
3138 ifp->if_capenable |= IFCAP_WOL_MAGIC;
3142 * Specify the media types supported by this adapter and register
3143 * callbacks to update media and link information
3145 ifmedia_init(&adapter->media, IFM_IMASK,
3146 em_media_change, em_media_status);
3147 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3148 (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
3149 u_char fiber_type = IFM_1000_SX; /* default type */
3151 if (adapter->hw.mac.type == e1000_82545)
3152 fiber_type = IFM_1000_LX;
3153 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
3155 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
/* Copper media list: 10/100 plus 1000baseT where the PHY supports it. */
3157 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
3158 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
3160 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
3162 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
3164 if (adapter->hw.phy.type != e1000_phy_ife) {
3165 ifmedia_add(&adapter->media,
3166 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
3167 ifmedia_add(&adapter->media,
3168 IFM_ETHER | IFM_1000_T, 0, NULL);
3171 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
3172 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
3176 /*********************************************************************
3178 * Workaround for SmartSpeed on 82541 and 82547 controllers
3180 **********************************************************************/
/*
 * SmartSpeed workaround for 82541/82547 (IGP PHY): when autonegotiating
 * 1000FULL without link, repeated master/slave configuration faults are
 * worked around by toggling CR_1000T_MS_ENABLE and restarting
 * autonegotiation.  adapter->smartspeed counts timer ticks through the
 * state machine; it resets after EM_SMARTSPEED_MAX iterations.
 */
3182 em_smartspeed(struct adapter *adapter)
/* Nothing to do with link up, non-IGP PHY, or no 1000FULL advert. */
3186 if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) ||
3187 adapter->hw.mac.autoneg == 0 ||
3188 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
3191 if (adapter->smartspeed == 0) {
3192 /* If Master/Slave config fault is asserted twice,
3193 * we assume back-to-back */
3194 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3195 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
3197 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
3198 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
3199 e1000_read_phy_reg(&adapter->hw,
3200 PHY_1000T_CTRL, &phy_tmp);
3201 if(phy_tmp & CR_1000T_MS_ENABLE) {
/* Drop forced master/slave mode and restart autoneg. */
3202 phy_tmp &= ~CR_1000T_MS_ENABLE;
3203 e1000_write_phy_reg(&adapter->hw,
3204 PHY_1000T_CTRL, phy_tmp);
3205 adapter->smartspeed++;
3206 if(adapter->hw.mac.autoneg &&
3207 !e1000_copper_link_autoneg(&adapter->hw) &&
3208 !e1000_read_phy_reg(&adapter->hw,
3209 PHY_CONTROL, &phy_tmp)) {
3210 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3211 MII_CR_RESTART_AUTO_NEG);
3212 e1000_write_phy_reg(&adapter->hw,
3213 PHY_CONTROL, phy_tmp);
3218 } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
3219 /* If still no link, perhaps using 2/3 pair cable */
/* Re-enable master/slave mode and restart autoneg once more. */
3220 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
3221 phy_tmp |= CR_1000T_MS_ENABLE;
3222 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
3223 if(adapter->hw.mac.autoneg &&
3224 !e1000_copper_link_autoneg(&adapter->hw) &&
3225 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
3226 phy_tmp |= (MII_CR_AUTO_NEG_EN |
3227 MII_CR_RESTART_AUTO_NEG);
3228 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
3231 /* Restart process after EM_SMARTSPEED_MAX iterations */
3232 if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
3233 adapter->smartspeed = 0;
3238 * Manage DMA'able memory.
/*
 * bus_dmamap_load() callback: store the (single-segment) physical
 * address into the bus_addr_t pointed to by 'arg'.
 */
3241 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3245 *(bus_addr_t *) arg = segs[0].ds_addr;
/*
 * Allocate a contiguous, EM_DBA_ALIGN-aligned DMA region of 'size'
 * bytes: create the tag, allocate coherent memory, and load the map,
 * capturing the physical address via em_dmamap_cb().
 * Cleanup on failure unwinds in reverse order (the goto labels are
 * elided in this excerpt).
 */
3249 em_dma_malloc(struct adapter *adapter, bus_size_t size,
3250 struct em_dma_alloc *dma, int mapflags)
3254 error = bus_dma_tag_create(NULL, /* parent */
3255 EM_DBA_ALIGN, 0, /* alignment, bounds */
3256 BUS_SPACE_MAXADDR, /* lowaddr */
3257 BUS_SPACE_MAXADDR, /* highaddr */
3258 NULL, NULL, /* filter, filterarg */
3261 size, /* maxsegsize */
3265 device_printf(adapter->dev,
3266 "%s: bus_dma_tag_create failed: %d\n",
3271 error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
3272 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
3274 device_printf(adapter->dev,
3275 "%s: bus_dmamem_alloc(%ju) failed: %d\n",
3276 __func__, (uintmax_t)size, error);
3281 error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
3282 size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
/* A zero physical address also counts as a load failure. */
3283 if (error || dma->dma_paddr == 0) {
3284 device_printf(adapter->dev,
3285 "%s: bus_dmamap_load failed: %d\n",
/* Error unwind: unload, free memory, destroy tag, clear pointers. */
3293 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3295 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3296 bus_dma_tag_destroy(dma->dma_tag);
3298 dma->dma_map = NULL;
3299 dma->dma_tag = NULL;
/*
 * Tear down a region created by em_dma_malloc(): sync, unload and free
 * the map/memory, then destroy the tag.  Safe to call on an already
 * freed (tag == NULL) descriptor.
 */
3305 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
3307 if (dma->dma_tag == NULL)
3309 if (dma->dma_map != NULL) {
3310 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
3311 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3312 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
3313 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
3314 dma->dma_map = NULL;
3316 bus_dma_tag_destroy(dma->dma_tag);
3317 dma->dma_tag = NULL;
3321 /*********************************************************************
3323 * Allocate memory for tx_buffer structures. The tx_buffer stores all
3324 * the information needed to transmit a packet on the wire.
3326 **********************************************************************/
/*
 * Allocate the per-descriptor tx_buffer array and its DMA machinery:
 * one mbuf-chain tag (sized for TSO) plus one DMA map per descriptor.
 * On any failure, em_free_transmit_structures() unwinds partial state.
 */
3328 em_allocate_transmit_structures(struct adapter *adapter)
3330 device_t dev = adapter->dev;
3331 struct em_buffer *tx_buffer;
3335 * Create DMA tags for tx descriptors
3337 if ((error = bus_dma_tag_create(NULL, /* parent */
3338 1, 0, /* alignment, bounds */
3339 BUS_SPACE_MAXADDR, /* lowaddr */
3340 BUS_SPACE_MAXADDR, /* highaddr */
3341 NULL, NULL, /* filter, filterarg */
3342 EM_TSO_SIZE, /* maxsize */
3343 EM_MAX_SCATTER, /* nsegments */
3344 EM_TSO_SEG_SIZE, /* maxsegsize */
3346 &adapter->txtag)) != 0) {
3347 device_printf(dev, "Unable to allocate TX DMA tag\n");
3351 adapter->tx_buffer_area = kmalloc(sizeof(struct em_buffer) *
3352 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
3353 if (adapter->tx_buffer_area == NULL) {
3354 device_printf(dev, "Unable to allocate tx_buffer memory\n");
3359 /* Create the descriptor buffer dma maps */
3360 for (int i = 0; i < adapter->num_tx_desc; i++) {
3361 tx_buffer = &adapter->tx_buffer_area[i];
3362 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
3364 device_printf(dev, "Unable to create TX DMA map\n");
/* -1 marks "no end-of-packet descriptor outstanding". */
3367 tx_buffer->next_eop = -1;
/* Failure path: release whatever was built so far. */
3372 em_free_transmit_structures(adapter);
3376 /*********************************************************************
3378 * (Re)Initialize transmit structures.
3380 **********************************************************************/
/*
 * (Re)initialize the TX ring for use: zero the descriptors, free any
 * mbufs still attached to the buffers, reset ring indices/counters,
 * and sync the descriptor DMA region.
 */
3382 em_setup_transmit_structures(struct adapter *adapter)
3384 struct em_buffer *tx_buffer;
3386 /* Clear the old ring contents */
3387 bzero(adapter->tx_desc_base,
3388 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
3390 /* Free any existing TX buffers */
/* NOTE(review): the tx_buffer++ increment is redundant -- tx_buffer is
 * reassigned from the index at the top of every iteration. */
3391 for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
3392 tx_buffer = &adapter->tx_buffer_area[i];
3393 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3394 BUS_DMASYNC_POSTWRITE);
3395 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
3396 m_freem(tx_buffer->m_head);
3397 tx_buffer->m_head = NULL;
3398 tx_buffer->next_eop = -1;
/* Ring bookkeeping starts empty: all descriptors available. */
3402 adapter->next_avail_tx_desc = 0;
3403 adapter->next_tx_to_clean = 0;
3404 adapter->num_tx_desc_avail = adapter->num_tx_desc;
3406 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3407 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3412 /*********************************************************************
3414 * Enable transmit unit.
3416 **********************************************************************/
/*
 * Program the hardware transmit unit: descriptor ring base/length and
 * head/tail pointers, inter-packet gap per MAC generation, interrupt
 * delay timers, per-chip TARC tweaks, and finally TCTL (which enables
 * transmit).  Also seeds the base descriptor command flags.
 */
3418 em_initialize_transmit_unit(struct adapter *adapter)
3420 u32 tctl, tarc, tipg = 0;
3423 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
3424 /* Setup the Base and Length of the Tx Descriptor Ring */
3425 bus_addr = adapter->txdma.dma_paddr;
3426 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
3427 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
3428 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
3429 (u32)(bus_addr >> 32));
3430 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
3432 /* Setup the HW Tx Head and Tail descriptor pointers */
3433 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
3434 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
3436 HW_DEBUGOUT2("Base = %x, Length = %x\n",
3437 E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)),
3438 E1000_READ_REG(&adapter->hw, E1000_TDLEN(0)));
3440 /* Set the default values for the Tx Inter Packet Gap timer */
3441 switch (adapter->hw.mac.type) {
/* 82542-family gap values (case labels elided in this excerpt). */
3443 tipg = DEFAULT_82542_TIPG_IPGT;
3444 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3445 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3447 case e1000_80003es2lan:
3448 tipg = DEFAULT_82543_TIPG_IPGR1;
3449 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
3450 E1000_TIPG_IPGR2_SHIFT;
/* Default case: fiber/serdes vs. copper IPGT, common IPGR1/IPGR2. */
3453 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
3454 (adapter->hw.phy.media_type ==
3455 e1000_media_type_internal_serdes))
3456 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
3458 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
3459 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
3460 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
3463 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
3464 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value);
3465 if(adapter->hw.mac.type >= e1000_82540)
3466 E1000_WRITE_REG(&adapter->hw, E1000_TADV,
3467 adapter->tx_abs_int_delay.value);
/* Per-chip TARC tuning: SPEED_MODE on 82571/72, es2lan adjustments. */
3469 if ((adapter->hw.mac.type == e1000_82571) ||
3470 (adapter->hw.mac.type == e1000_82572)) {
3471 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3472 tarc |= SPEED_MODE_BIT;
3473 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3474 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
3475 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
3477 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
3478 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
3480 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
3483 /* Program the Transmit Control Register */
3484 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
3485 tctl &= ~E1000_TCTL_CT;
3486 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
3487 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
3489 if (adapter->hw.mac.type >= e1000_82571)
3490 tctl |= E1000_TCTL_MULR;
3492 /* This write will effectively turn on the transmit unit. */
3493 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
3495 /* Setup Transmit Descriptor Base Settings */
3496 adapter->txd_cmd = E1000_TXD_CMD_IFCS;
/* Only request descriptor-done interrupt delay if a delay is set. */
3498 if (adapter->tx_int_delay.value > 0)
3499 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3502 /*********************************************************************
3504 * Free all transmit related data structures.
3506 **********************************************************************/
/*
 * em_free_transmit_structures: tear down all TX software state.
 * For each TX buffer: sync + unload its DMA map, free any attached
 * mbuf, then destroy the map. Finally frees the tx_buffer_area array,
 * destroys the TX DMA tag, and (FreeBSD >= 8) frees the buf_ring.
 * NOTE(review): listing has elided lines; bodies of some branches
 * (e.g. the bus_dmamap_unload() arguments) are cut mid-call.
 */
3508 em_free_transmit_structures(struct adapter *adapter)
3510 	struct em_buffer *tx_buffer;
3512 	INIT_DEBUGOUT("free_transmit_structures: begin");
3514 	if (adapter->tx_buffer_area != NULL) {
3515 		for (int i = 0; i < adapter->num_tx_desc; i++) {
3516 			tx_buffer = &adapter->tx_buffer_area[i];
3517 			if (tx_buffer->m_head != NULL) {
3518 				bus_dmamap_sync(adapter->txtag, tx_buffer->map,
3519 				    BUS_DMASYNC_POSTWRITE);
3520 				bus_dmamap_unload(adapter->txtag,
3522 				m_freem(tx_buffer->m_head);
3523 				tx_buffer->m_head = NULL;
3524 			} else if (tx_buffer->map != NULL)
3525 				bus_dmamap_unload(adapter->txtag,
3527 			if (tx_buffer->map != NULL) {
3528 				bus_dmamap_destroy(adapter->txtag,
3530 				tx_buffer->map = NULL;
3534 	if (adapter->tx_buffer_area != NULL) {
3535 		kfree(adapter->tx_buffer_area, M_DEVBUF);
3536 		adapter->tx_buffer_area = NULL;
3538 	if (adapter->txtag != NULL) {
3539 		bus_dma_tag_destroy(adapter->txtag);
3540 		adapter->txtag = NULL;
3542 #if __FreeBSD_version >= 800000
3543 	if (adapter->br != NULL)
3544 		buf_ring_free(adapter->br, M_DEVBUF);
3548 /*********************************************************************
3550 * The offload context needs to be set when we transfer the first
3551 * packet of a particular protocol (TCP/UDP). This routine has been
3552 * enhanced to deal with inserted VLAN headers, and IPV6 (not complete)
3554 * Added back the old method of keeping the current context type
3555 * and not setting if unnecessary, as this is reported to be a
3556 * big performance win. -jfv
3557 **********************************************************************/
/*
 * em_transmit_checksum_setup: build a TX context descriptor for
 * hardware IP/TCP/UDP checksum offload of mbuf `mp`, setting
 * *txd_upper / *txd_lower for the following data descriptors.
 * Skips emitting a new context when last_hw_offload already matches
 * (reported as a significant performance win, per the banner above).
 * Consumes one descriptor slot from the TX ring when a context is
 * written. Handles a leading 802.1Q VLAN header (QinQ-friendly).
 * NOTE(review): listing has elided lines (switch labels, returns,
 * closing braces); code shown is not complete.
 */
3559 em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp,
3560     u32 *txd_upper, u32 *txd_lower)
3562 	struct e1000_context_desc *TXD = NULL;
3563 	struct em_buffer *tx_buffer;
3564 	struct ether_vlan_header *eh;
3565 	struct ip *ip = NULL;
3566 	struct ip6_hdr *ip6;
3567 	int curr_txd, ehdrlen;
3568 	u32 cmd, hdr_len, ip_hlen;
3573 	cmd = hdr_len = ipproto = 0;
3574 	curr_txd = adapter->next_avail_tx_desc;
	/*
3577 	 * Determine where frame payload starts.
3578 	 * Jump over vlan headers if already present,
3579 	 * helpful for QinQ too.
	 */
3581 	eh = mtod(mp, struct ether_vlan_header *);
3582 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3583 		etype = ntohs(eh->evl_proto);
3584 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3586 		etype = ntohs(eh->evl_encap_proto);
3587 		ehdrlen = ETHER_HDR_LEN;
	/*
3591 	 * We only support TCP/UDP for IPv4 and IPv6 for the moment.
3592 	 * TODO: Support SCTP too when it hits the tree.
	 */
3596 		ip = (struct ip *)(mp->m_data + ehdrlen);
3597 		ip_hlen = ip->ip_hl << 2;
3599 		/* Setup of IP header checksum. */
3600 		if (mp->m_pkthdr.csum_flags & CSUM_IP) {
			/*
3602 			 * Start offset for header checksum calculation.
3603 			 * End offset for header checksum calculation.
3604 			 * Offset of place to put the checksum.
			 */
3606 			TXD = (struct e1000_context_desc *)
3607 			    &adapter->tx_desc_base[curr_txd];
3608 			TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3609 			TXD->lower_setup.ip_fields.ipcse =
3610 			    htole16(ehdrlen + ip_hlen);
3611 			TXD->lower_setup.ip_fields.ipcso =
3612 			    ehdrlen + offsetof(struct ip, ip_sum);
3613 			cmd |= E1000_TXD_CMD_IP;
3614 			*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
		/* Bail if the IP header is not contiguous in the first mbuf. */
3617 		if (mp->m_len < ehdrlen + ip_hlen)
3618 			return;	/* failure */
3620 		hdr_len = ehdrlen + ip_hlen;
3624 	case ETHERTYPE_IPV6:
3625 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3626 		ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */
3628 		if (mp->m_len < ehdrlen + ip_hlen)
3629 			return;	/* failure */
3631 		/* IPv6 doesn't have a header checksum. */
3633 		hdr_len = ehdrlen + ip_hlen;
3634 		ipproto = ip6->ip6_nxt;
3645 		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
3646 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3647 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3648 			/* no need for context if already set */
3649 			if (adapter->last_hw_offload == CSUM_TCP)
3651 			adapter->last_hw_offload = CSUM_TCP;
			/*
3653 			 * Start offset for payload checksum calculation.
3654 			 * End offset for payload checksum calculation.
3655 			 * Offset of place to put the checksum.
			 */
3657 			TXD = (struct e1000_context_desc *)
3658 			    &adapter->tx_desc_base[curr_txd];
3659 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3660 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3661 			TXD->upper_setup.tcp_fields.tucso =
3662 			    hdr_len + offsetof(struct tcphdr, th_sum);
3663 			cmd |= E1000_TXD_CMD_TCP;
3668 		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
3669 			*txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3670 			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3671 			/* no need for context if already set */
3672 			if (adapter->last_hw_offload == CSUM_UDP)
3674 			adapter->last_hw_offload = CSUM_UDP;
			/*
3676 			 * Start offset for header checksum calculation.
3677 			 * End offset for header checksum calculation.
3678 			 * Offset of place to put the checksum.
			 */
3680 			TXD = (struct e1000_context_desc *)
3681 			    &adapter->tx_desc_base[curr_txd];
3682 			TXD->upper_setup.tcp_fields.tucss = hdr_len;
3683 			TXD->upper_setup.tcp_fields.tucse = htole16(0);
3684 			TXD->upper_setup.tcp_fields.tucso =
3685 			    hdr_len + offsetof(struct udphdr, uh_sum);
	/* Finalize the context descriptor and account for the used slot. */
3693 	TXD->tcp_seg_setup.data = htole32(0);
3694 	TXD->cmd_and_length =
3695 	    htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd);
3696 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
3697 	tx_buffer->m_head = NULL;
3698 	tx_buffer->next_eop = -1;
	/* Advance with ring wrap-around. */
3700 	if (++curr_txd == adapter->num_tx_desc)
3703 	adapter->num_tx_desc_avail--;
3704 	adapter->next_avail_tx_desc = curr_txd;
3709 /**********************************************************************
3711 * Setup work for hardware segmentation offload (TSO)
3713 **********************************************************************/
/*
 * em_tso_setup: build a TSO context descriptor for mbuf `mp`.
 * Validates that the Ethernet + IP + TCP headers are contiguous in the
 * first mbuf, seeds the TCP pseudo-header checksum, then programs the
 * context descriptor (ipcss/ipcse/ipcso, tucss/tucse/tucso, MSS and
 * total header length) and consumes one TX ring slot. Returns FALSE on
 * any input the hardware cannot segment (non-TCP, short headers, IPv6
 * currently rejected). Sets adapter->tx_tso = TRUE on success.
 * NOTE(review): listing has elided lines (switch labels, `isip6`
 * assignments, closing braces); code shown is not complete.
 */
3715 em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
3718 	struct e1000_context_desc *TXD;
3719 	struct em_buffer *tx_buffer;
3720 	struct ether_vlan_header *eh;
3722 	struct ip6_hdr *ip6;
3724 	int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
	/*
3728 	 * This function could/should be extended to support IP/IPv6
3729 	 * fragmentation as well. But as they say, one step at a time.
	 */
	/*
3733 	 * Determine where frame payload starts.
3734 	 * Jump over vlan headers if already present,
3735 	 * helpful for QinQ too.
	 */
3737 	eh = mtod(mp, struct ether_vlan_header *);
3738 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3739 		etype = ntohs(eh->evl_proto);
3740 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3742 		etype = ntohs(eh->evl_encap_proto);
3743 		ehdrlen = ETHER_HDR_LEN;
3746 	/* Ensure we have at least the IP+TCP header in the first mbuf. */
3747 	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
3748 		return FALSE;	/* -1 */
	/*
3751 	 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
3752 	 * TODO: Support SCTP too when it hits the tree.
	 */
3757 		ip = (struct ip *)(mp->m_data + ehdrlen);
3758 		if (ip->ip_p != IPPROTO_TCP)
3759 			return FALSE;	/* 0 */
3762 		ip_hlen = ip->ip_hl << 2;
3763 		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3764 			return FALSE;	/* -1 */
3765 		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		/* Seed the pseudo-header checksum the hardware continues from. */
3767 		th->th_sum = in_pseudo(ip->ip_src.s_addr,
3768 		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3770 		th->th_sum = mp->m_pkthdr.csum_data;
3773 	case ETHERTYPE_IPV6:
3775 		return FALSE;	/* Not supported yet. */
3776 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3777 		if (ip6->ip6_nxt != IPPROTO_TCP)
3778 			return FALSE;	/* 0 */
3780 		ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
3781 		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
3782 			return FALSE;	/* -1 */
3783 		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		/* NOTE(review): `ip->ip6_dst` here looks suspect in the original
		 * (mixing v4 pointer with v6 field); unreachable — IPv6 returns
		 * FALSE above. Confirm against upstream before enabling. */
3785 		th->th_sum = in6_pseudo(ip6->ip6_src, ip->ip6_dst,
3786 		    htons(IPPROTO_TCP));	/* XXX: function notyet. */
3788 		th->th_sum = mp->m_pkthdr.csum_data;
	/* Total header bytes preceding the TCP payload. */
3794 	hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);
3796 	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
3797 		      E1000_TXD_DTYP_D |	/* Data descr type */
3798 		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */
3800 	/* IP and/or TCP header checksum calculation and insertion. */
3801 	*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
3802 	    E1000_TXD_POPTS_TXSM) << 8;
3804 	curr_txd = adapter->next_avail_tx_desc;
3805 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
3806 	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];
3808 	/* IPv6 doesn't have a header checksum. */
	/*
3811 	 * Start offset for header checksum calculation.
3812 	 * End offset for header checksum calculation.
3813 	 * Offset of place put the checksum.
	 */
3815 	TXD->lower_setup.ip_fields.ipcss = ehdrlen;
3816 	TXD->lower_setup.ip_fields.ipcse =
3817 	    htole16(ehdrlen + ip_hlen - 1);
3818 	TXD->lower_setup.ip_fields.ipcso =
3819 	    ehdrlen + offsetof(struct ip, ip_sum);
	/*
3822 	 * Start offset for payload checksum calculation.
3823 	 * End offset for payload checksum calculation.
3824 	 * Offset of place to put the checksum.
	 */
3826 	TXD->upper_setup.tcp_fields.tucss =
3828 	TXD->upper_setup.tcp_fields.tucse = 0;
3829 	TXD->upper_setup.tcp_fields.tucso =
3830 	    ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
	/*
3832 	 * Payload size per packet w/o any headers.
3833 	 * Length of all headers up to payload.
	 */
3835 	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
3836 	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;
3838 	TXD->cmd_and_length = htole32(adapter->txd_cmd |
3839 				E1000_TXD_CMD_DEXT |	/* Extended descr */
3840 				E1000_TXD_CMD_TSE |	/* TSE context */
3841 				(isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
3842 				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
3843 				(mp->m_pkthdr.len - (hdr_len))); /* Total len */
3845 	tx_buffer->m_head = NULL;
3846 	tx_buffer->next_eop = -1;
	/* Advance with ring wrap-around and account for the used slot. */
3848 	if (++curr_txd == adapter->num_tx_desc)
3851 	adapter->num_tx_desc_avail--;
3852 	adapter->next_avail_tx_desc = curr_txd;
3853 	adapter->tx_tso = TRUE;
3860 /**********************************************************************
3862 * Examine each tx_buffer in the used queue. If the hardware is done
3863 * processing the packet then free associated resources. The
3864 * tx_buffer is put back on the free queue.
3866 **********************************************************************/
/*
 * em_txeof: reclaim completed TX descriptors (TX-lock held).
 * Walks packets from next_tx_to_clean while their EOP descriptor has
 * the DD (descriptor-done) status bit, zeroing descriptors, unloading
 * DMA maps, and freeing mbufs. Updates num_tx_desc_avail, clears
 * IFF_OACTIVE once enough room exists, and disarms the watchdog when
 * the ring is fully drained.
 * NOTE(review): listing has elided lines (num_avail increments, `done`
 * computation, closing braces); code shown is not complete.
 */
3868 em_txeof(struct adapter *adapter)
3870 	int first, last, done, num_avail;
3871 	struct em_buffer *tx_buffer;
3872 	struct e1000_tx_desc *tx_desc, *eop_desc;
3873 	struct ifnet *ifp = adapter->ifp;
3875 	EM_TX_LOCK_ASSERT(adapter);
	/* Nothing outstanding — fast exit. */
3877 	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3880 	num_avail = adapter->num_tx_desc_avail;
3881 	first = adapter->next_tx_to_clean;
3882 	tx_desc = &adapter->tx_desc_base[first];
3883 	tx_buffer = &adapter->tx_buffer_area[first];
3884 	last = tx_buffer->next_eop;
3885 	eop_desc = &adapter->tx_desc_base[last];
	/*
3888 	 * What this does is get the index of the
3889 	 * first descriptor AFTER the EOP of the
3890 	 * first packet, that way we can do the
3891 	 * simple comparison on the inner while loop.
	 */
3893 	if (++last == adapter->num_tx_desc)
3897 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3898 	    BUS_DMASYNC_POSTREAD);
3900 	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
3901 		/* We clean the range of the packet */
3902 		while (first != done) {
3903 			tx_desc->upper.data = 0;
3904 			tx_desc->lower.data = 0;
3905 			tx_desc->buffer_addr = 0;
3908 			if (tx_buffer->m_head) {
3910 				bus_dmamap_sync(adapter->txtag,
3912 				    BUS_DMASYNC_POSTWRITE);
3913 				bus_dmamap_unload(adapter->txtag,
3916 				m_freem(tx_buffer->m_head);
3917 				tx_buffer->m_head = NULL;
3919 			tx_buffer->next_eop = -1;
			/* Refresh watchdog timestamp — progress was made. */
3920 			adapter->watchdog_time = ticks;
3922 			if (++first == adapter->num_tx_desc)
3925 			tx_buffer = &adapter->tx_buffer_area[first];
3926 			tx_desc = &adapter->tx_desc_base[first];
3928 		/* See if we can continue to the next packet */
3929 		last = tx_buffer->next_eop;
3931 			eop_desc = &adapter->tx_desc_base[last];
3932 			/* Get new done point */
3933 			if (++last == adapter->num_tx_desc) last = 0;
3938 	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
3939 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3941 	adapter->next_tx_to_clean = first;
	/*
3944 	 * If we have enough room, clear IFF_OACTIVE to
3945 	 * tell the stack that it is OK to send packets.
3946 	 * If there are no pending descriptors, clear the watchdog.
	 */
3948 	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
3949 		ifp->if_flags &= ~IFF_OACTIVE;
3950 		if (num_avail == adapter->num_tx_desc) {
3951 			adapter->watchdog_check = FALSE;
3952 			adapter->num_tx_desc_avail = num_avail;
3957 	adapter->num_tx_desc_avail = num_avail;
3961 /*********************************************************************
3963 * When Link is lost sometimes there is work still in the TX ring
3964 * which may result in a watchdog, rather than allow that we do an
3965 * attempted cleanup and then reinit here. Note that this has been
3966 * seens mostly with fiber adapters.
3968 **********************************************************************/
/*
 * em_tx_purge: with link down and the watchdog armed, attempt a TX
 * cleanup under the TX lock; if descriptors are still outstanding
 * afterwards, re-initialize the adapter to avoid a watchdog reset
 * (seen mostly on fiber adapters, per the banner above).
 * NOTE(review): the cleanup call between lock/unlock is elided in
 * this listing (line 3974 missing).
 */
3970 em_tx_purge(struct adapter *adapter)
3972 	if ((!adapter->link_active) && (adapter->watchdog_check)) {
3973 		EM_TX_LOCK(adapter);
3975 		EM_TX_UNLOCK(adapter);
3976 		if (adapter->watchdog_check) /* Still outstanding? */
3977 			em_init_locked(adapter);
3981 /*********************************************************************
3983 * Get a buffer from system mbuf buffer pool.
3985 **********************************************************************/
/*
 * em_get_buf: allocate and DMA-map a fresh receive mbuf cluster for
 * RX ring slot `i`. Loads the cluster through the spare map, swaps
 * the spare with the slot's map, syncs for PREREAD, and writes the
 * new physical address into the RX descriptor. On cluster allocation
 * failure bumps mbuf_cluster_failed (error return path elided in
 * this listing).
 */
3987 em_get_buf(struct adapter *adapter, int i)
3990 	bus_dma_segment_t segs[1];
3992 	struct em_buffer *rx_buffer;
3995 	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3997 		adapter->mbuf_cluster_failed++;
4000 	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Align the IP header when the frame fits after a 2-byte shift. */
4002 	if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
4003 		m_adj(m, ETHER_ALIGN);
	/*
4006 	 * Using memory from the mbuf cluster pool, invoke the
4007 	 * bus_dma machinery to arrange the memory mapping.
	 */
4009 	error = bus_dmamap_load_mbuf_segment(adapter->rxtag,
4010 	    adapter->rx_sparemap, m, segs, 1, &nsegs, BUS_DMA_NOWAIT);
4016 	/* If nsegs is wrong then the stack is corrupt. */
4017 	KASSERT(nsegs == 1, ("Too many segments returned!"));
4019 	rx_buffer = &adapter->rx_buffer_area[i];
4020 	if (rx_buffer->m_head != NULL)
4021 		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
	/* Rotate the spare map into the slot so a spare always exists. */
4023 	map = rx_buffer->map;
4024 	rx_buffer->map = adapter->rx_sparemap;
4025 	adapter->rx_sparemap = map;
4026 	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
4027 	rx_buffer->m_head = m;
4029 	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
4033 /*********************************************************************
4035 * Allocate memory for rx_buffer structures. Since we use one
4036 * rx_buffer per received packet, the maximum number of rx_buffer's
4037 * that we'll need is equal to the number of receive descriptors
4038 * that we've allocated.
4040 **********************************************************************/
/*
 * em_allocate_receive_structures: allocate RX software state — the
 * rx_buffer_area array (one em_buffer per RX descriptor), the RX DMA
 * tag (MCLBYTES max, no boundary filters), a spare DMA map used by
 * em_get_buf(), and one DMA map per RX buffer. On any failure prints
 * a diagnostic and falls through to em_free_receive_structures().
 * NOTE(review): several return/goto lines are elided in this listing.
 */
4042 em_allocate_receive_structures(struct adapter *adapter)
4044 	device_t dev = adapter->dev;
4045 	struct em_buffer *rx_buffer;
4048 	adapter->rx_buffer_area = kmalloc(sizeof(struct em_buffer) *
4049 	    adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
4050 	if (adapter->rx_buffer_area == NULL) {
4051 		device_printf(dev, "Unable to allocate rx_buffer memory\n");
4055 	error = bus_dma_tag_create(NULL,		/* parent */
4056 				1, 0,			/* alignment, bounds */
4057 				BUS_SPACE_MAXADDR,	/* lowaddr */
4058 				BUS_SPACE_MAXADDR,	/* highaddr */
4059 				NULL, NULL,		/* filter, filterarg */
4060 				MCLBYTES,		/* maxsize */
4062 				MCLBYTES,		/* maxsegsize */
4066 		device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
4071 	/* Create the spare map (used by getbuf) */
4072 	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4073 	    &adapter->rx_sparemap);
4075 		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
4080 	rx_buffer = adapter->rx_buffer_area;
4081 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4082 		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
4085 			device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
	/* Error path: unwind everything allocated above. */
4094 	em_free_receive_structures(adapter);
4098 /*********************************************************************
4100 * (Re)initialize receive structures.
4102 **********************************************************************/
/*
 * em_setup_receive_structures: (re)initialize the RX ring — zero the
 * descriptor array, free any mbufs still attached to RX buffers, then
 * refill every slot via em_get_buf(). Resets next_rx_desc_to_check
 * and syncs the descriptor DMA memory for device use.
 */
4104 em_setup_receive_structures(struct adapter *adapter)
4106 	struct em_buffer *rx_buffer;
4109 	/* Reset descriptor ring */
4110 	bzero(adapter->rx_desc_base,
4111 	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
4113 	/* Free current RX buffers. */
4114 	rx_buffer = adapter->rx_buffer_area;
4115 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4116 		if (rx_buffer->m_head != NULL) {
4117 			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4118 			    BUS_DMASYNC_POSTREAD);
4119 			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
4120 			m_freem(rx_buffer->m_head);
4121 			rx_buffer->m_head = NULL;
4125 	/* Allocate new ones. */
4126 	for (i = 0; i < adapter->num_rx_desc; i++) {
4127 		error = em_get_buf(adapter, i);
4132 	/* Setup our descriptor pointers */
4133 	adapter->next_rx_desc_to_check = 0;
4134 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4135 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4140 /*********************************************************************
4142 * Enable receive unit.
4144 **********************************************************************/
/* Interrupt throttling target: ITR counts in 256ns units. */
4145 #define MAX_INTS_PER_SEC	8000
4146 #define DEFAULT_ITR	     1000000000/(MAX_INTS_PER_SEC * 256)
/*
 * em_initialize_receive_unit: program the hardware receive path.
 * Disables RX while configuring, sets interrupt throttling
 * (ITR/RADV, and per-vector EITR on 82574 MSIX), programs the RX ring
 * base/length, builds RCTL (broadcast accept, buffer size from
 * rx_buffer_len, long-packet enable for jumbo MTU), enables RX
 * checksum offload on 82543+, applies the 82573 RDTR latency
 * workaround, re-enables RX, and points RDH/RDT at the ring.
 * NOTE(review): listing has elided lines (switch cases, else
 * branches); code shown is not complete.
 */
4149 em_initialize_receive_unit(struct adapter *adapter)
4151 	struct ifnet *ifp = adapter->ifp;
4155 	INIT_DEBUGOUT("em_initialize_receive_unit: begin");
	/*
4158 	 * Make sure receives are disabled while setting
4159 	 * up the descriptor ring
	 */
4161 	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4162 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4164 	if (adapter->hw.mac.type >= e1000_82540) {
4165 		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
4166 		    adapter->rx_abs_int_delay.value);
		/*
4168 		 * Set the interrupt throttling rate. Value is calculated
4169 		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
4171 		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	/*
4175 	** When using MSIX interrupts we need to throttle
4176 	** using the EITR register (82574 only)
	*/
4179 		for (int i = 0; i < 4; i++)
4180 			E1000_WRITE_REG(&adapter->hw,
4181 			    E1000_EITR_82574(i), DEFAULT_ITR);
4183 	/* Disable accelerated ackknowledge */
4184 	if (adapter->hw.mac.type == e1000_82574)
4185 		E1000_WRITE_REG(&adapter->hw,
4186 		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);
4188 	/* Setup the Base and Length of the Rx Descriptor Ring */
4189 	bus_addr = adapter->rxdma.dma_paddr;
4190 	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
4191 	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
4192 	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
4193 	    (u32)(bus_addr >> 32));
4194 	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
4197 	/* Setup the Receive Control Register */
4198 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4199 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
4200 		   E1000_RCTL_RDMTS_HALF |
4201 		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4203 	/* Make sure VLAN Filters are off */
4204 	rctl &= ~E1000_RCTL_VFE;
	/* Store-bad-packets only when the TBI workaround needs it. */
4206 	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
4207 		rctl |= E1000_RCTL_SBP;
4209 		rctl &= ~E1000_RCTL_SBP;
	/* Select hardware RX buffer size; >2K sizes use BSEX scaling. */
4211 	switch (adapter->rx_buffer_len) {
4214 		rctl |= E1000_RCTL_SZ_2048;
4217 		rctl |= E1000_RCTL_SZ_4096 |
4218 		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
4221 		rctl |= E1000_RCTL_SZ_8192 |
4222 		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
4225 		rctl |= E1000_RCTL_SZ_16384 |
4226 		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
4230 	if (ifp->if_mtu > ETHERMTU)
4231 		rctl |= E1000_RCTL_LPE;
4233 		rctl &= ~E1000_RCTL_LPE;
4235 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
4236 	if ((adapter->hw.mac.type >= e1000_82543) &&
4237 	    (ifp->if_capenable & IFCAP_RXCSUM)) {
4238 		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
4239 		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
4240 		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	/*
4244 	** XXX TEMPORARY WORKAROUND: on some systems with 82573
4245 	** long latencies are observed, like Lenovo X60. This
4246 	** change eliminates the problem, but since having positive
4247 	** values in RDTR is a known source of problems on other
4248 	** platforms another solution is being sought.
	*/
4250 	if (adapter->hw.mac.type == e1000_82573)
4251 		E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);
4253 	/* Enable Receives */
4254 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	/*
4257 	 * Setup the HW Rx Head and
4258 	 * Tail Descriptor Pointers
	 */
4260 	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
4261 	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
4266 /*********************************************************************
4268 * Free receive related data structures.
4270 **********************************************************************/
/*
 * em_free_receive_structures: tear down all RX software state —
 * destroy the spare DMA map, sync/unload/destroy each RX buffer's
 * map and free its mbuf, free the rx_buffer_area array, and destroy
 * the RX DMA tag. Mirrors em_free_transmit_structures for RX.
 * NOTE(review): some bus_dmamap_* argument lines are elided in this
 * listing (calls cut mid-argument-list).
 */
4272 em_free_receive_structures(struct adapter *adapter)
4274 	struct em_buffer *rx_buffer;
4277 	INIT_DEBUGOUT("free_receive_structures: begin");
4279 	if (adapter->rx_sparemap) {
4280 		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
4281 		adapter->rx_sparemap = NULL;
4284 	/* Cleanup any existing buffers */
4285 	if (adapter->rx_buffer_area != NULL) {
4286 		rx_buffer = adapter->rx_buffer_area;
4287 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
4288 			if (rx_buffer->m_head != NULL) {
4289 				bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
4290 				    BUS_DMASYNC_POSTREAD);
4291 				bus_dmamap_unload(adapter->rxtag,
4293 				m_freem(rx_buffer->m_head);
4294 				rx_buffer->m_head = NULL;
4295 			} else if (rx_buffer->map != NULL)
4296 				bus_dmamap_unload(adapter->rxtag,
4298 			if (rx_buffer->map != NULL) {
4299 				bus_dmamap_destroy(adapter->rxtag,
4301 				rx_buffer->map = NULL;
4306 	if (adapter->rx_buffer_area != NULL) {
4307 		kfree(adapter->rx_buffer_area, M_DEVBUF);
4308 		adapter->rx_buffer_area = NULL;
4311 	if (adapter->rxtag != NULL) {
4312 		bus_dma_tag_destroy(adapter->rxtag);
4313 		adapter->rxtag = NULL;
4317 /*********************************************************************
4319 * This routine executes in interrupt context. It replenishes
4320 * the mbufs in the descriptor and sends data which has been
4321 * dma'ed into host memory to upper layer.
4323 * We loop at most count times if count is > 0, or until done if
4326 * For polling we also now return the number of cleaned packets
4327 *********************************************************************/
/*
 * em_rxeof: receive-completion processing (runs in interrupt context).
 * Walks RX descriptors from next_rx_desc_to_check while the DD bit is
 * set, chaining multi-descriptor frames through adapter->fmp/lmp,
 * applying the 82543 TBI workaround, stripping the CRC, replenishing
 * each slot via em_get_buf() (reusing the old mbuf on failure),
 * setting checksum and VLAN metadata, and handing completed frames to
 * the stack with the RX lock dropped. Finally advances RDT.
 * Fix vs. original: removed the stray second semicolon after the
 * `ifp` initializer (`adapter->ifp;;`) — an empty declaration there
 * is ill-formed in C89 and dead in C99.
 * NOTE(review): this listing has elided lines (loop bounds on
 * `count`, `accept_frame` handling, closing braces); code shown is
 * not complete.
 */
4329 em_rxeof(struct adapter *adapter, int count)
4331 	struct ifnet   *ifp = adapter->ifp;
4333 	u8		status, accept_frame = 0, eop = 0;
4334 	u16 		len, desc_len, prev_len_adj;
4336 	struct e1000_rx_desc   *current_desc;
4338 	EM_RX_LOCK(adapter);
4339 	i = adapter->next_rx_desc_to_check;
4340 	current_desc = &adapter->rx_desc_base[i];
4341 	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4342 	    BUS_DMASYNC_POSTREAD);
	/* Nothing ready — drop the lock and leave. */
4344 	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
4345 		EM_RX_UNLOCK(adapter);
4349 	while ((current_desc->status & E1000_RXD_STAT_DD) &&
4351 	    (ifp->if_flags & IFF_RUNNING)) {
4352 		struct mbuf *m = NULL;
4354 		mp = adapter->rx_buffer_area[i].m_head;
		/*
4356 		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
4357 		 * needs to access the last received byte in the mbuf.
		 */
4359 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
4360 		    BUS_DMASYNC_POSTREAD);
4364 		desc_len = le16toh(current_desc->length);
4365 		status = current_desc->status;
4366 		if (status & E1000_RXD_STAT_EOP) {
			/* Strip the CRC; short final fragments borrow from lmp. */
4369 			if (desc_len < ETHER_CRC_LEN) {
4371 				prev_len_adj = ETHER_CRC_LEN - desc_len;
4373 				len = desc_len - ETHER_CRC_LEN;
4379 		if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			/* 82543 TBI workaround: some "errored" frames are valid. */
4381 			u32 pkt_len = desc_len;
4383 			if (adapter->fmp != NULL)
4384 				pkt_len += adapter->fmp->m_pkthdr.len;
4386 			last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
4387 			if (TBI_ACCEPT(&adapter->hw, status,
4388 			    current_desc->errors, pkt_len, last_byte,
4389 			    adapter->min_frame_size, adapter->max_frame_size)) {
4390 				e1000_tbi_adjust_stats_82543(&adapter->hw,
4391 				    &adapter->stats, pkt_len,
4392 				    adapter->hw.mac.addr,
4393 				    adapter->max_frame_size);
			/* Replenish this slot; on failure reuse the old mbuf below. */
4401 			if (em_get_buf(adapter, i) != 0) {
4406 			/* Assign correct length to the current fragment */
4409 			if (adapter->fmp == NULL) {
4410 				mp->m_pkthdr.len = len;
4411 				adapter->fmp = mp; /* Store the first mbuf */
4414 				/* Chain mbuf's together */
4415 				mp->m_flags &= ~M_PKTHDR;
				/*
4417 				 * Adjust length of previous mbuf in chain if
4418 				 * we received less than 4 bytes in the last
				 * descriptor (CRC split across fragments).
				 */
4421 				if (prev_len_adj > 0) {
4422 					adapter->lmp->m_len -= prev_len_adj;
4423 					adapter->fmp->m_pkthdr.len -=
4426 				adapter->lmp->m_next = mp;
4427 				adapter->lmp = adapter->lmp->m_next;
4428 				adapter->fmp->m_pkthdr.len += len;
			/* End-of-packet: finalize and hand the chain up. */
4432 				adapter->fmp->m_pkthdr.rcvif = ifp;
4434 				em_receive_checksum(adapter, current_desc,
4436 #ifndef __NO_STRICT_ALIGNMENT
4437 				if (adapter->max_frame_size >
4438 				    (MCLBYTES - ETHER_ALIGN) &&
4439 				    em_fixup_rx(adapter) != 0)
4442 				if (status & E1000_RXD_STAT_VP) {
4444 					VLAN_INPUT_TAG_NEW(ifp, adapter->fmp,
4445 					    (le16toh(current_desc->special) &
4446 					    E1000_RXD_SPC_VLAN_MASK));
4448 					adapter->fmp->m_pkthdr.ether_vlantag =
4449 					    (le16toh(current_desc->special) &
4450 					    E1000_RXD_SPC_VLAN_MASK);
4451 					adapter->fmp->m_flags |= M_VLANTAG;
4454 #ifndef __NO_STRICT_ALIGNMENT
4458 				adapter->fmp = NULL;
4459 				adapter->lmp = NULL;
4464 			/* Reuse loaded DMA map and just update mbuf chain */
4465 			mp = adapter->rx_buffer_area[i].m_head;
4466 			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
4467 			mp->m_data = mp->m_ext.ext_buf;
4469 			if (adapter->max_frame_size <=
4470 			    (MCLBYTES - ETHER_ALIGN))
4471 				m_adj(mp, ETHER_ALIGN);
			/* Drop any partial chain on this error path. */
4472 			if (adapter->fmp != NULL) {
4473 				m_freem(adapter->fmp);
4474 				adapter->fmp = NULL;
4475 				adapter->lmp = NULL;
4480 		/* Zero out the receive descriptors status. */
4481 		current_desc->status = 0;
4482 		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
4483 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
4485 		/* Advance our pointers to the next descriptor. */
4486 		if (++i == adapter->num_rx_desc)
4488 		/* Call into the stack */
			/* Drop the RX lock around if_input to avoid recursion. */
4490 			adapter->next_rx_desc_to_check = i;
4491 			EM_RX_UNLOCK(adapter);
4492 			(*ifp->if_input)(ifp, m);
4493 			EM_RX_LOCK(adapter);
			/* Ring state may have moved while unlocked — reload. */
4495 			i = adapter->next_rx_desc_to_check;
4497 		current_desc = &adapter->rx_desc_base[i];
4499 	adapter->next_rx_desc_to_check = i;
4501 	/* Advance the E1000's Receive Queue #0  "Tail Pointer". */
4503 		i = adapter->num_rx_desc - 1;
4504 	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
4505 	EM_RX_UNLOCK(adapter);
4509 #ifndef __NO_STRICT_ALIGNMENT
4511 * When jumbo frames are enabled we should realign entire payload on
4512 * architecures with strict alignment. This is serious design mistake of 8254x
4513 * as it nullifies DMA operations. 8254x just allows RX buffer size to be
4514 * 2048/4096/8192/16384. What we really want is 2048 - ETHER_ALIGN to align its
4515 * payload. On architecures without strict alignment restrictions 8254x still
4516 * performs unaligned memory access which would reduce the performance too.
4517 * To avoid copying over an entire frame to align, we allocate a new mbuf and
4518 * copy ethernet header to the new mbuf. The new mbuf is prepended into the
4519 * existing mbuf chain.
4521 * Be aware, best performance of the 8254x is achived only when jumbo frame is
4522 * not used at all on architectures with strict alignment.
/*
 * em_fixup_rx: realign a jumbo RX frame on strict-alignment
 * architectures. When the first mbuf has room, shift the payload up
 * by ETHER_HDR_LEN in place; otherwise allocate a new header mbuf,
 * copy the Ethernet header into it, and prepend it to the chain.
 * On mbuf allocation failure drops the packet (dropped_pkts++).
 * NOTE(review): listing elides the function head/locals and the
 * success/error returns; see the banner comment above for rationale.
 */
4525 em_fixup_rx(struct adapter *adapter)
4532 	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
4533 		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
4534 		m->m_data += ETHER_HDR_LEN;
4536 		MGETHDR(n, MB_DONTWAIT, MT_DATA);
4538 			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
4539 			m->m_data += ETHER_HDR_LEN;
4540 			m->m_len -= ETHER_HDR_LEN;
4541 			n->m_len = ETHER_HDR_LEN;
4542 			M_MOVE_PKTHDR(n, m);
	/* Allocation failed: count and drop the whole chain. */
4546 			adapter->dropped_pkts++;
4547 			m_freem(adapter->fmp);
4548 			adapter->fmp = NULL;
4557 /*********************************************************************
4559 * Verify that the hardware indicated that the checksum is valid.
4560 * Inform the stack about the status of checksum so that stack
4561 * doesn't spend time verifying the checksum.
4563 *********************************************************************/
/*
 * em_receive_checksum: translate RX descriptor checksum status bits
 * into mbuf csum_flags so the stack can skip software verification.
 * Pre-82543 parts and IXSM-flagged descriptors report nothing; IPCS
 * without IPE marks the IP header valid; TCPCS without TCPE marks the
 * payload checksum valid (pseudo-header included, csum_data 0xffff).
 */
4565 em_receive_checksum(struct adapter *adapter,
4566 	    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
4568 	/* 82543 or newer only */
4569 	if ((adapter->hw.mac.type < e1000_82543) ||
4570 	    /* Ignore Checksum bit is set */
4571 	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
4572 		mp->m_pkthdr.csum_flags = 0;
4576 	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
		/* No IP checksum error reported by hardware. */
4578 		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
4579 			/* IP Checksum Good */
4580 			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
4581 			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4584 			mp->m_pkthdr.csum_flags = 0;
4588 	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
		/* No TCP/UDP checksum error reported by hardware. */
4590 		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
4591 			mp->m_pkthdr.csum_flags |=
4592 			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4593 			mp->m_pkthdr.csum_data = htons(0xffff);
4599 * This routine is run via an vlan
/*
 * em_register_vlan: VLAN-config event handler. Validates that the
 * event targets this interface and the VLAN ID is in 1..4095, sets
 * the tag's bit in the shadow VFTA, bumps num_vlans, and re-inits
 * the adapter so the hardware filter table is reloaded.
 * NOTE(review): the `bit` computation and the re-init call are
 * elided in this listing.
 */
4603 em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4605 	struct adapter	*adapter = ifp->if_softc;
4608 	if (ifp->if_softc !=  arg)   /* Not our event */
4611 	if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
	/* VFTA is an array of 32-bit words: word index = vtag >> 5. */
4614 	index = (vtag >> 5) & 0x7F;
4616 	em_shadow_vfta[index] |= (1 << bit);
4617 	++adapter->num_vlans;
4618 	/* Re-init to load the changes */
4623 * This routine is run via an vlan
/*
 * em_unregister_vlan: VLAN-unconfig event handler, the inverse of
 * em_register_vlan — clears the tag's bit in the shadow VFTA,
 * decrements num_vlans, and re-inits to reload the hardware table.
 * NOTE(review): the `bit` computation and the re-init call are
 * elided in this listing.
 */
4627 em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
4629 	struct adapter	*adapter = ifp->if_softc;
4632 	if (ifp->if_softc !=  arg)
4635 	if ((vtag == 0) || (vtag > 4095))       /* Invalid */
4638 	index = (vtag >> 5) & 0x7F;
4640 	em_shadow_vfta[index] &= ~(1 << bit);
4641 	--adapter->num_vlans;
4642 	/* Re-init to load the changes */
/*
 * em_setup_vlan_hw_support: after a soft reset (which zeroes the
 * VFTA), repopulate the hardware VLAN filter table from the shadow
 * copy, enable VLAN tag stripping (CTRL.VME) and the filter table
 * (RCTL.VFE, CFI check off), and grow RLPML to admit tagged frames.
 * Does nothing if no VLANs are registered.
 */
4647 em_setup_vlan_hw_support(struct adapter *adapter)
4649 	struct e1000_hw *hw = &adapter->hw;
	/*
4653 	** We get here thru init_locked, meaning
4654 	** a soft reset, this has already cleared
4655 	** the VFTA and other state, so if there
4656 	** have been no vlan's registered do nothing.
	*/
4658 	if (adapter->num_vlans == 0)
	/*
4662 	** A soft reset zero's out the VFTA, so
4663 	** we need to repopulate it now.
	*/
4665 	for (int i = 0; i < EM_VFTA_SIZE; i++)
4666 		if (em_shadow_vfta[i] != 0)
4667 			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
4668 			    i, em_shadow_vfta[i]);
	/* Turn on VLAN tag stripping. */
4670 	reg = E1000_READ_REG(hw, E1000_CTRL);
4671 	reg |= E1000_CTRL_VME;
4672 	E1000_WRITE_REG(hw, E1000_CTRL, reg);
4674 	/* Enable the Filter Table */
4675 	reg = E1000_READ_REG(hw, E1000_RCTL);
4676 	reg &= ~E1000_RCTL_CFIEN;
4677 	reg |= E1000_RCTL_VFE;
4678 	E1000_WRITE_REG(hw, E1000_RCTL, reg);
4680 	/* Update the frame size */
4681 	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
4682 	    adapter->max_frame_size + VLAN_TAG_SIZE);
/*
 * em_enable_intr: unmask interrupts. Under MSIX, also program EIAC
 * auto-clear and fold the MSIX vector bits into the IMS mask.
 */
4686 em_enable_intr(struct adapter *adapter)
4688 	struct e1000_hw *hw = &adapter->hw;
4689 	u32 ims_mask = IMS_ENABLE_MASK;
4691 	if (adapter->msix) {
4692 		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
4693 		ims_mask |= EM_MSIX_MASK;
4695 	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
/*
 * em_disable_intr: mask all interrupts by writing IMC, clearing EIAC
 * first (the surrounding MSIX conditional is elided in this listing).
 */
4701 em_disable_intr(struct adapter *adapter)
4703 	struct e1000_hw *hw = &adapter->hw;
4704 		E1000_WRITE_REG(hw, EM_EIAC, 0);
4705 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
4709 * Bit of a misnomer, what this really means is
4710 * to enable OS management of the system... aka
4711 * to disable special hardware management features
4714 em_init_manageability(struct adapter *adapter)
4716 /* A shared code workaround */
4717 #define E1000_82542_MANC2H E1000_MANC2H
4718 if (adapter->has_manage) {
4719 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
4720 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4722 /* disable hardware interception of ARP */
4723 manc &= ~(E1000_MANC_ARP_EN);
4725 /* enable receiving management packets to the host */
4726 if (adapter->hw.mac.type >= e1000_82571) {
4727 manc |= E1000_MANC_EN_MNG2HOST;
4728 #define E1000_MNG2HOST_PORT_623 (1 << 5)
4729 #define E1000_MNG2HOST_PORT_664 (1 << 6)
4730 manc2h |= E1000_MNG2HOST_PORT_623;
4731 manc2h |= E1000_MNG2HOST_PORT_664;
4732 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
4735 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4740 * Give control back to hardware management
4741 * controller if there is one.
4744 em_release_manageability(struct adapter *adapter)
4746 if (adapter->has_manage) {
4747 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
4749 /* re-enable hardware interception of ARP */
4750 manc |= E1000_MANC_ARP_EN;
4752 if (adapter->hw.mac.type >= e1000_82571)
4753 manc &= ~E1000_MANC_EN_MNG2HOST;
4755 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
4760 * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
4761 * For ASF and Pass Through versions of f/w this means
4762 * that the driver is loaded. For AMT version type f/w
4763 * this means that the network i/f is open.
4766 em_get_hw_control(struct adapter *adapter)
4770 if (adapter->hw.mac.type == e1000_82573) {
4771 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4772 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4773 swsm | E1000_SWSM_DRV_LOAD);
4777 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4778 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4779 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4784 * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
4785 * For ASF and Pass Through versions of f/w this means that
4786 * the driver is no longer loaded. For AMT versions of the
4787 * f/w this means that the network i/f is closed.
4790 em_release_hw_control(struct adapter *adapter)
4794 if (!adapter->has_manage)
4797 if (adapter->hw.mac.type == e1000_82573) {
4798 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
4799 E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
4800 swsm & ~E1000_SWSM_DRV_LOAD);
4804 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4805 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
4806 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4811 em_is_valid_ether_addr(u8 *addr)
4813 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
4815 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
4823 ** Parse the interface capabilities with regard
4824 ** to both system management and wake-on-lan for
4828 em_get_wakeup(device_t dev)
4830 struct adapter *adapter = device_get_softc(dev);
4831 u16 eeprom_data = 0, device_id, apme_mask;
4833 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
4834 apme_mask = EM_EEPROM_APME;
4836 switch (adapter->hw.mac.type) {
4841 e1000_read_nvm(&adapter->hw,
4842 NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
4843 apme_mask = EM_82544_APME;
4847 adapter->has_amt = TRUE;
4850 case e1000_82546_rev_3:
4853 case e1000_80003es2lan:
4854 if (adapter->hw.bus.func == 1) {
4855 e1000_read_nvm(&adapter->hw,
4856 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
4859 e1000_read_nvm(&adapter->hw,
4860 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4864 case e1000_ich10lan:
4866 apme_mask = E1000_WUC_APME;
4867 adapter->has_amt = TRUE;
4868 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
4871 e1000_read_nvm(&adapter->hw,
4872 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
4875 if (eeprom_data & apme_mask)
4876 adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC);
4878 * We have the eeprom settings, now apply the special cases
4879 * where the eeprom may be wrong or the board won't support
4880 * wake on lan on a particular port
4882 device_id = pci_get_device(dev);
4883 switch (device_id) {
4884 case E1000_DEV_ID_82546GB_PCIE:
4887 case E1000_DEV_ID_82546EB_FIBER:
4888 case E1000_DEV_ID_82546GB_FIBER:
4889 case E1000_DEV_ID_82571EB_FIBER:
4890 /* Wake events only supported on port A for dual fiber
4891 * regardless of eeprom setting */
4892 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
4893 E1000_STATUS_FUNC_1)
4896 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
4897 case E1000_DEV_ID_82571EB_QUAD_COPPER:
4898 case E1000_DEV_ID_82571EB_QUAD_FIBER:
4899 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
4900 /* if quad port adapter, disable WoL on all but port A */
4901 if (global_quad_port_a != 0)
4903 /* Reset for multiple quad port adapters */
4904 if (++global_quad_port_a == 4)
4905 global_quad_port_a = 0;
4913 * Enable PCI Wake On Lan capability
4916 em_enable_wakeup(device_t dev)
4918 struct adapter *adapter = device_get_softc(dev);
4919 struct ifnet *ifp = adapter->ifp;
4920 u32 pmc, ctrl, ctrl_ext, rctl;
4923 if ((pci_find_extcap(dev, PCIY_PMG, &pmc) != 0))
4926 /* Advertise the wakeup capability */
4927 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
4928 ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3);
4929 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
4930 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4932 /* ICH workaround code */
4933 if ((adapter->hw.mac.type == e1000_ich8lan) ||
4934 (adapter->hw.mac.type == e1000_pchlan) ||
4935 (adapter->hw.mac.type == e1000_ich9lan) ||
4936 (adapter->hw.mac.type == e1000_ich10lan)) {
4937 e1000_disable_gig_wol_ich8lan(&adapter->hw);
4938 e1000_hv_phy_powerdown_workaround_ich8lan(&adapter->hw);
4941 /* Keep the laser running on Fiber adapters */
4942 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
4943 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
4944 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
4945 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
4946 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext);
4950 ** Determine type of Wakeup: note that wol
4951 ** is set with all bits on by default.
4953 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
4954 adapter->wol &= ~E1000_WUFC_MAG;
4956 if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
4957 adapter->wol &= ~E1000_WUFC_MC;
4959 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
4960 rctl |= E1000_RCTL_MPE;
4961 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
4964 if (adapter->hw.mac.type == e1000_pchlan) {
4965 if (em_enable_phy_wakeup(adapter))
4968 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
4969 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
4972 if (adapter->hw.phy.type == e1000_phy_igp_3)
4973 e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
4976 status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
4977 status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
4978 if (ifp->if_capenable & IFCAP_WOL)
4979 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
4980 pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
4986 ** WOL in the newer chipset interfaces (pchlan)
4987 ** require thing to be copied into the phy
4990 em_enable_phy_wakeup(struct adapter *adapter)
4992 struct e1000_hw *hw = &adapter->hw;
4996 /* copy MAC RARs to PHY RARs */
4997 for (int i = 0; i < adapter->hw.mac.rar_entry_count; i++) {
4998 mreg = E1000_READ_REG(hw, E1000_RAL(i));
4999 e1000_write_phy_reg(hw, BM_RAR_L(i), (u16)(mreg & 0xFFFF));
5000 e1000_write_phy_reg(hw, BM_RAR_M(i),
5001 (u16)((mreg >> 16) & 0xFFFF));
5002 mreg = E1000_READ_REG(hw, E1000_RAH(i));
5003 e1000_write_phy_reg(hw, BM_RAR_H(i), (u16)(mreg & 0xFFFF));
5004 e1000_write_phy_reg(hw, BM_RAR_CTRL(i),
5005 (u16)((mreg >> 16) & 0xFFFF));
5008 /* copy MAC MTA to PHY MTA */
5009 for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5010 mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5011 e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
5012 e1000_write_phy_reg(hw, BM_MTA(i) + 1,
5013 (u16)((mreg >> 16) & 0xFFFF));
5016 /* configure PHY Rx Control register */
5017 e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
5018 mreg = E1000_READ_REG(hw, E1000_RCTL);
5019 if (mreg & E1000_RCTL_UPE)
5020 preg |= BM_RCTL_UPE;
5021 if (mreg & E1000_RCTL_MPE)
5022 preg |= BM_RCTL_MPE;
5023 preg &= ~(BM_RCTL_MO_MASK);
5024 if (mreg & E1000_RCTL_MO_3)
5025 preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5026 << BM_RCTL_MO_SHIFT);
5027 if (mreg & E1000_RCTL_BAM)
5028 preg |= BM_RCTL_BAM;
5029 if (mreg & E1000_RCTL_PMCF)
5030 preg |= BM_RCTL_PMCF;
5031 mreg = E1000_READ_REG(hw, E1000_CTRL);
5032 if (mreg & E1000_CTRL_RFCE)
5033 preg |= BM_RCTL_RFCE;
5034 e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);
5036 /* enable PHY wakeup in MAC register */
5037 E1000_WRITE_REG(hw, E1000_WUC,
5038 E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5039 E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);
5041 /* configure and enable PHY wakeup in PHY registers */
5042 e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
5043 e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5045 /* activate PHY wakeup */
5046 ret = hw->phy.ops.acquire(hw);
5048 kprintf("Could not acquire PHY\n");
5051 e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
5052 (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
5053 ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
5055 kprintf("Could not read PHY page 769\n");
5058 preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5059 ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
5061 kprintf("Could not set PHY Host Wakeup bit\n");
5063 hw->phy.ops.release(hw);
5069 /*********************************************************************
5070 * 82544 Coexistence issue workaround.
5071 * There are 2 issues.
5072 * 1. Transmit Hang issue.
5073 * To detect this issue, following equation can be used...
5074 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5075 * If SUM[3:0] is in between 1 to 4, we will have this issue.
5078 * To detect this issue, following equation can be used...
5079 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
5080 * If SUM[3:0] is in between 9 to c, we will have this issue.
5084 * Make sure we do not have ending address
5085 * as 1,2,3,4(Hang) or 9,a,b,c (DAC)
5087 *************************************************************************/
5089 em_fill_descriptors (bus_addr_t address, u32 length,
5090 PDESC_ARRAY desc_array)
5092 u32 safe_terminator;
5094 /* Since issue is sensitive to length and address.*/
5095 /* Let us first check the address...*/
5097 desc_array->descriptor[0].address = address;
5098 desc_array->descriptor[0].length = length;
5099 desc_array->elements = 1;
5100 return (desc_array->elements);
5102 safe_terminator = (u32)((((u32)address & 0x7) +
5103 (length & 0xF)) & 0xF);
5104 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
5105 if (safe_terminator == 0 ||
5106 (safe_terminator > 4 &&
5107 safe_terminator < 9) ||
5108 (safe_terminator > 0xC &&
5109 safe_terminator <= 0xF)) {
5110 desc_array->descriptor[0].address = address;
5111 desc_array->descriptor[0].length = length;
5112 desc_array->elements = 1;
5113 return (desc_array->elements);
5116 desc_array->descriptor[0].address = address;
5117 desc_array->descriptor[0].length = length - 4;
5118 desc_array->descriptor[1].address = address + (length - 4);
5119 desc_array->descriptor[1].length = 4;
5120 desc_array->elements = 2;
5121 return (desc_array->elements);
5124 /**********************************************************************
5126 * Update the board statistics counters.
5128 **********************************************************************/
5130 em_update_stats_counters(struct adapter *adapter)
5134 if(adapter->hw.phy.media_type == e1000_media_type_copper ||
5135 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
5136 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
5137 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
5139 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
5140 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
5141 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
5142 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
5144 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
5145 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
5146 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
5147 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
5148 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
5149 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
5150 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
5151 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
5152 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
5153 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
5154 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
5155 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
5156 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
5157 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
5158 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
5159 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
5160 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
5161 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
5162 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
5163 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
5165 /* For the 64-bit byte counters the low dword must be read first. */
5166 /* Both registers clear on the read of the high dword */
5168 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
5169 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
5171 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
5172 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
5173 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
5174 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
5175 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
5177 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
5178 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
5180 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
5181 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
5182 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
5183 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
5184 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
5185 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
5186 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
5187 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
5188 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
5189 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
5191 if (adapter->hw.mac.type >= e1000_82543) {
5192 adapter->stats.algnerrc +=
5193 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
5194 adapter->stats.rxerrc +=
5195 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
5196 adapter->stats.tncrs +=
5197 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
5198 adapter->stats.cexterr +=
5199 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
5200 adapter->stats.tsctc +=
5201 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
5202 adapter->stats.tsctfc +=
5203 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
5207 ifp->if_collisions = adapter->stats.colc;
5210 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
5211 adapter->stats.crcerrs + adapter->stats.algnerrc +
5212 adapter->stats.ruc + adapter->stats.roc +
5213 adapter->stats.mpc + adapter->stats.cexterr;
5216 ifp->if_oerrors = adapter->stats.ecol +
5217 adapter->stats.latecol + adapter->watchdog_events;
5221 /**********************************************************************
5223 * This routine is called only when em_display_debug_stats is enabled.
5224 * This routine provides a way to take a look at important statistics
5225 * maintained by the driver and hardware.
5227 **********************************************************************/
5229 em_print_debug_info(struct adapter *adapter)
5231 device_t dev = adapter->dev;
5232 u8 *hw_addr = adapter->hw.hw_addr;
5234 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
5235 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
5236 E1000_READ_REG(&adapter->hw, E1000_CTRL),
5237 E1000_READ_REG(&adapter->hw, E1000_RCTL));
5238 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
5239 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\
5240 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) );
5241 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
5242 adapter->hw.fc.high_water,
5243 adapter->hw.fc.low_water);
5244 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
5245 E1000_READ_REG(&adapter->hw, E1000_TIDV),
5246 E1000_READ_REG(&adapter->hw, E1000_TADV));
5247 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
5248 E1000_READ_REG(&adapter->hw, E1000_RDTR),
5249 E1000_READ_REG(&adapter->hw, E1000_RADV));
5250 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
5251 (long long)adapter->tx_fifo_wrk_cnt,
5252 (long long)adapter->tx_fifo_reset_cnt);
5253 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
5254 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
5255 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
5256 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
5257 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
5258 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
5259 device_printf(dev, "Num Tx descriptors avail = %d\n",
5260 adapter->num_tx_desc_avail);
5261 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
5262 adapter->no_tx_desc_avail1);
5263 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
5264 adapter->no_tx_desc_avail2);
5265 device_printf(dev, "Std mbuf failed = %ld\n",
5266 adapter->mbuf_alloc_failed);
5267 device_printf(dev, "Std mbuf cluster failed = %ld\n",
5268 adapter->mbuf_cluster_failed);
5269 device_printf(dev, "Driver dropped packets = %ld\n",
5270 adapter->dropped_pkts);
5271 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
5272 adapter->no_tx_dma_setup);
5276 em_print_hw_stats(struct adapter *adapter)
5278 device_t dev = adapter->dev;
5280 device_printf(dev, "Excessive collisions = %lld\n",
5281 (long long)adapter->stats.ecol);
5282 #if (DEBUG_HW > 0) /* Dont output these errors normally */
5283 device_printf(dev, "Symbol errors = %lld\n",
5284 (long long)adapter->stats.symerrs);
5286 device_printf(dev, "Sequence errors = %lld\n",
5287 (long long)adapter->stats.sec);
5288 device_printf(dev, "Defer count = %lld\n",
5289 (long long)adapter->stats.dc);
5290 device_printf(dev, "Missed Packets = %lld\n",
5291 (long long)adapter->stats.mpc);
5292 device_printf(dev, "Receive No Buffers = %lld\n",
5293 (long long)adapter->stats.rnbc);
5294 /* RLEC is inaccurate on some hardware, calculate our own. */
5295 device_printf(dev, "Receive Length Errors = %lld\n",
5296 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
5297 device_printf(dev, "Receive errors = %lld\n",
5298 (long long)adapter->stats.rxerrc);
5299 device_printf(dev, "Crc errors = %lld\n",
5300 (long long)adapter->stats.crcerrs);
5301 device_printf(dev, "Alignment errors = %lld\n",
5302 (long long)adapter->stats.algnerrc);
5303 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
5304 (long long)adapter->stats.cexterr);
5305 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
5306 device_printf(dev, "watchdog timeouts = %ld\n",
5307 adapter->watchdog_events);
5308 device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld"
5309 " LINK MSIX IRQ = %ld\n", adapter->rx_irq,
5310 adapter->tx_irq , adapter->link_irq);
5311 device_printf(dev, "XON Rcvd = %lld\n",
5312 (long long)adapter->stats.xonrxc);
5313 device_printf(dev, "XON Xmtd = %lld\n",
5314 (long long)adapter->stats.xontxc);
5315 device_printf(dev, "XOFF Rcvd = %lld\n",
5316 (long long)adapter->stats.xoffrxc);
5317 device_printf(dev, "XOFF Xmtd = %lld\n",
5318 (long long)adapter->stats.xofftxc);
5319 device_printf(dev, "Good Packets Rcvd = %lld\n",
5320 (long long)adapter->stats.gprc);
5321 device_printf(dev, "Good Packets Xmtd = %lld\n",
5322 (long long)adapter->stats.gptc);
5323 device_printf(dev, "TSO Contexts Xmtd = %lld\n",
5324 (long long)adapter->stats.tsctc);
5325 device_printf(dev, "TSO Contexts Failed = %lld\n",
5326 (long long)adapter->stats.tsctfc);
5329 /**********************************************************************
5331 * This routine provides a way to dump out the adapter eeprom,
5332 * often a useful debug/service tool. This only dumps the first
5333 * 32 words, stuff that matters is in that extent.
5335 **********************************************************************/
5337 em_print_nvm_info(struct adapter *adapter)
5342 /* Its a bit crude, but it gets the job done */
5343 kprintf("\nInterface EEPROM Dump:\n");
5344 kprintf("Offset\n0x0000 ");
5345 for (i = 0, j = 0; i < 32; i++, j++) {
5346 if (j == 8) { /* Make the offset block */
5348 kprintf("\n0x00%x0 ",row);
5350 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
5351 kprintf("%04x ", eeprom_data);
5357 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
5359 struct adapter *adapter;
5364 error = sysctl_handle_int(oidp, &result, 0, req);
5366 if (error || !req->newptr)
5370 adapter = (struct adapter *)arg1;
5371 em_print_debug_info(adapter);
5374 * This value will cause a hex dump of the
5375 * first 32 16-bit words of the EEPROM to
5379 adapter = (struct adapter *)arg1;
5380 em_print_nvm_info(adapter);
5388 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
5390 struct adapter *adapter;
5395 error = sysctl_handle_int(oidp, &result, 0, req);
5397 if (error || !req->newptr)
5401 adapter = (struct adapter *)arg1;
5402 em_print_hw_stats(adapter);
5409 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
5411 struct em_int_delay_info *info;
5412 struct adapter *adapter;
5418 info = (struct em_int_delay_info *)arg1;
5419 usecs = info->value;
5420 error = sysctl_handle_int(oidp, &usecs, 0, req);
5421 if (error != 0 || req->newptr == NULL)
5423 if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
5425 info->value = usecs;
5426 ticks = EM_USECS_TO_TICKS(usecs);
5428 adapter = info->adapter;
5430 EM_CORE_LOCK(adapter);
5431 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
5432 regval = (regval & ~0xffff) | (ticks & 0xffff);
5433 /* Handle a few special cases. */
5434 switch (info->offset) {
5439 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
5440 /* Don't write 0 into the TIDV register. */
5443 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
5446 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
5447 EM_CORE_UNLOCK(adapter);
5452 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
5453 const char *description, struct em_int_delay_info *info,
5454 int offset, int value)
5456 info->adapter = adapter;
5457 info->offset = offset;
5458 info->value = value;
5459 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
5460 SYSCTL_CHILDREN(adapter->sysctl_tree),
5461 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
5462 info, 0, em_sysctl_int_delay, "I", description);
5465 #ifndef EM_LEGACY_IRQ
5467 em_add_rx_process_limit(struct adapter *adapter, const char *name,
5468 const char *description, int *limit, int value)
5471 SYSCTL_ADD_INT(&adapter->sysctl_ctx,
5472 SYSCTL_CHILDREN(adapter->sysctl_tree),
5473 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);