Reduce ifnet.if_serializer contention on output path:
[dragonfly.git] / sys / dev / netif / em / if_em.c
1 /*
2  *
3  * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4  *
5  * Copyright (c) 2001-2006, Intel Corporation
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  * 
11  *  1. Redistributions of source code must retain the above copyright notice,
12  *     this list of conditions and the following disclaimer.
13  * 
14  *  2. Redistributions in binary form must reproduce the above copyright
15  *     notice, this list of conditions and the following disclaimer in the
16  *     documentation and/or other materials provided with the distribution.
17  * 
18  *  3. Neither the name of the Intel Corporation nor the names of its
19  *     contributors may be used to endorse or promote products derived from
20  *     this software without specific prior written permission.
21  * 
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *
34  *
35  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
36  * 
37  * This code is derived from software contributed to The DragonFly Project
38  * by Matthew Dillon <dillon@backplane.com>
39  * 
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  * 3. Neither the name of The DragonFly Project nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific, prior written permission.
53  * 
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
57  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
58  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
60  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
61  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
62  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
63  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
64  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  * 
67  * $DragonFly: src/sys/dev/netif/em/if_em.c,v 1.71 2008/05/14 11:59:19 sephe Exp $
68  * $FreeBSD$
69  */
70 /*
71  * SERIALIZATION API RULES:
72  *
73  * - If the driver uses the same serializer for the interrupt as for the
74  *   ifnet, most of the serialization will be done automatically for the
75  *   driver.  
76  *
77  * - ifmedia entry points will be serialized by the ifmedia code using the
78  *   ifnet serializer.
79  *
80  * - if_* entry points except for if_input will be serialized by the IF
81  *   and protocol layers.
82  *
83  * - The device driver must be sure to serialize access from timeout code
84  *   installed by the device driver.
85  *
86  * - The device driver typically holds the serializer at the time it wishes
87  *   to call if_input.  If so, it should pass the serializer to if_input and
88  *   note that the serializer might be dropped temporarily by if_input 
89  *   (e.g. in case it has to bridge the packet to another interface).
90  *
91  *   NOTE!  Since callers into the device driver hold the ifnet serializer,
92  *   the device driver may be holding a serializer at the time it calls
93  *   if_input even if it is not serializer-aware.
94  */
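/*
 * Illustrative sketch only (not driver code): under the timeout rule
 * above, a callout handler installed by the driver is not entered with
 * the ifnet serializer held, so it must take it explicitly:
 *
 *	static void
 *	example_timer(void *xsc)	(hypothetical handler name)
 *	{
 *		struct adapter *sc = xsc;
 *		struct ifnet *ifp = &sc->interface_data.ac_if;
 *
 *		lwkt_serialize_enter(ifp->if_serializer);
 *		... touch hardware and driver state safely ...
 *		lwkt_serialize_exit(ifp->if_serializer);
 *	}
 */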
95
96 #include "opt_polling.h"
97 #include "opt_inet.h"
98 #include "opt_serializer.h"
99 #include "opt_ethernet.h"
100
101 #include <sys/param.h>
102 #include <sys/bus.h>
103 #include <sys/endian.h>
104 #include <sys/interrupt.h>
105 #include <sys/kernel.h>
106 #include <sys/ktr.h>
107 #include <sys/malloc.h>
108 #include <sys/mbuf.h>
109 #include <sys/module.h>
110 #include <sys/rman.h>
111 #include <sys/serialize.h>
112 #include <sys/socket.h>
113 #include <sys/sockio.h>
114 #include <sys/sysctl.h>
115
116 #include <net/bpf.h>
117 #include <net/ethernet.h>
118 #include <net/if.h>
119 #include <net/if_arp.h>
120 #include <net/if_dl.h>
121 #include <net/if_media.h>
122 #include <net/if_types.h>
123 #include <net/ifq_var.h>
124 #include <net/vlan/if_vlan_var.h>
125 #include <net/vlan/if_vlan_ether.h>
126
127 #ifdef INET
128 #include <netinet/in.h>
129 #include <netinet/in_systm.h>
130 #include <netinet/in_var.h>
131 #include <netinet/ip.h>
132 #include <netinet/tcp.h>
133 #include <netinet/udp.h>
134 #endif
135
136 #include <dev/netif/em/if_em_hw.h>
137 #include <dev/netif/em/if_em.h>
138
139 #define EM_X60_WORKAROUND
140
141 /*********************************************************************
142  *  Set this to one to display debug statistics
143  *********************************************************************/
144 int     em_display_debug_stats = 0;
145
146 /*********************************************************************
147  *  Driver version
148  *********************************************************************/
149
150 char em_driver_version[] = "6.2.9";
151
152
153 /*********************************************************************
154  *  PCI Device ID Table
155  *
156  *  Used by em_probe() to select the devices the driver attaches to.
157  *  Last field stores an index into em_strings
158  *  Last entry must be all 0s
159  *
160  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
161  *********************************************************************/
162
163 static em_vendor_info_t em_vendor_info_array[] =
164 {
165         /* Intel(R) PRO/1000 Network Connection */
166         { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
167         { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
168         { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
169         { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
170         { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},
171
172         { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
173         { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
174         { 0x8086, E1000_DEV_ID_82541ER_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
175         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
176         { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
177         { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
178         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
179
180         { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},
181
182         { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
183         { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
184
185         { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
186         { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
187         { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
188         { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
189
190         { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
191         { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
192         { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
193         { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
194         { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
195
196         { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
197         { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
198         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
199         { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
200         { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
201         { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
202         { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
203         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
204         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
205                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
206
207         { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
208         { 0x8086, E1000_DEV_ID_82547EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
209         { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
210
211         { 0x8086, E1000_DEV_ID_82571EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
212         { 0x8086, E1000_DEV_ID_82571EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
213         { 0x8086, E1000_DEV_ID_82571EB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
214         { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
215                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
216         { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE,
217                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
218
219         { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
220                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
221         { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
222                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
223         { 0x8086, E1000_DEV_ID_82572EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
224         { 0x8086, E1000_DEV_ID_82572EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
225         { 0x8086, E1000_DEV_ID_82572EI_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
226         { 0x8086, E1000_DEV_ID_82572EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
227
228         { 0x8086, E1000_DEV_ID_82573E,          PCI_ANY_ID, PCI_ANY_ID, 0},
229         { 0x8086, E1000_DEV_ID_82573E_IAMT,     PCI_ANY_ID, PCI_ANY_ID, 0},
230         { 0x8086, E1000_DEV_ID_82573L,          PCI_ANY_ID, PCI_ANY_ID, 0},
231
232         { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
233                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
234         { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
235                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
236         { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
237                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
238         { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
239                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
240
241         { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,  PCI_ANY_ID, PCI_ANY_ID, 0},
242         { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,    PCI_ANY_ID, PCI_ANY_ID, 0},
243         { 0x8086, E1000_DEV_ID_ICH8_IGP_C,      PCI_ANY_ID, PCI_ANY_ID, 0},
244         { 0x8086, E1000_DEV_ID_ICH8_IFE,        PCI_ANY_ID, PCI_ANY_ID, 0},
245         { 0x8086, E1000_DEV_ID_ICH8_IFE_GT,     PCI_ANY_ID, PCI_ANY_ID, 0},
246         { 0x8086, E1000_DEV_ID_ICH8_IFE_G,      PCI_ANY_ID, PCI_ANY_ID, 0},
247         { 0x8086, E1000_DEV_ID_ICH8_IGP_M,      PCI_ANY_ID, PCI_ANY_ID, 0},
248
249         { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT,    PCI_ANY_ID, PCI_ANY_ID, 0},
250         { 0x8086, E1000_DEV_ID_ICH9_IGP_C,      PCI_ANY_ID, PCI_ANY_ID, 0},
251         { 0x8086, E1000_DEV_ID_ICH9_IFE,        PCI_ANY_ID, PCI_ANY_ID, 0},
252         { 0x8086, E1000_DEV_ID_ICH9_IFE_GT,     PCI_ANY_ID, PCI_ANY_ID, 0},
253         { 0x8086, E1000_DEV_ID_ICH9_IFE_G,      PCI_ANY_ID, PCI_ANY_ID, 0},
254
255         { 0x8086, E1000_DEV_ID_82575EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
256         { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
257                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
258         { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
259                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
260         { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
261         { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
262         /* required last entry */
263         { 0, 0, 0, 0, 0}
264 };
265
266 /*********************************************************************
267  *  Table of branding strings for all supported NICs.
268  *********************************************************************/
269
270 static const char *em_strings[] = {
271         "Intel(R) PRO/1000 Network Connection"
272 };
273
274 /*********************************************************************
275  *  Function prototypes
276  *********************************************************************/
277 static int      em_probe(device_t);
278 static int      em_attach(device_t);
279 static int      em_detach(device_t);
280 static int      em_shutdown(device_t);
281 static void     em_intr(void *);
282 static int      em_suspend(device_t);
283 static int      em_resume(device_t);
284 static void     em_start(struct ifnet *);
285 static int      em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
286 static void     em_watchdog(struct ifnet *);
287 static void     em_init(void *);
288 static void     em_stop(void *);
289 static void     em_media_status(struct ifnet *, struct ifmediareq *);
290 static int      em_media_change(struct ifnet *);
291 static void     em_identify_hardware(struct adapter *);
292 static int      em_allocate_pci_resources(device_t);
293 static void     em_free_pci_resources(device_t);
294 static void     em_local_timer(void *);
295 static int      em_hardware_init(struct adapter *);
296 static void     em_setup_interface(device_t, struct adapter *);
297 static int      em_setup_transmit_structures(struct adapter *);
298 static void     em_initialize_transmit_unit(struct adapter *);
299 static int      em_setup_receive_structures(struct adapter *);
300 static void     em_initialize_receive_unit(struct adapter *);
301 static void     em_enable_intr(struct adapter *);
302 static void     em_disable_intr(struct adapter *);
303 static void     em_free_transmit_structures(struct adapter *);
304 static void     em_free_receive_structures(struct adapter *);
305 static void     em_update_stats_counters(struct adapter *);
306 static void     em_txeof(struct adapter *);
307 static int      em_allocate_receive_structures(struct adapter *);
308 static void     em_rxeof(struct adapter *, int);
309 static void     em_receive_checksum(struct adapter *, struct em_rx_desc *,
310                                     struct mbuf *);
311 static void     em_transmit_checksum_setup(struct adapter *, struct mbuf *,
312                                            uint32_t *, uint32_t *);
313 static void     em_set_promisc(struct adapter *);
314 static void     em_disable_promisc(struct adapter *);
315 static void     em_set_multi(struct adapter *);
316 static void     em_print_hw_stats(struct adapter *);
317 static void     em_update_link_status(struct adapter *);
318 static int      em_get_buf(int i, struct adapter *, struct mbuf *, int how);
319 static void     em_enable_vlans(struct adapter *);
320 static void     em_disable_vlans(struct adapter *);
321 static int      em_encap(struct adapter *, struct mbuf *);
322 static void     em_smartspeed(struct adapter *);
323 static int      em_82547_fifo_workaround(struct adapter *, int);
324 static void     em_82547_update_fifo_head(struct adapter *, int);
325 static int      em_82547_tx_fifo_reset(struct adapter *);
326 static void     em_82547_move_tail(void *);
327 static void     em_82547_move_tail_serialized(struct adapter *);
328 static int      em_dma_malloc(struct adapter *, bus_size_t,
329                               struct em_dma_alloc *);
330 static void     em_dma_free(struct adapter *, struct em_dma_alloc *);
331 static void     em_print_debug_info(struct adapter *);
332 static int      em_is_valid_ether_addr(uint8_t *);
333 static int      em_sysctl_stats(SYSCTL_HANDLER_ARGS);
334 static int      em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
335 static uint32_t em_fill_descriptors(bus_addr_t address, uint32_t length, 
336                                    PDESC_ARRAY desc_array);
337 static int      em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
338 static int      em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
339 static void     em_add_int_delay_sysctl(struct adapter *, const char *,
340                                         const char *,
341                                         struct em_int_delay_info *, int, int);
342
343 /*********************************************************************
344  *  FreeBSD Device Interface Entry Points
345  *********************************************************************/
346
347 static device_method_t em_methods[] = {
348         /* Device interface */
349         DEVMETHOD(device_probe, em_probe),
350         DEVMETHOD(device_attach, em_attach),
351         DEVMETHOD(device_detach, em_detach),
352         DEVMETHOD(device_shutdown, em_shutdown),
353         DEVMETHOD(device_suspend, em_suspend),
354         DEVMETHOD(device_resume, em_resume),
355         {0, 0}
356 };
357
358 static driver_t em_driver = {
359         "em", em_methods, sizeof(struct adapter),
360 };
361
362 static devclass_t em_devclass;
363
364 DECLARE_DUMMY_MODULE(if_em);
365 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
366
367 /*********************************************************************
368  *  Tunable default values.
369  *********************************************************************/
370
371 #define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
372 #define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)
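/*
 * The interrupt-delay registers (TIDV/RDTR/TADV/RADV) count in units of
 * 1.024 microseconds; the macros above round-convert between those ticks
 * and microseconds.  For example, E1000_USECS_TO_TICKS(100) is
 * (100 * 1000 + 512) / 1024 == 98 ticks, and E1000_TICKS_TO_USECS(98) is
 * (98 * 1024 + 500) / 1000 == 100 usecs again.
 */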
373
374 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
375 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
376 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
377 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
378 static int em_int_throttle_ceil = 10000;
379 static int em_rxd = EM_DEFAULT_RXD;
380 static int em_txd = EM_DEFAULT_TXD;
381 static int em_smart_pwr_down = FALSE;
382
383 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
384 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
385 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
386 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
387 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
388 TUNABLE_INT("hw.em.rxd", &em_rxd);
389 TUNABLE_INT("hw.em.txd", &em_txd);
390 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
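/*
 * These tunables are read from the kernel environment at boot, so they can
 * be overridden without recompiling; e.g. a line such as  hw.em.rxd="512"
 * in /boot/loader.conf requests 512 RX descriptors (still subject to the
 * validation performed in em_attach()).
 */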
391
392 /*
393  * Kernel trace for characterization of operations
394  */
395 #if !defined(KTR_IF_EM)
396 #define KTR_IF_EM       KTR_ALL
397 #endif
398 KTR_INFO_MASTER(if_em);
399 KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin", 0);
400 KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end", 0);
401 KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet", 0);
402 KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet", 0);
403 KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean", 0);
404 #define logif(name)     KTR_LOG(if_em_ ## name)
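/*
 * logif() is shorthand for the KTR events registered above; e.g.
 * logif(pkt_receive) expands to KTR_LOG(if_em_pkt_receive) and records
 * an "rx packet" trace entry when KTR_IF_EM tracing is enabled.
 */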
405
406 /*********************************************************************
407  *  Device identification routine
408  *
409  *  em_probe determines whether the driver should be loaded on an
410  *  adapter, based on the PCI vendor/device id of the adapter.
411  *
412  *  return 0 on success, positive on failure
413  *********************************************************************/
414
415 static int
416 em_probe(device_t dev)
417 {
418         em_vendor_info_t *ent;
419
420         uint16_t pci_vendor_id = 0;
421         uint16_t pci_device_id = 0;
422         uint16_t pci_subvendor_id = 0;
423         uint16_t pci_subdevice_id = 0;
424         char adapter_name[60];
425
426         INIT_DEBUGOUT("em_probe: begin");
427
428         pci_vendor_id = pci_get_vendor(dev);
429         if (pci_vendor_id != EM_VENDOR_ID)
430                 return (ENXIO);
431
432         pci_device_id = pci_get_device(dev);
433         pci_subvendor_id = pci_get_subvendor(dev);
434         pci_subdevice_id = pci_get_subdevice(dev);
435
436         ent = em_vendor_info_array;
437         while (ent->vendor_id != 0) {
438                 if ((pci_vendor_id == ent->vendor_id) &&
439                     (pci_device_id == ent->device_id) &&
440
441                     ((pci_subvendor_id == ent->subvendor_id) ||
442                      (ent->subvendor_id == PCI_ANY_ID)) &&
443
444                     ((pci_subdevice_id == ent->subdevice_id) ||
445                      (ent->subdevice_id == PCI_ANY_ID))) {
446                         ksnprintf(adapter_name, sizeof(adapter_name),
447                                  "%s, Version - %s",  em_strings[ent->index], 
448                                  em_driver_version);
449                         device_set_desc_copy(dev, adapter_name);
450                         device_set_async_attach(dev, TRUE);
451                         return (0);
452                 }
453                 ent++;
454         }
455
456         return (ENXIO);
457 }
458
459 /*********************************************************************
460  *  Device initialization routine
461  *
462  *  The attach entry point is called when the driver is being loaded.
463  *  This routine identifies the type of hardware, allocates all resources
464  *  and initializes the hardware.
465  *
466  *  return 0 on success, positive on failure
467  *********************************************************************/
468
469 static int
470 em_attach(device_t dev)
471 {
472         struct adapter *adapter;
473         struct ifnet *ifp;
474         int tsize, rsize;
475         int error = 0;
476
477         INIT_DEBUGOUT("em_attach: begin");
478
479         adapter = device_get_softc(dev);
480         ifp = &adapter->interface_data.ac_if;
481
482         callout_init(&adapter->timer);
483         callout_init(&adapter->tx_fifo_timer);
484
485         adapter->dev = dev;
486         adapter->osdep.dev = dev;
487
488         /* SYSCTL stuff */
489         sysctl_ctx_init(&adapter->sysctl_ctx);
490         adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
491                                                SYSCTL_STATIC_CHILDREN(_hw),
492                                                OID_AUTO, 
493                                                device_get_nameunit(dev),
494                                                CTLFLAG_RD,
495                                                0, "");
496
497         if (adapter->sysctl_tree == NULL) {
498                 device_printf(dev, "Unable to create sysctl tree\n");
499                 return EIO;
500         }
501
502         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
503                         SYSCTL_CHILDREN(adapter->sysctl_tree),
504                         OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, 
505                         (void *)adapter, 0,
506                         em_sysctl_debug_info, "I", "Debug Information");
507
508         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
509                         SYSCTL_CHILDREN(adapter->sysctl_tree),
510                         OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, 
511                         (void *)adapter, 0,
512                         em_sysctl_stats, "I", "Statistics");
513
514         /* Determine hardware revision */
515         em_identify_hardware(adapter);
516
517         /* Set up some sysctls for the tunable interrupt delays */
518         em_add_int_delay_sysctl(adapter, "rx_int_delay",
519                                 "receive interrupt delay in usecs",
520                                 &adapter->rx_int_delay,
521                                 E1000_REG_OFFSET(&adapter->hw, RDTR),
522                                 em_rx_int_delay_dflt);
523         em_add_int_delay_sysctl(adapter, "tx_int_delay",
524                                 "transmit interrupt delay in usecs",
525                                 &adapter->tx_int_delay,
526                                 E1000_REG_OFFSET(&adapter->hw, TIDV),
527                                 em_tx_int_delay_dflt);
528         if (adapter->hw.mac_type >= em_82540) {
529                 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
530                                         "receive interrupt delay limit in usecs",
531                                         &adapter->rx_abs_int_delay,
532                                         E1000_REG_OFFSET(&adapter->hw, RADV),
533                                         em_rx_abs_int_delay_dflt);
534                 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
535                                         "transmit interrupt delay limit in usecs",
536                                         &adapter->tx_abs_int_delay,
537                                         E1000_REG_OFFSET(&adapter->hw, TADV),
538                                         em_tx_abs_int_delay_dflt);
539                 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
540                         SYSCTL_CHILDREN(adapter->sysctl_tree),
541                         OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
542                         adapter, 0, em_sysctl_int_throttle, "I", NULL);
543         }
544
545         /*
546          * Validate the number of transmit and receive descriptors.  The
547          * count must not exceed the hardware maximum, and the resulting
548          * ring size must be a multiple of EM_DBA_ALIGN.
549          */
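        /*
         * Worked example (assuming the usual EM_DBA_ALIGN of 128 and the
         * 16-byte legacy descriptor): the default of 256 descriptors gives
         * a 4096-byte ring, which is aligned; a request like em_txd=100
         * (1600 bytes, not a multiple of 128) would be rejected here and
         * EM_DEFAULT_TXD used instead, with a message on the console.
         */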
550         if (((em_txd * sizeof(struct em_tx_desc)) % EM_DBA_ALIGN) != 0 ||
551             (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
552             (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
553             (em_txd < EM_MIN_TXD)) {
554                 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
555                               EM_DEFAULT_TXD, em_txd);
556                 adapter->num_tx_desc = EM_DEFAULT_TXD;
557         } else {
558                 adapter->num_tx_desc = em_txd;
559         }
560  
561         if (((em_rxd * sizeof(struct em_rx_desc)) % EM_DBA_ALIGN) != 0 ||
562             (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
563             (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
564             (em_rxd < EM_MIN_RXD)) {
565                 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
566                               EM_DEFAULT_RXD, em_rxd);
567                 adapter->num_rx_desc = EM_DEFAULT_RXD;
568         } else {
569                 adapter->num_rx_desc = em_rxd;
570         }
571
572         SYSCTL_ADD_INT(&adapter->sysctl_ctx,
573                        SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO, "rxd",
574                        CTLFLAG_RD, &adapter->num_rx_desc, 0, NULL);
575         SYSCTL_ADD_INT(&adapter->sysctl_ctx,
576                        SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO, "txd",
577                        CTLFLAG_RD, &adapter->num_tx_desc, 0, NULL);
578
579         adapter->hw.autoneg = DO_AUTO_NEG;
580         adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
581         adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
582         adapter->hw.tbi_compatibility_en = TRUE;
583         adapter->rx_buffer_len = EM_RXBUFFER_2048;
584
585         adapter->hw.phy_init_script = 1;
586         adapter->hw.phy_reset_disable = FALSE;
587
588 #ifndef EM_MASTER_SLAVE
589         adapter->hw.master_slave = em_ms_hw_default;
590 #else
591         adapter->hw.master_slave = EM_MASTER_SLAVE;
592 #endif
593
594         /*
595          * Set the max frame size assuming standard ethernet
596          * sized frames.
597          */   
598         adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
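        /* For a standard 1500-byte MTU this works out to 1500 + 14 + 4 = 1518 bytes. */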
599
600         adapter->hw.min_frame_size =
601             MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
602
603         /*
604          * This controls when hardware reports transmit completion
605          * status.
606          */
607         adapter->hw.report_tx_early = 1;
608
609         error = em_allocate_pci_resources(dev);
610         if (error)
611                 goto fail;
612
613         /* Initialize eeprom parameters */
614         em_init_eeprom_params(&adapter->hw);
615
616         tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
617                          EM_DBA_ALIGN);
618
619         /* Allocate Transmit Descriptor ring */
620         error = em_dma_malloc(adapter, tsize, &adapter->txdma);
621         if (error) {
622                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
623                 goto fail;
624         }
625         adapter->tx_desc_base = (struct em_tx_desc *)adapter->txdma.dma_vaddr;
626
627         rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
628                          EM_DBA_ALIGN);
629
630         /* Allocate Receive Descriptor ring */
631         error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
632         if (error) {
633                 device_printf(dev, "Unable to allocate rx_desc memory\n");
634                 goto fail;
635         }
636         adapter->rx_desc_base = (struct em_rx_desc *)adapter->rxdma.dma_vaddr;
637
638         /* Initialize the hardware */
639         if (em_hardware_init(adapter)) {
640                 device_printf(dev, "Unable to initialize the hardware\n");
641                 error = EIO;
642                 goto fail;
643         }
644
645         /* Copy the permanent MAC address out of the EEPROM */
646         if (em_read_mac_addr(&adapter->hw) < 0) {
647                 device_printf(dev,
648                               "EEPROM read error while reading MAC address\n");
649                 error = EIO;
650                 goto fail;
651         }
652
653         if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
654                 device_printf(dev, "Invalid MAC address\n");
655                 error = EIO;
656                 goto fail;
657         }
658
659         /* Setup OS specific network interface */
660         em_setup_interface(dev, adapter);
661
662         /* Initialize statistics */
663         em_clear_hw_cntrs(&adapter->hw);
664         em_update_stats_counters(adapter);
665         adapter->hw.get_link_status = 1;
666         em_update_link_status(adapter);
667
668         /* Indicate SOL/IDER usage */
669         if (em_check_phy_reset_block(&adapter->hw)) {
670                 device_printf(dev, "PHY reset is blocked due to "
671                               "SOL/IDER session.\n");
672         }
673  
674         /* Identify 82544 on PCIX */
675         em_get_bus_info(&adapter->hw);
676         if (adapter->hw.bus_type == em_bus_type_pcix &&
677             adapter->hw.mac_type == em_82544)
678                 adapter->pcix_82544 = TRUE;
679         else
680                 adapter->pcix_82544 = FALSE;
681
682         error = bus_setup_intr(dev, adapter->res_interrupt, INTR_NETSAFE,
683                            em_intr, adapter,
684                            &adapter->int_handler_tag, ifp->if_serializer);
685         if (error) {
686                 device_printf(dev, "Error registering interrupt handler!\n");
687                 ether_ifdetach(ifp);
688                 goto fail;
689         }
690
691         ifp->if_cpuid = ithread_cpuid(rman_get_start(adapter->res_interrupt));
692         KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
693         INIT_DEBUGOUT("em_attach: end");
694         return(0);
695
696 fail:
697         em_detach(dev);
698         return(error);
699 }
700
701 /*********************************************************************
702  *  Device removal routine
703  *
704  *  The detach entry point is called when the driver is being removed.
705  *  This routine stops the adapter and deallocates all the resources
706  *  that were allocated for driver operation.
707  *
708  *  return 0 on success, positive on failure
709  *********************************************************************/
710
711 static int
712 em_detach(device_t dev)
713 {
714         struct adapter *adapter = device_get_softc(dev);
715
716         INIT_DEBUGOUT("em_detach: begin");
717
718         if (device_is_attached(dev)) {
719                 struct ifnet *ifp = &adapter->interface_data.ac_if;
720
721                 lwkt_serialize_enter(ifp->if_serializer);
722                 adapter->in_detach = 1;
723                 em_stop(adapter);
724                 em_phy_hw_reset(&adapter->hw);
725                 bus_teardown_intr(dev, adapter->res_interrupt, 
726                                   adapter->int_handler_tag);
727                 lwkt_serialize_exit(ifp->if_serializer);
728
729                 ether_ifdetach(ifp);
730         }
731         bus_generic_detach(dev);
732
733         em_free_pci_resources(dev);
734
735         /* Free Transmit Descriptor ring */
736         if (adapter->tx_desc_base != NULL) {
737                 em_dma_free(adapter, &adapter->txdma);
738                 adapter->tx_desc_base = NULL;
739         }
740
741         /* Free Receive Descriptor ring */
742         if (adapter->rx_desc_base != NULL) {
743                 em_dma_free(adapter, &adapter->rxdma);
744                 adapter->rx_desc_base = NULL;
745         }
746
747         /* Free sysctl tree */
748         if (adapter->sysctl_tree != NULL) {
749                 adapter->sysctl_tree = NULL;
750                 sysctl_ctx_free(&adapter->sysctl_ctx);
751         }
752
753         return (0);
754 }
755
756 /*********************************************************************
757  *
758  *  Shutdown entry point
759  *
760  **********************************************************************/
761
762 static int
763 em_shutdown(device_t dev)
764 {
765         struct adapter *adapter = device_get_softc(dev);
766         struct ifnet *ifp = &adapter->interface_data.ac_if;
767
768         lwkt_serialize_enter(ifp->if_serializer);
769         em_stop(adapter);
770         lwkt_serialize_exit(ifp->if_serializer);
771
772         return (0);
773 }
774
775 /*
776  * Suspend/resume device methods.
777  */
778 static int
779 em_suspend(device_t dev)
780 {
781         struct adapter *adapter = device_get_softc(dev);
782         struct ifnet *ifp = &adapter->interface_data.ac_if;
783
784         lwkt_serialize_enter(ifp->if_serializer);
785         em_stop(adapter);
786         lwkt_serialize_exit(ifp->if_serializer);
787         return (0);
788 }
789
790 static int
791 em_resume(device_t dev)
792 {
793         struct adapter *adapter = device_get_softc(dev);
794         struct ifnet *ifp = &adapter->interface_data.ac_if;
795
796         lwkt_serialize_enter(ifp->if_serializer);
797         ifp->if_flags &= ~IFF_RUNNING;
798         em_init(adapter);
799         if_devstart(ifp);
800         lwkt_serialize_exit(ifp->if_serializer);
801
802         return bus_generic_resume(dev);
803 }
804
805 /*********************************************************************
806  *  Transmit entry point
807  *
808  *  em_start is called by the stack to initiate a transmit.
809  *  The driver will remain in this routine as long as there are
810  *  packets to transmit and transmit resources are available.
811  *  In case resources are not available, the stack is notified and
812  *  the packet is requeued.
813  **********************************************************************/
814
815 static void
816 em_start(struct ifnet *ifp)
817 {
818         struct mbuf *m_head;
819         struct adapter *adapter = ifp->if_softc;
820
821         ASSERT_SERIALIZED(ifp->if_serializer);
822
823         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
824                 return;
825         if (!adapter->link_active) {
826                 ifq_purge(&ifp->if_snd);
827                 return;
828         }
829         while (!ifq_is_empty(&ifp->if_snd)) {
830                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
831                 if (m_head == NULL)
832                         break;
833
834                 logif(pkt_txqueue);
835                 if (em_encap(adapter, m_head)) {
836                         ifp->if_flags |= IFF_OACTIVE;
837                         ifq_prepend(&ifp->if_snd, m_head);
838                         break;
839                 }
840
841                 /* Send a copy of the frame to the BPF listener */
842                 ETHER_BPF_MTAP(ifp, m_head);
843
844                 /* Set timeout in case hardware has problems transmitting. */
845                 ifp->if_timer = EM_TX_TIMEOUT;
846         }
847 }
848
849 /*********************************************************************
850  *  Ioctl entry point
851  *
852  *  em_ioctl is called when the user wants to configure the
853  *  interface.
854  *
855  *  return 0 on success, positive on failure
856  **********************************************************************/
857
858 static int
859 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
860 {
861         int max_frame_size, mask, error = 0, reinit = 0;
862         struct ifreq *ifr = (struct ifreq *) data;
863         struct adapter *adapter = ifp->if_softc;
864         uint16_t eeprom_data = 0;
865
866         ASSERT_SERIALIZED(ifp->if_serializer);
867
868         if (adapter->in_detach)
869                 return 0;
870
871         switch (command) {
872         case SIOCSIFMTU:
873                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
874                 switch (adapter->hw.mac_type) {
875                 case em_82573:
876                         /*
877                          * 82573 only supports jumbo frames
878                          * if ASPM is disabled.
879                          */
880                         em_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3,
881                             1, &eeprom_data);
882                         if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
883                                 max_frame_size = ETHER_MAX_LEN;
884                                 break;
885                         }
886                         /* Allow Jumbo frames */
887                         /* FALLTHROUGH */
888                 case em_82571:
889                 case em_82572:
890                 case em_ich9lan:
891                 case em_80003es2lan:    /* Limit Jumbo Frame size */
892                         max_frame_size = 9234;
893                         break;
894                 case em_ich8lan:
895                         /* ICH8 does not support jumbo frames */
896                         max_frame_size = ETHER_MAX_LEN;
897                         break;
898                 default:
899                         max_frame_size = MAX_JUMBO_FRAME_SIZE;
900                         break;
901                 }
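                /*
                 * E.g. with the 9234-byte limit above, the largest MTU
                 * accepted here is 9234 - 14 (header) - 4 (CRC) = 9216 bytes.
                 */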
902                 if (ifr->ifr_mtu >
903                         max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
904                         error = EINVAL;
905                 } else {
906                         ifp->if_mtu = ifr->ifr_mtu;
907                         adapter->hw.max_frame_size = 
908                         ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
909                         ifp->if_flags &= ~IFF_RUNNING;
910                         em_init(adapter);
911                 }
912                 break;
913         case SIOCSIFFLAGS:
914                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS "
915                                "(Set Interface Flags)");
916                 if (ifp->if_flags & IFF_UP) {
917                         if (!(ifp->if_flags & IFF_RUNNING)) {
918                                 em_init(adapter);
919                         } else if ((ifp->if_flags ^ adapter->if_flags) &
920                                    IFF_PROMISC) {
921                                 em_disable_promisc(adapter);
922                                 em_set_promisc(adapter);
923                         }
924                 } else {
925                         if (ifp->if_flags & IFF_RUNNING)
926                                 em_stop(adapter);
927                 }
928                 adapter->if_flags = ifp->if_flags;
929                 break;
930         case SIOCADDMULTI:
931         case SIOCDELMULTI:
932                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
933                 if (ifp->if_flags & IFF_RUNNING) {
934                         em_disable_intr(adapter);
935                         em_set_multi(adapter);
936                         if (adapter->hw.mac_type == em_82542_rev2_0)
937                                 em_initialize_receive_unit(adapter);
938 #ifdef DEVICE_POLLING
939                         /* Do not enable interrupt if polling(4) is enabled */
940                         if ((ifp->if_flags & IFF_POLLING) == 0)
941 #endif
942                         em_enable_intr(adapter);
943                 }
944                 break;
945         case SIOCSIFMEDIA:
946                 /* Check SOL/IDER usage */
947                 if (em_check_phy_reset_block(&adapter->hw)) {
948                         if_printf(ifp, "Media change is blocked due to "
949                                   "SOL/IDER session.\n");
950                         break;
951                 }
952                 /* FALLTHROUGH */
953         case SIOCGIFMEDIA:
954                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA "
955                                "(Get/Set Interface Media)");
956                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
957                 break;
958         case SIOCSIFCAP:
959                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
960                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
961                 if (mask & IFCAP_HWCSUM) {
962                         ifp->if_capenable ^= IFCAP_HWCSUM;
963                         reinit = 1;
964                 }
965                 if (mask & IFCAP_VLAN_HWTAGGING) {
966                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
967                         reinit = 1;
968                 }
969                 if (reinit && (ifp->if_flags & IFF_RUNNING)) {
970                         ifp->if_flags &= ~IFF_RUNNING;
971                         em_init(adapter);
972                 }
973                 break;
974         default:
975                 error = ether_ioctl(ifp, command, data);
976                 break;
977         }
978
979         return (error);
980 }
981
982 /*********************************************************************
983  *  Watchdog entry point
984  *
985  *  This routine is called when the hardware appears to have stopped transmitting.
986  *
987  **********************************************************************/
988
989 static void
990 em_watchdog(struct ifnet *ifp)
991 {
992         struct adapter *adapter = ifp->if_softc;
993
994         /*
995          * If we are in this routine because of pause frames, then
996          * don't reset the hardware.
997          */
998         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
999                 ifp->if_timer = EM_TX_TIMEOUT;
1000                 return;
1001         }
1002
1003         if (em_check_for_link(&adapter->hw) == 0)
1004                 if_printf(ifp, "watchdog timeout -- resetting\n");
1005
1006         ifp->if_flags &= ~IFF_RUNNING;
1007         em_init(adapter);
1008
1009         adapter->watchdog_timeouts++;
1010 }
1011
1012 /*********************************************************************
1013  *  Init entry point
1014  *
1015  *  This routine is used in two ways.  It is used by the stack as the
1016  *  init entry point in the network interface structure.  It is also
1017  *  used by the driver as a hw/sw initialization routine to bring the
1018  *  adapter to a consistent state.
1019  *
1020  *  This routine does not return a value; errors are logged via if_printf().
1021  **********************************************************************/
1022
1023 static void
1024 em_init(void *arg)
1025 {
1026         struct adapter *adapter = arg;
1027         uint32_t pba;
1028         struct ifnet *ifp = &adapter->interface_data.ac_if;
1029
1030         ASSERT_SERIALIZED(ifp->if_serializer);
1031
1032         INIT_DEBUGOUT("em_init: begin");
1033
1034         if (ifp->if_flags & IFF_RUNNING)
1035                 return;
1036
1037         em_stop(adapter);
1038
1039         /*
1040          * Packet Buffer Allocation (PBA)
1041          * Writing PBA sets the receive portion of the buffer;
1042          * the remainder is used for the transmit buffer.
1043          *
1044          * Devices before the 82547 had a Packet Buffer of 64K.
1045          *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1046          * After the 82547 the buffer was reduced to 40K.
1047          *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1048          *   Note: default does not leave enough room for Jumbo Frame >10k.
1049          */
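        /*
         * Worked example: an 82547 at the standard frame size takes the
         * E1000_PBA_30K branch below, i.e. 30 KB for Rx, with the TX FIFO
         * spanning the remaining 40 KB - 30 KB = 10 KB
         * (tx_fifo_size == (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT).
         */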
1050         switch (adapter->hw.mac_type) {
1051         case em_82547:
1052         case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1053                 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
1054                         pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1055                 else
1056                         pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1057
1058                 adapter->tx_fifo_head = 0;
1059                 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1060                 adapter->tx_fifo_size =
1061                         (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1062                 break;
1063         /* Total Packet Buffer on these is 48K */
1064         case em_82571:
1065         case em_82572:
1066         case em_80003es2lan:
1067                 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1068                 break;
1069         case em_82573: /* 82573: Total Packet Buffer is 32K */
1070                 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1071                 break;
1072         case em_ich8lan:
1073                 pba = E1000_PBA_8K;
1074                 break;
1075         case em_ich9lan:
1076 #define E1000_PBA_10K   0x000A
1077                 pba = E1000_PBA_10K;
1078                 break;
1079         default:
1080                 /* Devices before 82547 had a Packet Buffer of 64K.   */
1081                 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
1082                         pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1083                 else
1084                         pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1085         }
1086
1087         INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1088         E1000_WRITE_REG(&adapter->hw, PBA, pba);
1089
1090         /* Get the latest mac address, User can use a LAA */
1091         bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
1092               ETHER_ADDR_LEN);
1093
1094         /* Initialize the hardware */
1095         if (em_hardware_init(adapter)) {
1096                 if_printf(ifp, "Unable to initialize the hardware\n");
1097                 return;
1098         }
1099         em_update_link_status(adapter);
1100
1101         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1102                 em_enable_vlans(adapter);
1103
1104         /* Set hardware offload abilities */
1105         if (adapter->hw.mac_type >= em_82543) {
1106                 if (ifp->if_capenable & IFCAP_TXCSUM)
1107                         ifp->if_hwassist = EM_CHECKSUM_FEATURES;
1108                 else
1109                         ifp->if_hwassist = 0;
1110         }
1111
1112         /* Prepare transmit descriptors and buffers */
1113         if (em_setup_transmit_structures(adapter)) {
1114                 if_printf(ifp, "Could not setup transmit structures\n");
1115                 em_stop(adapter);
1116                 return;
1117         }
1118         em_initialize_transmit_unit(adapter);
1119
1120         /* Setup Multicast table */
1121         em_set_multi(adapter);
1122
1123         /* Prepare receive descriptors and buffers */
1124         if (em_setup_receive_structures(adapter)) {
1125                 if_printf(ifp, "Could not setup receive structures\n");
1126                 em_stop(adapter);
1127                 return;
1128         }
1129         em_initialize_receive_unit(adapter);
1130
1131         /* Don't lose promiscuous settings */
1132         em_set_promisc(adapter);
1133
1134         ifp->if_flags |= IFF_RUNNING;
1135         ifp->if_flags &= ~IFF_OACTIVE;
1136
1137         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1138         em_clear_hw_cntrs(&adapter->hw);
1139
1140 #ifdef DEVICE_POLLING
1141         /* Do not enable interrupt if polling(4) is enabled */
1142         if (ifp->if_flags & IFF_POLLING)
1143                 em_disable_intr(adapter);
1144         else
1145 #endif
1146         em_enable_intr(adapter);
1147
1148         /* Don't reset the phy next time init gets called */
1149         adapter->hw.phy_reset_disable = TRUE;
1150 }
1151
1152 #ifdef DEVICE_POLLING
1153
1154 static void
1155 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1156 {
1157         struct adapter *adapter = ifp->if_softc;
1158         uint32_t reg_icr;
1159
1160         ASSERT_SERIALIZED(ifp->if_serializer);
1161
1162         switch(cmd) {
1163         case POLL_REGISTER:
1164                 em_disable_intr(adapter);
1165                 break;
1166         case POLL_DEREGISTER:
1167                 em_enable_intr(adapter);
1168                 break;
1169         case POLL_AND_CHECK_STATUS:
1170                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1171                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1172                         callout_stop(&adapter->timer);
1173                         adapter->hw.get_link_status = 1;
1174                         em_check_for_link(&adapter->hw);
1175                         em_update_link_status(adapter);
1176                         callout_reset(&adapter->timer, hz, em_local_timer,
1177                                       adapter);
1178                 }
1179                 /* fall through */
1180         case POLL_ONLY:
1181                 if (ifp->if_flags & IFF_RUNNING) {
1182                         em_rxeof(adapter, count);
1183                         em_txeof(adapter);
1184
1185                         if (!ifq_is_empty(&ifp->if_snd))
1186                                 if_devstart(ifp);
1187                 }
1188                 break;
1189         }
1190 }
1191
1192 #endif /* DEVICE_POLLING */
1193
1194 /*********************************************************************
1195  *
1196  *  Interrupt Service routine
1197  *
1198  *********************************************************************/
1199 static void
1200 em_intr(void *arg)
1201 {
1202         uint32_t reg_icr;
1203         struct ifnet *ifp;
1204         struct adapter *adapter = arg;
1205
1206         ifp = &adapter->interface_data.ac_if;  
1207
1208         logif(intr_beg);
1209         ASSERT_SERIALIZED(ifp->if_serializer);
1210
1211         reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1212         if ((adapter->hw.mac_type >= em_82571 &&
1213              (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
1214             reg_icr == 0) {
1215                 logif(intr_end);
1216                 return;
1217         }
1218
1219         /*
1220          * XXX: some laptops trigger several spurious interrupts on em(4)
1221          * when in the resume cycle. The ICR register reports all-ones
1222          * value in this case. Processing such interrupts would lead to
1223          * a freeze. I don't know why.
1224          */
1225         if (reg_icr == 0xffffffff) {
1226                 logif(intr_end);
1227                 return;
1228         }
1229
1230         /*
1231          * note: do not attempt to improve efficiency by looping.  This 
1232          * only results in unnecessary piecemeal collection of received
1233          * packets and unnecessary piecemeal cleanups of the transmit ring.
1234          */
1235         if (ifp->if_flags & IFF_RUNNING) {
1236                 em_rxeof(adapter, -1);
1237                 em_txeof(adapter);
1238         }
1239
1240         /* Link status change */
1241         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1242                 callout_stop(&adapter->timer);
1243                 adapter->hw.get_link_status = 1;
1244                 em_check_for_link(&adapter->hw);
1245                 em_update_link_status(adapter);
1246                 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1247         }
1248
1249         if (reg_icr & E1000_ICR_RXO)
1250                 adapter->rx_overruns++;
1251
1252         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
1253                 if_devstart(ifp);
1254
1255         logif(intr_end);
1256 }
1257
1258 /*********************************************************************
1259  *
1260  *  Media Ioctl callback
1261  *
1262  *  This routine is called whenever the user queries the status of
1263  *  the interface using ifconfig.
1264  *
1265  **********************************************************************/
1266 static void
1267 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1268 {
1269         struct adapter *adapter = ifp->if_softc;
1270         u_char fiber_type = IFM_1000_SX;
1271
1272         INIT_DEBUGOUT("em_media_status: begin");
1273
1274         ASSERT_SERIALIZED(ifp->if_serializer);
1275
1276         em_check_for_link(&adapter->hw);
1277         em_update_link_status(adapter);
1278
1279         ifmr->ifm_status = IFM_AVALID;
1280         ifmr->ifm_active = IFM_ETHER;
1281
1282         if (!adapter->link_active)
1283                 return;
1284
1285         ifmr->ifm_status |= IFM_ACTIVE;
1286
1287         if (adapter->hw.media_type == em_media_type_fiber ||
1288             adapter->hw.media_type == em_media_type_internal_serdes) {
1289                 if (adapter->hw.mac_type == em_82545)
1290                         fiber_type = IFM_1000_LX;
1291                 ifmr->ifm_active |= fiber_type | IFM_FDX;
1292         } else {
1293                 switch (adapter->link_speed) {
1294                 case 10:
1295                         ifmr->ifm_active |= IFM_10_T;
1296                         break;
1297                 case 100:
1298                         ifmr->ifm_active |= IFM_100_TX;
1299                         break;
1300                 case 1000:
1301                         ifmr->ifm_active |= IFM_1000_T;
1302                         break;
1303                 }
1304                 if (adapter->link_duplex == FULL_DUPLEX)
1305                         ifmr->ifm_active |= IFM_FDX;
1306                 else
1307                         ifmr->ifm_active |= IFM_HDX;
1308         }
1309 }
1310
1311 /*********************************************************************
1312  *
1313  *  Media Ioctl callback
1314  *
1315  *  This routine is called when the user changes speed/duplex using
1316  *  media/mediaopt option with ifconfig.
1317  *
1318  **********************************************************************/
1319 static int
1320 em_media_change(struct ifnet *ifp)
1321 {
1322         struct adapter *adapter = ifp->if_softc;
1323         struct ifmedia *ifm = &adapter->media;
1324
1325         INIT_DEBUGOUT("em_media_change: begin");
1326
1327         ASSERT_SERIALIZED(ifp->if_serializer);
1328
1329         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1330                 return (EINVAL);
1331
1332         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1333         case IFM_AUTO:
1334                 adapter->hw.autoneg = DO_AUTO_NEG;
1335                 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1336                 break;
1337         case IFM_1000_LX:
1338         case IFM_1000_SX:
1339         case IFM_1000_T:
1340                 adapter->hw.autoneg = DO_AUTO_NEG;
1341                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1342                 break;
1343         case IFM_100_TX:
1344                 adapter->hw.autoneg = FALSE;
1345                 adapter->hw.autoneg_advertised = 0;
1346                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1347                         adapter->hw.forced_speed_duplex = em_100_full;
1348                 else
1349                         adapter->hw.forced_speed_duplex = em_100_half;
1350                 break;
1351         case IFM_10_T:
1352                 adapter->hw.autoneg = FALSE;
1353                 adapter->hw.autoneg_advertised = 0;
1354                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1355                         adapter->hw.forced_speed_duplex = em_10_full;
1356                 else
1357                         adapter->hw.forced_speed_duplex = em_10_half;
1358                 break;
1359         default:
1360                 if_printf(ifp, "Unsupported media type\n");
1361         }
1362         /*
1363          * As the speed/duplex settings may have changed we need to
1364          * reset the PHY.
1365          */
1366         adapter->hw.phy_reset_disable = FALSE;
1367
1368         ifp->if_flags &= ~IFF_RUNNING;
1369         em_init(adapter);
1370
1371         return(0);
1372 }
1373
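/*
 * Callback for bus_dmamap_load_mbuf(): save the DMA segment count and the
 * segment array into the caller-supplied struct em_q so that em_encap()
 * can build the transmit descriptors from them.
 */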
1374 static void
1375 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1376          int error)
1377 {
1378         struct em_q *q = arg;
1379
1380         if (error)
1381                 return;
1382         KASSERT(nsegs <= EM_MAX_SCATTER,
1383                 ("Too many DMA segments returned when mapping tx packet"));
1384         q->nsegs = nsegs;
1385         bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1386 }
1387
1388 /*********************************************************************
1389  *
1390  *  This routine maps the mbufs to tx descriptors.
1391  *
1392  *  return 0 on success, positive on failure
1393  **********************************************************************/
1394 static int
1395 em_encap(struct adapter *adapter, struct mbuf *m_head)
1396 {
1397         uint32_t txd_upper = 0, txd_lower = 0, txd_used = 0, txd_saved = 0;
1398         int i, j, error, last = 0;
1399
1400         struct em_q q;
1401         struct em_buffer *tx_buffer = NULL, *tx_buffer_first;
1402         bus_dmamap_t map;
1403         struct em_tx_desc *current_tx_desc = NULL;
1404         struct ifnet *ifp = &adapter->interface_data.ac_if;
1405
1406         /*
1407          * Force a cleanup if number of TX descriptors
1408          * available hits the threshold
1409          */
1410         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1411                 em_txeof(adapter);
1412                 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1413                         adapter->no_tx_desc_avail1++;
1414                         return (ENOBUFS);
1415                 }
1416         }
1417
1418         /*
1419          * Capture the first descriptor index, this descriptor will have
1420          * the index of the EOP which is the only one that now gets a
1421          * DONE bit writeback.
1422          */
1423         tx_buffer_first = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
1424
1425         /*
1426          * Map the packet for DMA.
1427          */
1428         map = tx_buffer_first->map;
1429         error = bus_dmamap_load_mbuf(adapter->txtag, map, m_head, em_tx_cb,
1430                                      &q, BUS_DMA_NOWAIT);
1431         if (error != 0) {
1432                 adapter->no_tx_dma_setup++;
1433                 return (error);
1434         }
1435         KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1436
1437         if (q.nsegs > (adapter->num_tx_desc_avail - 2)) {
1438                 adapter->no_tx_desc_avail2++;
1439                 error = ENOBUFS;
1440                 goto fail;
1441         }
1442
1443         if (ifp->if_hwassist > 0) {
1444                 em_transmit_checksum_setup(adapter,  m_head,
1445                                            &txd_upper, &txd_lower);
1446         }
1447
1448         i = adapter->next_avail_tx_desc;
1449         if (adapter->pcix_82544)
1450                 txd_saved = i;
1451
1452         /* Set up our transmit descriptors */
1453         for (j = 0; j < q.nsegs; j++) {
1454                 /* If adapter is 82544 and on PCIX bus */
1455                 if (adapter->pcix_82544) {
1456                         DESC_ARRAY desc_array;
1457                         uint32_t array_elements, counter;
1458
1459                         /* 
1460                          * Check the Address and Length combination and
1461                          * split the data accordingly
1462                          */
1463                         array_elements = em_fill_descriptors(q.segs[j].ds_addr,
1464                                                 q.segs[j].ds_len, &desc_array);
1465                         for (counter = 0; counter < array_elements; counter++) {
1466                                 if (txd_used == adapter->num_tx_desc_avail) {
1467                                         adapter->next_avail_tx_desc = txd_saved;
1468                                         adapter->no_tx_desc_avail2++;
1469                                         error = ENOBUFS;
1470                                         goto fail;
1471                                 }
1472                                 tx_buffer = &adapter->tx_buffer_area[i];
1473                                 current_tx_desc = &adapter->tx_desc_base[i];
1474                                 current_tx_desc->buffer_addr = htole64(
1475                                         desc_array.descriptor[counter].address);
1476                                 current_tx_desc->lower.data = htole32(
1477                                         adapter->txd_cmd | txd_lower |
1478                                         (uint16_t)desc_array.descriptor[counter].length);
1479                                 current_tx_desc->upper.data = htole32(txd_upper);
1480
1481                                 last = i;
1482                                 if (++i == adapter->num_tx_desc)
1483                                         i = 0;
1484
1485                                 tx_buffer->m_head = NULL;
1486                                 tx_buffer->next_eop = -1;
1487                                 txd_used++;
1488                         }
1489                 } else {
1490                         tx_buffer = &adapter->tx_buffer_area[i];
1491                         current_tx_desc = &adapter->tx_desc_base[i];
1492
1493                         current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1494                         current_tx_desc->lower.data = htole32(
1495                                 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1496                         current_tx_desc->upper.data = htole32(txd_upper);
1497
1498                         last = i;
1499                         if (++i == adapter->num_tx_desc)
1500                                 i = 0;
1501
1502                         tx_buffer->m_head = NULL;
1503                         tx_buffer->next_eop = -1;
1504                 }
1505         }
1506
1507         adapter->next_avail_tx_desc = i;
1508         if (adapter->pcix_82544)
1509                 adapter->num_tx_desc_avail -= txd_used;
1510         else
1511                 adapter->num_tx_desc_avail -= q.nsegs;
1512
1513         /* Find out if we are in vlan mode */
1514         if (m_head->m_flags & M_VLANTAG) {
1515                 /* Set the vlan id */
1516                 current_tx_desc->upper.fields.special =
1517                         htole16(m_head->m_pkthdr.ether_vlantag);
1518
1519                 /* Tell hardware to add tag */
1520                 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1521         }
1522
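        /*
         * The mbuf was loaded using the first descriptor's DMA map; swap
         * maps so the loaded map stays with the buffer that owns the mbuf
         * and is unloaded by em_txeof() when the packet completes.
         */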
1523         tx_buffer->m_head = m_head;
1524         tx_buffer_first->map = tx_buffer->map;
1525         tx_buffer->map = map;
1526         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1527
1528         /*
1529          * Last Descriptor of Packet needs End Of Packet (EOP)
1530          * and Report Status (RS)
1531          */
1532         current_tx_desc->lower.data |=
1533                 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1534
1535         /*
1536          * Keep track in the first buffer which descriptor will be
1537          * written back.
1538          */
1539         tx_buffer_first->next_eop = last;
1540
1541         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1542                         BUS_DMASYNC_PREWRITE);
1543
1544         /* 
1545          * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1546          * that this frame is available to transmit.
1547          */
1548         if (adapter->hw.mac_type == em_82547 &&
1549             adapter->link_duplex == HALF_DUPLEX) {
1550                 em_82547_move_tail_serialized(adapter);
1551         } else {
1552                 E1000_WRITE_REG(&adapter->hw, TDT, i);
1553                 if (adapter->hw.mac_type == em_82547) {
1554                         em_82547_update_fifo_head(adapter,
1555                                                   m_head->m_pkthdr.len);
1556                 }
1557         }
1558
1559         return (0);
1560 fail:
1561         bus_dmamap_unload(adapter->txtag, map);
1562         return error;
1563 }
1564
1565 /*********************************************************************
1566  *
1567  * 82547 workaround to avoid controller hang in half-duplex environment.
1568  * The workaround is to avoid queuing a large packet that would span
1569  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1570  * in this case. We do that only when the FIFO is quiescent.
1571  *
1572  **********************************************************************/
1573 static void
1574 em_82547_move_tail(void *arg)
1575 {
1576         struct adapter *adapter = arg;
1577         struct ifnet *ifp = &adapter->interface_data.ac_if;
1578
1579         lwkt_serialize_enter(ifp->if_serializer);
1580         em_82547_move_tail_serialized(adapter);
1581         lwkt_serialize_exit(ifp->if_serializer);
1582 }
1583
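/*
 * Advance the hardware tail pointer (TDT) toward the software tail one
 * frame at a time.  If the FIFO workaround indicates a frame cannot be
 * queued yet, defer further progress to the tx_fifo_timer callout.
 */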
1584 static void
1585 em_82547_move_tail_serialized(struct adapter *adapter)
1586 {
1587         uint16_t hw_tdt;
1588         uint16_t sw_tdt;
1589         struct em_tx_desc *tx_desc;
1590         uint16_t length = 0;
1591         boolean_t eop = 0;
1592
1593         hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1594         sw_tdt = adapter->next_avail_tx_desc;
1595
1596         while (hw_tdt != sw_tdt) {
1597                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1598                 length += tx_desc->lower.flags.length;
1599                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1600                 if (++hw_tdt == adapter->num_tx_desc)
1601                         hw_tdt = 0;
1602
1603                 if (eop) {
1604                         if (em_82547_fifo_workaround(adapter, length)) {
1605                                 adapter->tx_fifo_wrk_cnt++;
1606                                 callout_reset(&adapter->tx_fifo_timer, 1,
1607                                         em_82547_move_tail, adapter);
1608                                 break;
1609                         }
1610                         E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1611                         em_82547_update_fifo_head(adapter, length);
1612                         length = 0;
1613                 }
1614         }       
1615 }
1616
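/*
 * Decide whether a frame of the given length can be queued without
 * wrapping the internal Tx FIFO while in half-duplex.  Returns 0 if the
 * frame may be queued (possibly after a successful FIFO reset), non-zero
 * if the transmit must be deferred.
 */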
1617 static int
1618 em_82547_fifo_workaround(struct adapter *adapter, int len)
1619 {       
1620         int fifo_space, fifo_pkt_len;
1621
1622         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1623
1624         if (adapter->link_duplex == HALF_DUPLEX) {
1625                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1626
1627                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1628                         if (em_82547_tx_fifo_reset(adapter))
1629                                 return (0);
1630                         else
1631                                 return (1);
1632                 }
1633         }
1634
1635         return (0);
1636 }
1637
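/*
 * Account for a queued frame in the software copy of the Tx FIFO head,
 * wrapping at the FIFO size.
 */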
1638 static void
1639 em_82547_update_fifo_head(struct adapter *adapter, int len)
1640 {
1641         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1642
1643         /* tx_fifo_head is always 16 byte aligned */
1644         adapter->tx_fifo_head += fifo_pkt_len;
1645         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1646                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1647 }
1648
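/*
 * Reset the internal Tx FIFO pointers.  This is only attempted when the
 * transmit unit is completely idle; returns TRUE if the reset was done,
 * FALSE if the FIFO was not quiescent.
 */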
1649 static int
1650 em_82547_tx_fifo_reset(struct adapter *adapter)
1651 {
1652         uint32_t tctl;
1653
1654         if (E1000_READ_REG(&adapter->hw, TDT) == E1000_READ_REG(&adapter->hw, TDH) &&
1655             E1000_READ_REG(&adapter->hw, TDFT) == E1000_READ_REG(&adapter->hw, TDFH) &&
1656             E1000_READ_REG(&adapter->hw, TDFTS) == E1000_READ_REG(&adapter->hw, TDFHS) &&
1657             E1000_READ_REG(&adapter->hw, TDFPC) == 0) {
1658                 /* Disable TX unit */
1659                 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1660                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1661
1662                 /* Reset FIFO pointers */
1663                 E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1664                 E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1665                 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1666                 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1667
1668                 /* Re-enable TX unit */
1669                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1670                 E1000_WRITE_FLUSH(&adapter->hw);
1671
1672                 adapter->tx_fifo_head = 0;
1673                 adapter->tx_fifo_reset_cnt++;
1674
1675                 return (TRUE);
1676         } else {
1677                 return (FALSE);
1678         }
1679 }
1680
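/*
 * Program RCTL for promiscuous or all-multicast reception according to
 * the interface flags, disabling hardware VLAN stripping while in
 * promiscuous mode.
 */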
1681 static void
1682 em_set_promisc(struct adapter *adapter)
1683 {
1684         uint32_t reg_rctl;
1685         struct ifnet *ifp = &adapter->interface_data.ac_if;
1686
1687         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1688
1689         adapter->em_insert_vlan_header = 0;
1690         if (ifp->if_flags & IFF_PROMISC) {
1691                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1692                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1693
1694                 /*
1695                  * Disable VLAN stripping in promiscuous mode.
1696                  * This enables bridging of vlan tagged frames to occur 
1697                  * and also allows vlan tags to be seen in tcpdump.
1698                  */
1699                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1700                         em_disable_vlans(adapter);
1701                 adapter->em_insert_vlan_header = 1;
1702         } else if (ifp->if_flags & IFF_ALLMULTI) {
1703                 reg_rctl |= E1000_RCTL_MPE;
1704                 reg_rctl &= ~E1000_RCTL_UPE;
1705                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1706         }
1707 }
1708
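/*
 * Return to normal reception: clear the unicast/multicast promiscuous
 * bits and re-enable hardware VLAN stripping if the interface uses it.
 */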
1709 static void
1710 em_disable_promisc(struct adapter *adapter)
1711 {
1712         struct ifnet *ifp = &adapter->interface_data.ac_if;
1713
1714         uint32_t reg_rctl;
1715
1716         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1717
1718         reg_rctl &= (~E1000_RCTL_UPE);
1719         reg_rctl &= (~E1000_RCTL_MPE);
1720         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1721
1722         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1723                 em_enable_vlans(adapter);
1724         adapter->em_insert_vlan_header = 0;
1725 }
1726
1727 /*********************************************************************
1728  *  Multicast Update
1729  *
1730  *  This routine is called whenever multicast address list is updated.
1731  *
1732  **********************************************************************/
1733
1734 static void
1735 em_set_multi(struct adapter *adapter)
1736 {
1737         uint32_t reg_rctl = 0;
1738         uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1739         struct ifmultiaddr *ifma;
1740         int mcnt = 0;
1741         struct ifnet *ifp = &adapter->interface_data.ac_if;
1742
1743         IOCTL_DEBUGOUT("em_set_multi: begin");
1744
1745         if (adapter->hw.mac_type == em_82542_rev2_0) {
1746                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1747                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1748                         em_pci_clear_mwi(&adapter->hw);
1749                 reg_rctl |= E1000_RCTL_RST;
1750                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1751                 msec_delay(5);
1752         }
1753
1754         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1755                 if (ifma->ifma_addr->sa_family != AF_LINK)
1756                         continue;
1757
1758                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1759                         break;
1760
1761                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1762                       &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1763                 mcnt++;
1764         }
1765
1766         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1767                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1768                 reg_rctl |= E1000_RCTL_MPE;
1769                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1770         } else {
1771                 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1772         }
1773
1774         if (adapter->hw.mac_type == em_82542_rev2_0) {
1775                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1776                 reg_rctl &= ~E1000_RCTL_RST;
1777                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1778                 msec_delay(5);
1779                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1780                         em_pci_set_mwi(&adapter->hw);
1781         }
1782 }
1783
1784 /*********************************************************************
1785  *  Timer routine
1786  *
1787  *  This routine checks for link status and updates statistics.
1788  *
1789  **********************************************************************/
1790
1791 static void
1792 em_local_timer(void *arg)
1793 {
1794         struct ifnet *ifp;
1795         struct adapter *adapter = arg;
1796         ifp = &adapter->interface_data.ac_if;
1797
1798         lwkt_serialize_enter(ifp->if_serializer);
1799
1800         em_check_for_link(&adapter->hw);
1801         em_update_link_status(adapter);
1802         em_update_stats_counters(adapter);
1803         if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1804                 em_print_hw_stats(adapter);
1805         em_smartspeed(adapter);
1806
1807         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1808
1809         lwkt_serialize_exit(ifp->if_serializer);
1810 }
1811
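/*
 * Examine the link-up (LU) bit in the STATUS register and propagate any
 * change in link state, speed and duplex to the ifnet.
 */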
1812 static void
1813 em_update_link_status(struct adapter *adapter)
1814 {
1815         struct ifnet *ifp;
1816         ifp = &adapter->interface_data.ac_if;
1817
1818         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1819                 if (adapter->link_active == 0) {
1820                         em_get_speed_and_duplex(&adapter->hw, 
1821                                                 &adapter->link_speed, 
1822                                                 &adapter->link_duplex);
1823                         /* Check if we may set SPEED_MODE bit on PCI-E */
1824                         if (adapter->link_speed == SPEED_1000 &&
1825                             (adapter->hw.mac_type == em_82571 ||
1826                              adapter->hw.mac_type == em_82572)) {
1827                                 int tarc0;
1828
1829                                 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
1830                                 tarc0 |= SPEED_MODE_BIT;
1831                                 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
1832                         }
1833                         if (bootverbose) {
1834                                 if_printf(&adapter->interface_data.ac_if,
1835                                           "Link is up %d Mbps %s\n",
1836                                           adapter->link_speed,
1837                                           adapter->link_duplex == FULL_DUPLEX ?
1838                                                 "Full Duplex" : "Half Duplex");
1839                         }
1840                         adapter->link_active = 1;
1841                         adapter->smartspeed = 0;
1842                         ifp->if_baudrate = adapter->link_speed * 1000000;
1843                         ifp->if_link_state = LINK_STATE_UP;
1844                         if_link_state_change(ifp);
1845                 }
1846         } else {
1847                 if (adapter->link_active == 1) {
1848                         ifp->if_baudrate = 0;
1849                         adapter->link_speed = 0;
1850                         adapter->link_duplex = 0;
1851                         if (bootverbose) {
1852                                 if_printf(&adapter->interface_data.ac_if,
1853                                           "Link is Down\n");
1854                         }
1855                         adapter->link_active = 0;
1856                         ifp->if_link_state = LINK_STATE_DOWN;
1857                         if_link_state_change(ifp);
1858                 }
1859         }
1860 }
1861
1862 /*********************************************************************
1863  *
1864  *  This routine disables all traffic on the adapter by issuing a
1865  *  global reset on the MAC and deallocates TX/RX buffers.
1866  *
1867  **********************************************************************/
1868
1869 static void
1870 em_stop(void *arg)
1871 {
1872         struct ifnet   *ifp;
1873         struct adapter *adapter = arg;
1874         ifp = &adapter->interface_data.ac_if;
1875
1876         ASSERT_SERIALIZED(ifp->if_serializer);
1877
1878         INIT_DEBUGOUT("em_stop: begin");
1879         em_disable_intr(adapter);
1880         em_reset_hw(&adapter->hw);
1881         callout_stop(&adapter->timer);
1882         callout_stop(&adapter->tx_fifo_timer);
1883         em_free_transmit_structures(adapter);
1884         em_free_receive_structures(adapter);
1885
1886         /* Tell the stack that the interface is no longer active */
1887         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1888         ifp->if_timer = 0;
1889 }
1890
1891 /*********************************************************************
1892  *
1893  *  Determine hardware revision.
1894  *
1895  **********************************************************************/
1896 static void
1897 em_identify_hardware(struct adapter *adapter)
1898 {
1899         device_t dev = adapter->dev;
1900
1901         /* Make sure our PCI config space has the necessary stuff set */
1902         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1903         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1904               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1905                 device_printf(dev, "Memory Access and/or Bus Master bits "
1906                               "were not set!\n");
1907                 adapter->hw.pci_cmd_word |= PCIM_CMD_BUSMASTEREN |
1908                                             PCIM_CMD_MEMEN;
1909                 pci_write_config(dev, PCIR_COMMAND,
1910                                  adapter->hw.pci_cmd_word, 2);
1911         }
1912
1913         /* Save off the information about this board */
1914         adapter->hw.vendor_id = pci_get_vendor(dev);
1915         adapter->hw.device_id = pci_get_device(dev);
1916         adapter->hw.revision_id = pci_get_revid(dev);
1917         adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1918         adapter->hw.subsystem_id = pci_get_subdevice(dev);
1919
1920         /* Identify the MAC */
1921         if (em_set_mac_type(&adapter->hw))
1922                 device_printf(dev, "Unknown MAC Type\n");
1923
1924         if (adapter->hw.mac_type == em_82541 ||
1925             adapter->hw.mac_type == em_82541_rev_2 ||
1926             adapter->hw.mac_type == em_82547 ||
1927             adapter->hw.mac_type == em_82547_rev_2)
1928                 adapter->hw.phy_init_script = TRUE;
1929 }
1930
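/*
 * Allocate the PCI resources used by the adapter: the memory-mapped
 * register BAR, the I/O BAR on post-82543 parts, the ICH8/ICH9 flash
 * window and the interrupt line.
 */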
1931 static int
1932 em_allocate_pci_resources(device_t dev)
1933 {
1934         struct adapter *adapter = device_get_softc(dev);
1935         int rid;
1936
1937         rid = PCIR_BAR(0);
1938         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1939                                                      &rid, RF_ACTIVE);
1940         if (adapter->res_memory == NULL) {
1941                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1942                 return ENXIO;
1943         }
1944         adapter->osdep.mem_bus_space_tag =
1945                 rman_get_bustag(adapter->res_memory);
1946         adapter->osdep.mem_bus_space_handle =
1947             rman_get_bushandle(adapter->res_memory);
1948         adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
1949
1950         if (adapter->hw.mac_type > em_82543) {
1951                 /* Figure out where our IO BAR is */
1952                 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1953                         uint32_t val;
1954
1955                         val = pci_read_config(dev, rid, 4);
1956                         if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
1957                                 adapter->io_rid = rid;
1958                                 break;
1959                         }
1960                         rid += 4;
1961                         /* check for 64bit BAR */
1962                         if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
1963                                 rid += 4;
1964                 }
1965                 if (rid >= PCIR_CIS) {
1966                         device_printf(dev, "Unable to locate IO BAR\n");
1967                         return (ENXIO);
1968                 }
1969
1970                 adapter->res_ioport = bus_alloc_resource_any(dev,
1971                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
1972                 if (!(adapter->res_ioport)) {
1973                         device_printf(dev, "Unable to allocate bus resource: "
1974                                       "ioport\n");
1975                         return ENXIO;
1976                 }
1977                 adapter->hw.io_base = 0;
1978                 adapter->osdep.io_bus_space_tag =
1979                         rman_get_bustag(adapter->res_ioport);
1980                 adapter->osdep.io_bus_space_handle =
1981                         rman_get_bushandle(adapter->res_ioport);
1982         }
1983
1984         /* For ICH8 we need to find the flash memory. */
1985         if ((adapter->hw.mac_type == em_ich8lan) ||
1986             (adapter->hw.mac_type == em_ich9lan)) {
1987                 rid = EM_FLASH;
1988                 adapter->flash_mem = bus_alloc_resource_any(dev,
1989                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
1990                 if (adapter->flash_mem == NULL) {
1991                         device_printf(dev, "Unable to allocate bus resource: "
1992                                       "flash memory\n");
1993                         return ENXIO;
1994                 }
1995                 adapter->osdep.flash_bus_space_tag =
1996                     rman_get_bustag(adapter->flash_mem);
1997                 adapter->osdep.flash_bus_space_handle =
1998                     rman_get_bushandle(adapter->flash_mem);
1999         }
2000
2001         rid = 0x0;
2002         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2003             &rid, RF_SHAREABLE | RF_ACTIVE);
2004         if (adapter->res_interrupt == NULL) {
2005                 device_printf(dev, "Unable to allocate bus resource: "
2006                               "interrupt\n");
2007                 return ENXIO;
2008         }
2009
2010         adapter->hw.back = &adapter->osdep;
2011
2012         return 0;
2013 }
2014
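/*
 * Release everything acquired by em_allocate_pci_resources().
 */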
2015 static void
2016 em_free_pci_resources(device_t dev)
2017 {
2018         struct adapter *adapter = device_get_softc(dev);
2019
2020         if (adapter->res_interrupt != NULL) {
2021                 bus_release_resource(dev, SYS_RES_IRQ, 0, 
2022                                      adapter->res_interrupt);
2023         }
2024         if (adapter->res_memory != NULL) {
2025                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 
2026                                      adapter->res_memory);
2027         }
2028
2029         if (adapter->res_ioport != NULL) {
2030                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 
2031                                      adapter->res_ioport);
2032         }
2033
2034         if (adapter->flash_mem != NULL) {
2035                 bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH,
2036                                      adapter->flash_mem);
2037         }
2038 }
2039
2040 /*********************************************************************
2041  *
2042  *  Initialize the hardware to a configuration as specified by the
2043  *  adapter structure. The controller is reset, the EEPROM is
2044  *  verified, the MAC address is set, then the shared initialization
2045  *  routines are called.
2046  *
2047  **********************************************************************/
2048 static int
2049 em_hardware_init(struct adapter *adapter)
2050 {
2051         uint16_t        rx_buffer_size;
2052
2053         INIT_DEBUGOUT("em_hardware_init: begin");
2054         /* Issue a global reset */
2055         em_reset_hw(&adapter->hw);
2056
2057         /* When hardware is reset, fifo_head is also reset */
2058         adapter->tx_fifo_head = 0;
2059
2060         /* Make sure we have a good EEPROM before we read from it */
2061         if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2062                 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2063                         device_printf(adapter->dev,
2064                                       "The EEPROM Checksum Is Not Valid\n");
2065                         return (EIO);
2066                 }
2067         }
2068
2069         if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
2070                 device_printf(adapter->dev,
2071                               "EEPROM read error while reading part number\n");
2072                 return (EIO);
2073         }
2074
2075         /* Set up smart power down as default off on newer adapters. */
2076         if (!em_smart_pwr_down &&
2077             (adapter->hw.mac_type == em_82571 ||
2078              adapter->hw.mac_type == em_82572)) {
2079                 uint16_t phy_tmp = 0;
2080
2081                 /* Speed up time to link by disabling smart power down. */
2082                 em_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
2083                                 &phy_tmp);
2084                 phy_tmp &= ~IGP02E1000_PM_SPD;
2085                 em_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
2086                                  phy_tmp);
2087         }
2088
2089         /*
2090          * These parameters control the automatic generation (Tx) and
2091          * response (Rx) to Ethernet PAUSE frames.
2092          * - High water mark should allow for at least two frames to be
2093          *   received after sending an XOFF.
2094          * - Low water mark works best when it is very near the high water mark.
2095          *   This allows the receiver to restart by sending XON when it has
2096          *   drained a bit.  Here we use an arbitrary value of 1500 which will
2097          *   restart after one full frame is pulled from the buffer.  There
2098          *   could be several smaller frames in the buffer and if so they will
2099          *   not trigger the XON until their total number reduces the buffer
2100          *   by 1500.
2101          * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2102          */
2103         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10);
2104
2105         adapter->hw.fc_high_water =
2106             rx_buffer_size - roundup2(adapter->hw.max_frame_size, 1024); 
2107         adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
2108         if (adapter->hw.mac_type == em_80003es2lan)
2109                 adapter->hw.fc_pause_time = 0xFFFF;
2110         else
2111                 adapter->hw.fc_pause_time = 1000;
2112         adapter->hw.fc_send_xon = TRUE;
2113         adapter->hw.fc = E1000_FC_FULL;
2114
2115         if (em_init_hw(&adapter->hw) < 0) {
2116                 device_printf(adapter->dev, "Hardware Initialization Failed");
2117                 return (EIO);
2118         }
2119
2120         em_check_for_link(&adapter->hw);
2121
2122         return (0);
2123 }
2124
2125 /*********************************************************************
2126  *
2127  *  Setup networking device structure and register an interface.
2128  *
2129  **********************************************************************/
2130 static void
2131 em_setup_interface(device_t dev, struct adapter *adapter)
2132 {
2133         struct ifnet *ifp;
2134         u_char fiber_type = IFM_1000_SX;        /* default type */
2135         INIT_DEBUGOUT("em_setup_interface: begin");
2136
2137         ifp = &adapter->interface_data.ac_if;
2138         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2139         ifp->if_mtu = ETHERMTU;
2140         ifp->if_baudrate = 1000000000;
2141         ifp->if_init =  em_init;
2142         ifp->if_softc = adapter;
2143         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2144         ifp->if_ioctl = em_ioctl;
2145         ifp->if_start = em_start;
2146 #ifdef DEVICE_POLLING
2147         ifp->if_poll = em_poll;
2148 #endif
2149         ifp->if_watchdog = em_watchdog;
2150         ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
2151         ifq_set_ready(&ifp->if_snd);
2152
2153         if (adapter->hw.mac_type >= em_82543)
2154                 ifp->if_capabilities |= IFCAP_HWCSUM;
2155
2156         ifp->if_capenable = ifp->if_capabilities;
2157
2158         ether_ifattach(ifp, adapter->hw.mac_addr, NULL);
2159
2160 #ifdef PROFILE_SERIALIZER
2161         SYSCTL_ADD_UINT(&adapter->sysctl_ctx,
2162                         SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO,
2163                         "serializer_sleep", CTLFLAG_RW,
2164                         &ifp->if_serializer->sleep_cnt, 0, NULL);
2165         SYSCTL_ADD_UINT(&adapter->sysctl_ctx,
2166                         SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO,
2167                         "serializer_tryfail", CTLFLAG_RW,
2168                         &ifp->if_serializer->tryfail_cnt, 0, NULL);
2169         SYSCTL_ADD_UINT(&adapter->sysctl_ctx,
2170                         SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO,
2171                         "serializer_enter", CTLFLAG_RW,
2172                         &ifp->if_serializer->enter_cnt, 0, NULL);
2173         SYSCTL_ADD_UINT(&adapter->sysctl_ctx,
2174                         SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO,
2175                         "serializer_try", CTLFLAG_RW,
2176                         &ifp->if_serializer->try_cnt, 0, NULL);
2177 #endif
2178
2179         /*
2180          * Tell the upper layer(s) we support long frames.
2181          */
2182         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2183         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2184 #if 0
2185         ifp->if_capenable |= IFCAP_VLAN_MTU;
2186 #endif
2187
2188         /*
2189          * Specify the media types supported by this adapter and register
2190          * callbacks to update media and link information
2191          */
2192         ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
2193                      em_media_status);
2194         if (adapter->hw.media_type == em_media_type_fiber ||
2195             adapter->hw.media_type == em_media_type_internal_serdes) {
2196                 if (adapter->hw.mac_type == em_82545)
2197                         fiber_type = IFM_1000_LX;
2198                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
2199                             0, NULL);
2200                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2201         } else {
2202                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2203                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2204                             0, NULL);
2205                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2206                             0, NULL);
2207                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2208                             0, NULL);
2209                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
2210                             0, NULL);
2211                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2212         }
2213         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2214         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2215 }
2216
2217 /*********************************************************************
2218  *
2219  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2220  *
2221  **********************************************************************/
2222 static void
2223 em_smartspeed(struct adapter *adapter)
2224 {
2225         uint16_t phy_tmp;
2226
2227         if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
2228             !adapter->hw.autoneg ||
2229             !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2230                 return;
2231
2232         if (adapter->smartspeed == 0) {
2233                 /*
2234                  * If Master/Slave config fault is asserted twice,
2235                  * we assume back-to-back.
2236                  */
2237                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2238                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2239                         return;
2240                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2241                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2242                         em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2243                         if (phy_tmp & CR_1000T_MS_ENABLE) {
2244                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
2245                                 em_write_phy_reg(&adapter->hw,
2246                                                  PHY_1000T_CTRL, phy_tmp);
2247                                 adapter->smartspeed++;
2248                                 if (adapter->hw.autoneg &&
2249                                     !em_phy_setup_autoneg(&adapter->hw) &&
2250                                     !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2251                                                      &phy_tmp)) {
2252                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2253                                                     MII_CR_RESTART_AUTO_NEG);
2254                                         em_write_phy_reg(&adapter->hw,
2255                                                          PHY_CTRL, phy_tmp);
2256                                 }
2257                         }
2258                 }
2259                 return;
2260         } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2261                 /* If still no link, perhaps using 2/3 pair cable */
2262                 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2263                 phy_tmp |= CR_1000T_MS_ENABLE;
2264                 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2265                 if (adapter->hw.autoneg &&
2266                     !em_phy_setup_autoneg(&adapter->hw) &&
2267                     !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2268                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2269                                     MII_CR_RESTART_AUTO_NEG);
2270                         em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2271                 }
2272         }
2273         /* Restart process after EM_SMARTSPEED_MAX iterations */
2274         if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2275                 adapter->smartspeed = 0;
2276 }
2277
2278 /*
2279  * Manage DMA'able memory.
2280  */
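/*
 * Callback for bus_dmamap_load(): record the physical (bus) address of
 * the single DMA segment.
 */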
2281 static void
2282 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2283 {
2284         if (error)
2285                 return;
2286         *(bus_addr_t *)arg = segs->ds_addr;
2287 }
2288
2289 static int
2290 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2291               struct em_dma_alloc *dma)
2292 {
2293         device_t dev = adapter->dev;
2294         int error;
2295
2296         error = bus_dma_tag_create(NULL,                /* parent */
2297                                    EM_DBA_ALIGN, 0,     /* alignment, bounds */
2298                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2299                                    BUS_SPACE_MAXADDR,   /* highaddr */
2300                                    NULL, NULL,          /* filter, filterarg */
2301                                    size,                /* maxsize */
2302                                    1,                   /* nsegments */
2303                                    size,                /* maxsegsize */
2304                                    0,                   /* flags */
2305                                    &dma->dma_tag);
2306         if (error) {
2307                 device_printf(dev, "%s: bus_dma_tag_create failed; error %d\n",
2308                               __func__, error);
2309                 return error;
2310         }
2311
2312         error = bus_dmamem_alloc(dma->dma_tag, (void**)&dma->dma_vaddr,
2313                                  BUS_DMA_WAITOK, &dma->dma_map);
2314         if (error) {
2315                 device_printf(dev, "%s: bus_dmamem_alloc failed; "
2316                               "size %ju, error %d\n",
2317                               __func__, (uintmax_t)size, error);
2318                 goto fail;
2319         }
2320
2321         error = bus_dmamap_load(dma->dma_tag, dma->dma_map,
2322                                 dma->dma_vaddr, size,
2323                                 em_dmamap_cb, &dma->dma_paddr,
2324                                 BUS_DMA_WAITOK);
2325         if (error) {
2326                 device_printf(dev, "%s: bus_dmamap_load failed; error %u\n",
2327                               __func__, error);
2328                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2329                 goto fail;
2330         }
2331
2332         return 0;
2333 fail:
2334         bus_dma_tag_destroy(dma->dma_tag);
2335         dma->dma_tag = NULL;
2336         return error;
2337 }
2338
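/*
 * Release the DMA map, memory and tag allocated by em_dma_malloc().
 */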
2339 static void
2340 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2341 {
2342         if (dma->dma_tag != NULL) {
2343                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2344                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2345                 bus_dma_tag_destroy(dma->dma_tag);
2346                 dma->dma_tag = NULL;
2347         }
2348 }
2349
2350 /*********************************************************************
2351  *
2352  *  Allocate and initialize transmit structures.
2353  *
2354  **********************************************************************/
2355 static int
2356 em_setup_transmit_structures(struct adapter *adapter)
2357 {
2358         struct em_buffer *tx_buffer;
2359         bus_size_t size;
2360         int error, i;
2361
2362         /*
2363          * Setup DMA descriptor areas.
2364          */
2365         size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
2366         if (bus_dma_tag_create(NULL,                    /* parent */
2367                                1, 0,                    /* alignment, bounds */
2368                                BUS_SPACE_MAXADDR,       /* lowaddr */ 
2369                                BUS_SPACE_MAXADDR,       /* highaddr */
2370                                NULL, NULL,              /* filter, filterarg */
2371                                size,                    /* maxsize */
2372                                EM_MAX_SCATTER,          /* nsegments */
2373                                size,                    /* maxsegsize */
2374                                0,                       /* flags */ 
2375                                &adapter->txtag)) {
2376                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
2377                 return(ENOMEM);
2378         }
2379
2380         adapter->tx_buffer_area =
2381                 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc,
2382                         M_DEVBUF, M_WAITOK | M_ZERO);
2383
2384         bzero(adapter->tx_desc_base,
2385               sizeof(struct em_tx_desc) * adapter->num_tx_desc);
2386         tx_buffer = adapter->tx_buffer_area;
2387         for (i = 0; i < adapter->num_tx_desc; i++) {
2388                 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2389                 if (error) {
2390                         device_printf(adapter->dev,
2391                                       "Unable to create TX DMA map\n");
2392                         goto fail;
2393                 }
2394                 tx_buffer++;
2395         }
2396
2397         adapter->next_avail_tx_desc = 0;
2398         adapter->next_tx_to_clean = 0;
2399
2400         /* Set number of descriptors available */
2401         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2402
2403         /* Set checksum context */
2404         adapter->active_checksum_context = OFFLOAD_NONE;
2405
2406         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2407                         BUS_DMASYNC_PREWRITE);
2408
2409         return (0);
2410 fail:
2411         em_free_transmit_structures(adapter);
2412         return (error);
2413 }
2414
2415 /*********************************************************************
2416  *
2417  *  Enable transmit unit.
2418  *
2419  **********************************************************************/
2420 static void
2421 em_initialize_transmit_unit(struct adapter *adapter)
2422 {
2423         uint32_t reg_tctl;
2424         uint32_t reg_tipg = 0;
2425         uint64_t bus_addr;
2426
2427         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2428
2429         /* Setup the Base and Length of the Tx Descriptor Ring */
2430         bus_addr = adapter->txdma.dma_paddr;
2431         E1000_WRITE_REG(&adapter->hw, TDLEN,
2432                         adapter->num_tx_desc * sizeof(struct em_tx_desc));
2433         E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
2434         E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
2435
2436         /* Setup the HW Tx Head and Tail descriptor pointers */
2437         E1000_WRITE_REG(&adapter->hw, TDT, 0);
2438         E1000_WRITE_REG(&adapter->hw, TDH, 0);
2439
2440         HW_DEBUGOUT2("Base = %x, Length = %x\n",
2441                      E1000_READ_REG(&adapter->hw, TDBAL),
2442                      E1000_READ_REG(&adapter->hw, TDLEN));
2443
2444         /* Set the default values for the Tx Inter Packet Gap timer */
2445         switch (adapter->hw.mac_type) {
2446         case em_82542_rev2_0:
2447         case em_82542_rev2_1:
2448                 reg_tipg = DEFAULT_82542_TIPG_IPGT;
2449                 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2450                 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2451                 break;
2452         case em_80003es2lan:
2453                 reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2454                 reg_tipg |=
2455                     DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2456                 break;
2457         default:
2458                 if (adapter->hw.media_type == em_media_type_fiber ||
2459                     adapter->hw.media_type == em_media_type_internal_serdes)
2460                         reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2461                 else
2462                         reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2463                 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2464                 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2465         }
2466
2467         E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2468         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2469         if (adapter->hw.mac_type >= em_82540) {
2470                 E1000_WRITE_REG(&adapter->hw, TADV,
2471                                 adapter->tx_abs_int_delay.value);
2472         }
2473
2474         /* Program the Transmit Control Register */
2475         reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2476                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2477         if (adapter->hw.mac_type >= em_82571)
2478                 reg_tctl |= E1000_TCTL_MULR;
2479         if (adapter->link_duplex == 1)
2480                 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2481         else
2482                 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2483
2484         /* This write will effectively turn on the transmit unit. */
2485         E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2486
2487         /* Setup Transmit Descriptor Base Settings */
2488         adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2489
2490         if (adapter->tx_int_delay.value > 0)
2491                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2492 }
2493
2494 /*********************************************************************
2495  *
2496  *  Free all transmit related data structures.
2497  *
2498  **********************************************************************/
2499 static void
2500 em_free_transmit_structures(struct adapter *adapter)
2501 {
2502         struct em_buffer *tx_buffer;
2503         int i;
2504
2505         INIT_DEBUGOUT("em_free_transmit_structures: begin");
2506
2507         if (adapter->tx_buffer_area != NULL) {
2508                 tx_buffer = adapter->tx_buffer_area;
2509                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2510                         if (tx_buffer->m_head != NULL) {
2511                                 bus_dmamap_unload(adapter->txtag,
2512                                                   tx_buffer->map);
2513                                 m_freem(tx_buffer->m_head);
2514                         }
2515
2516                         if (tx_buffer->map != NULL) {
2517                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2518                                 tx_buffer->map = NULL;
2519                         }
2520                         tx_buffer->m_head = NULL;
2521                 }
2522         }
2523         if (adapter->tx_buffer_area != NULL) {
2524                 kfree(adapter->tx_buffer_area, M_DEVBUF);
2525                 adapter->tx_buffer_area = NULL;
2526         }
2527         if (adapter->txtag != NULL) {
2528                 bus_dma_tag_destroy(adapter->txtag);
2529                 adapter->txtag = NULL;
2530         }
2531 }
2532
2533 /*********************************************************************
2534  *
2535  *  The offload context needs to be set when we transfer the first
2536  *  packet of a particular protocol (TCP/UDP). We change the
2537  *  context only if the protocol type changes.
2538  *
2539  **********************************************************************/
2540 static void
2541 em_transmit_checksum_setup(struct adapter *adapter,
2542                            struct mbuf *mp,
2543                            uint32_t *txd_upper,
2544                            uint32_t *txd_lower) 
2545 {
2546         struct em_context_desc *TXD;
2547         struct em_buffer *tx_buffer;
2548         int curr_txd;
2549
2550         if (mp->m_pkthdr.csum_flags) {
2551                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2552                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2553                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2554                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2555                                 return;
2556                         else
2557                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2558                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2559                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2560                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2561                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2562                                 return;
2563                         else
2564                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2565                 } else {
2566                         *txd_upper = 0;
2567                         *txd_lower = 0;
2568                         return;
2569                 }
2570         } else {
2571                 *txd_upper = 0;
2572                 *txd_lower = 0;
2573                 return;
2574         }
2575
2576         /*
2577          * If we reach this point, the checksum offload context
2578          * needs to be reset.
2579          */
2580         curr_txd = adapter->next_avail_tx_desc;
2581         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2582         TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2583
2584         TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2585         TXD->lower_setup.ip_fields.ipcso =
2586             ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2587         TXD->lower_setup.ip_fields.ipcse =
2588             htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2589
2590         TXD->upper_setup.tcp_fields.tucss =
2591             ETHER_HDR_LEN + sizeof(struct ip);
2592         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2593
2594         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2595                 TXD->upper_setup.tcp_fields.tucso =
2596                         ETHER_HDR_LEN + sizeof(struct ip) +
2597                         offsetof(struct tcphdr, th_sum);
2598         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2599                 TXD->upper_setup.tcp_fields.tucso =
2600                         ETHER_HDR_LEN + sizeof(struct ip) +
2601                         offsetof(struct udphdr, uh_sum);
2602         }
2603
2604         TXD->tcp_seg_setup.data = htole32(0);
2605         TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2606
2607         tx_buffer->m_head = NULL;
2608         tx_buffer->next_eop = -1;
2609
2610         if (++curr_txd == adapter->num_tx_desc)
2611                 curr_txd = 0;
2612
2613         adapter->num_tx_desc_avail--;
2614         adapter->next_avail_tx_desc = curr_txd;
2615 }
2616
2617 /**********************************************************************
2618  *
2619  *  Examine each tx_buffer in the used queue. If the hardware is done
2620  *  processing the packet then free associated resources. The
2621  *  tx_buffer is put back on the free queue.
2622  *
2623  **********************************************************************/
2624
2625 static void
2626 em_txeof(struct adapter *adapter)
2627 {
2628         int first, last, done, num_avail;
2629         struct em_buffer *tx_buffer;
2630         struct em_tx_desc *tx_desc, *eop_desc;
2631         struct ifnet *ifp = &adapter->interface_data.ac_if;
2632
2633         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2634                 return;
2635
2636         num_avail = adapter->num_tx_desc_avail; 
2637         first = adapter->next_tx_to_clean;
2638         tx_desc = &adapter->tx_desc_base[first];
2639         tx_buffer = &adapter->tx_buffer_area[first];
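        /*
         * next_eop holds the index of the descriptor carrying the EOP bit
         * for the packet that starts at 'first'; -1 means no packet is
         * pending in this slot.
         */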
2640         last = tx_buffer->next_eop;
2641         KKASSERT(last >= 0 && last < adapter->num_tx_desc);
2642         eop_desc = &adapter->tx_desc_base[last];
2643
2644         /*
2645          * Now calculate the terminating index for the cleanup loop below
2646          */
2647         if (++last == adapter->num_tx_desc)
2648                 last = 0;
2649         done = last;
2650
2651         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2652                         BUS_DMASYNC_POSTREAD);
2653
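        /*
         * Walk the ring packet by packet: the outer loop tests the DD
         * (descriptor done) bit on each packet's EOP descriptor, the inner
         * loop releases every descriptor belonging to that packet.
         */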
2654         while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2655                 while (first != done) {
2656                         tx_desc->upper.data = 0;
2657                         tx_desc->lower.data = 0;
2658                         num_avail++;
2659
2660                         logif(pkt_txclean);
2661
2662                         if (tx_buffer->m_head) {
2663                                 ifp->if_opackets++;
2664                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2665                                                 BUS_DMASYNC_POSTWRITE);
2666                                 bus_dmamap_unload(adapter->txtag,
2667                                                   tx_buffer->map);
2668
2669                                 m_freem(tx_buffer->m_head);
2670                                 tx_buffer->m_head = NULL;
2671                         }
2672                         tx_buffer->next_eop = -1;
2673
2674                         if (++first == adapter->num_tx_desc)
2675                                 first = 0;
2676
2677                         tx_buffer = &adapter->tx_buffer_area[first];
2678                         tx_desc = &adapter->tx_desc_base[first];
2679                 }
2680                 /* See if we can continue to the next packet */
2681                 last = tx_buffer->next_eop;
2682                 if (last != -1) {
2683                         KKASSERT(last >= 0 && last < adapter->num_tx_desc);
2684                         eop_desc = &adapter->tx_desc_base[last];
2685                         if (++last == adapter->num_tx_desc)
2686                                 last = 0;
2687                         done = last;
2688                 } else {
2689                         break;
2690                 }
2691         }
2692
2693         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2694                         BUS_DMASYNC_PREWRITE);
2695
2696         adapter->next_tx_to_clean = first;
2697
2698         /*
2699          * If we have enough room, clear IFF_OACTIVE to tell the stack
2700          * that it is OK to send packets.
2701          * If there are no pending descriptors, clear the timeout. Otherwise,
2702          * if some descriptors have been freed, restart the timeout.
2703          */
2704         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2705                 ifp->if_flags &= ~IFF_OACTIVE;
2706                 if (num_avail == adapter->num_tx_desc)
2707                         ifp->if_timer = 0;
2708                 else if (num_avail == adapter->num_tx_desc_avail)
2709                         ifp->if_timer = EM_TX_TIMEOUT;
2710         }
2711         adapter->num_tx_desc_avail = num_avail;
2712 }
2713
2714 /*********************************************************************
2715  *
2716  *  Get a buffer from system mbuf buffer pool.
2717  *
2718  **********************************************************************/
2719 static int
2720 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2721 {
2722         struct mbuf *mp = nmp;
2723         struct em_buffer *rx_buffer;
2724         struct ifnet *ifp;
2725         bus_addr_t paddr;
2726         int error;
2727
2728         ifp = &adapter->interface_data.ac_if;
2729
2730         if (mp == NULL) {
2731                 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2732                 if (mp == NULL) {
2733                         adapter->mbuf_cluster_failed++;
2734                         return (ENOBUFS);
2735                 }
2736                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2737         } else {
2738                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2739                 mp->m_data = mp->m_ext.ext_buf;
2740                 mp->m_next = NULL;
2741         }
2742
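        /*
         * ETHER_ALIGN (2 bytes) shifts the payload so the IP header lands on
         * a 32-bit boundary; the adjustment is only applied for standard
         * sized MTUs here.
         */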
2743         if (ifp->if_mtu <= ETHERMTU)
2744                 m_adj(mp, ETHER_ALIGN);
2745
2746         rx_buffer = &adapter->rx_buffer_area[i];
2747
2748         /*
2749          * Using memory from the mbuf cluster pool, invoke the
2750          * bus_dma machinery to arrange the memory mapping.
2751          */
2752         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2753                                 mtod(mp, void *), mp->m_len,
2754                                 em_dmamap_cb, &paddr, 0);
2755         if (error) {
2756                 m_freem(mp);
2757                 return (error);
2758         }
2759         rx_buffer->m_head = mp;
2760         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2761         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2762
2763         return (0);
2764 }
2765
2766 /*********************************************************************
2767  *
2768  *  Allocate memory for rx_buffer structures. Since we use one
2769  *  rx_buffer per received packet, the maximum number of rx_buffer's
2770  *  that we'll need is equal to the number of receive descriptors
2771  *  that we've allocated.
2772  *
2773  **********************************************************************/
2774 static int
2775 em_allocate_receive_structures(struct adapter *adapter)
2776 {
2777         int i, error, size;
2778         struct em_buffer *rx_buffer;
2779
2780         size = adapter->num_rx_desc * sizeof(struct em_buffer);
2781         adapter->rx_buffer_area = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2782
2783         error = bus_dma_tag_create(NULL,                /* parent */
2784                                    1, 0,                /* alignment, bounds */
2785                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2786                                    BUS_SPACE_MAXADDR,   /* highaddr */
2787                                    NULL, NULL,          /* filter, filterarg */
2788                                    MCLBYTES,            /* maxsize */
2789                                    1,                   /* nsegments */
2790                                    MCLBYTES,            /* maxsegsize */
2791                                    0,                   /* flags */
2792                                    &adapter->rxtag);
2793         if (error) {
2794                 device_printf(adapter->dev, "%s: bus_dma_tag_create failed; "
2795                               "error %u\n", __func__, error);
2796                 goto fail;
2797         }
2798  
2799         rx_buffer = adapter->rx_buffer_area;
2800         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2801                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2802                                           &rx_buffer->map);
2803                 if (error) {
2804                         device_printf(adapter->dev,
2805                                       "%s: bus_dmamap_create failed; "
2806                                       "error %u\n", __func__, error);
2807                         goto fail;
2808                 }
2809         }
2810
2811         for (i = 0; i < adapter->num_rx_desc; i++) {
2812                 error = em_get_buf(i, adapter, NULL, MB_DONTWAIT);
2813                 if (error)
2814                         goto fail;
2815         }
2816
2817         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2818                         BUS_DMASYNC_PREWRITE);
2819
2820         return (0);
2821 fail:
2822         em_free_receive_structures(adapter);
2823         return (error);
2824 }
2825
2826 /*********************************************************************
2827  *
2828  *  Allocate and initialize receive structures.
2829  *
2830  **********************************************************************/
2831 static int
2832 em_setup_receive_structures(struct adapter *adapter)
2833 {
2834         int error;
2835
2836         bzero(adapter->rx_desc_base,
2837               sizeof(struct em_rx_desc) * adapter->num_rx_desc);
2838
2839         error = em_allocate_receive_structures(adapter);
2840         if (error)
2841                 return (error);
2842
2843         /* Setup our descriptor pointers */
2844         adapter->next_rx_desc_to_check = 0;
2845
2846         return (0);
2847 }
2848
2849 /*********************************************************************
2850  *
2851  *  Enable receive unit.
2852  *
2853  **********************************************************************/
2854 static void
2855 em_initialize_receive_unit(struct adapter *adapter)
2856 {
2857         uint32_t reg_rctl;
2858         uint32_t reg_rxcsum;
2859         struct ifnet *ifp;
2860         uint64_t bus_addr;
2861  
2862         INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2863
2864         ifp = &adapter->interface_data.ac_if;
2865
2866         /*
2867          * Make sure receives are disabled while setting
2868          * up the descriptor ring
2869          */
2870         E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2871
2872         /* Set the Receive Delay Timer Register */
2873         E1000_WRITE_REG(&adapter->hw, RDTR, 
2874                         adapter->rx_int_delay.value | E1000_RDT_FPDB);
2875
2876         if (adapter->hw.mac_type >= em_82540) {
2877                 E1000_WRITE_REG(&adapter->hw, RADV,
2878                                 adapter->rx_abs_int_delay.value);
2879
2880                 /* Set the interrupt throttling rate in 256ns increments */  
2881                 if (em_int_throttle_ceil) {
2882                         E1000_WRITE_REG(&adapter->hw, ITR,
2883                                 1000000000 / 256 / em_int_throttle_ceil);
2884                 } else {
2885                         E1000_WRITE_REG(&adapter->hw, ITR, 0);
2886                 }
2887         }
2888
2889         /* Setup the Base and Length of the Rx Descriptor Ring */
2890         bus_addr = adapter->rxdma.dma_paddr;
2891         E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2892                         sizeof(struct em_rx_desc));
2893         E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2894         E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2895
2896         /* Setup the Receive Control Register */
2897         reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2898                    E1000_RCTL_RDMTS_HALF |
2899                    (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2900
2901         if (adapter->hw.tbi_compatibility_on == TRUE)
2902                 reg_rctl |= E1000_RCTL_SBP;
2903
2904         switch (adapter->rx_buffer_len) {
2905         default:
2906         case EM_RXBUFFER_2048:
2907                 reg_rctl |= E1000_RCTL_SZ_2048;
2908                 break;
2909         case EM_RXBUFFER_4096:
2910                 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX |
2911                             E1000_RCTL_LPE;
2912                 break;            
2913         case EM_RXBUFFER_8192:
2914                 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX |
2915                             E1000_RCTL_LPE;
2916                 break;
2917         case EM_RXBUFFER_16384:
2918                 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX |
2919                             E1000_RCTL_LPE;
2920                 break;
2921         }
2922
2923         if (ifp->if_mtu > ETHERMTU)
2924                 reg_rctl |= E1000_RCTL_LPE;
2925
2926         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2927         if ((adapter->hw.mac_type >= em_82543) &&
2928             (ifp->if_capenable & IFCAP_RXCSUM)) {
2929                 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2930                 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2931                 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2932         }
2933
2934 #ifdef EM_X60_WORKAROUND
2935         if (adapter->hw.mac_type == em_82573)
2936                 E1000_WRITE_REG(&adapter->hw, RDTR, 32);
2937 #endif
2938
2939         /* Enable Receives */
2940         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2941
2942         /* Setup the HW Rx Head and Tail Descriptor Pointers */
2943         E1000_WRITE_REG(&adapter->hw, RDH, 0);
2944         E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2945 }
2946
2947 /*********************************************************************
2948  *
2949  *  Free receive related data structures.
2950  *
2951  **********************************************************************/
2952 static void
2953 em_free_receive_structures(struct adapter *adapter)
2954 {
2955         struct em_buffer *rx_buffer;
2956         int i;
2957
2958         INIT_DEBUGOUT("free_receive_structures: begin");
2959
2960         if (adapter->rx_buffer_area != NULL) {
2961                 rx_buffer = adapter->rx_buffer_area;
2962                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2963                         if (rx_buffer->m_head != NULL) {
2964                                 bus_dmamap_unload(adapter->rxtag,
2965                                                   rx_buffer->map);
2966                                 m_freem(rx_buffer->m_head);
2967                                 rx_buffer->m_head = NULL;
2968                         }
2969                         if (rx_buffer->map != NULL) {
2970                                 bus_dmamap_destroy(adapter->rxtag,
2971                                                    rx_buffer->map);
2972                                 rx_buffer->map = NULL;
2973                         }
2974                 }
2975         }
2976         if (adapter->rx_buffer_area != NULL) {
2977                 kfree(adapter->rx_buffer_area, M_DEVBUF);
2978                 adapter->rx_buffer_area = NULL;
2979         }
2980         if (adapter->rxtag != NULL) {
2981                 bus_dma_tag_destroy(adapter->rxtag);
2982                 adapter->rxtag = NULL;
2983         }
2984 }
2985
2986 /*********************************************************************
2987  *
2988  *  This routine executes in interrupt context. It replenishes
2989  *  the mbufs in the descriptor ring and passes data that has been
2990  *  DMA'ed into host memory up to the upper layers.
2991  *
2992  *  We loop at most count times if count is > 0, or until done if
2993  *  count < 0.
2994  *
2995  *********************************************************************/
2996 static void
2997 em_rxeof(struct adapter *adapter, int count)
2998 {
2999         struct ifnet *ifp;
3000         struct mbuf *mp;
3001         uint8_t accept_frame = 0;
3002         uint8_t eop = 0;
3003         uint16_t len, desc_len, prev_len_adj;
3004         int i;
3005 #ifdef ETHER_INPUT_CHAIN
3006         struct mbuf_chain chain[MAXCPU];
3007         int j;
3008 #endif
3009
3010         /* Pointer to the receive descriptor being examined. */
3011         struct em_rx_desc *current_desc;
3012
3013         ifp = &adapter->interface_data.ac_if;
3014         i = adapter->next_rx_desc_to_check;
3015         current_desc = &adapter->rx_desc_base[i];
3016
3017         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3018                         BUS_DMASYNC_POSTREAD);
3019
3020         if (!(current_desc->status & E1000_RXD_STAT_DD))
3021                 return;
3022
3023 #ifdef ETHER_INPUT_CHAIN
3024         for (j = 0; j < ncpus; ++j)
3025                 chain[j].mc_head = chain[j].mc_tail = NULL;
3026 #endif
3027
3028         while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) {
3029                 logif(pkt_receive);
3030                 mp = adapter->rx_buffer_area[i].m_head;
3031                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3032                                 BUS_DMASYNC_POSTREAD);
3033                 bus_dmamap_unload(adapter->rxtag,
3034                                   adapter->rx_buffer_area[i].map);
3035
3036                 accept_frame = 1;
3037                 prev_len_adj = 0;
3038                 desc_len = le16toh(current_desc->length);
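                /*
                 * The hardware byte count includes the Ethernet CRC.  If the
                 * final fragment of a packet is shorter than the CRC itself,
                 * prev_len_adj records how many CRC bytes must be trimmed
                 * from the previous mbuf in the chain further below.
                 */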
3039                 if (current_desc->status & E1000_RXD_STAT_EOP) {
3040                         count--;
3041                         eop = 1;
3042                         if (desc_len < ETHER_CRC_LEN) {
3043                                 len = 0;
3044                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
3045                         } else {
3046                                 len = desc_len - ETHER_CRC_LEN;
3047                         }
3048                 } else {
3049                         eop = 0;
3050                         len = desc_len;
3051                 }
3052
3053                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3054                         uint8_t last_byte;
3055                         uint32_t pkt_len = desc_len;
3056
3057                         if (adapter->fmp != NULL)
3058                                 pkt_len += adapter->fmp->m_pkthdr.len; 
3059
3060                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3061
3062                         if (TBI_ACCEPT(&adapter->hw, current_desc->status, 
3063                                        current_desc->errors, 
3064                                        pkt_len, last_byte)) {
3065                                 em_tbi_adjust_stats(&adapter->hw, 
3066                                                     &adapter->stats, 
3067                                                     pkt_len, 
3068                                                     adapter->hw.mac_addr);
3069                                 if (len > 0)
3070                                         len--;
3071                         } else {
3072                                 accept_frame = 0;
3073                         }
3074                 }
3075
3076                 if (accept_frame) {
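                        /*
                         * Allocate a replacement cluster before handing the
                         * current mbuf up the stack; if that fails, recycle
                         * the old mbuf into the ring and count the packet as
                         * dropped so the ring never loses a buffer.
                         */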
3077                         if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
3078                                 adapter->dropped_pkts++;
3079                                 em_get_buf(i, adapter, mp, MB_DONTWAIT);
3080                                 if (adapter->fmp != NULL)
3081                                         m_freem(adapter->fmp);
3082                                 adapter->fmp = NULL;
3083                                 adapter->lmp = NULL;
3084                                 goto skip;
3085                         }
3086
3087                         /* Assign correct length to the current fragment */
3088                         mp->m_len = len;
3089
3090                         if (adapter->fmp == NULL) {
3091                                 mp->m_pkthdr.len = len;
3092                                 adapter->fmp = mp;       /* Store the first mbuf */
3093                                 adapter->lmp = mp;
3094                         } else {
3095                                 /* Chain mbuf's together */
3096                                 /* 
3097                                  * Adjust length of previous mbuf in chain if
3098                                  * we received less than 4 bytes in the last
3099                                  * descriptor.
3100                                  */
3101                                 if (prev_len_adj > 0) {
3102                                         adapter->lmp->m_len -= prev_len_adj;
3103                                         adapter->fmp->m_pkthdr.len -= prev_len_adj;
3104                                 }
3105                                 adapter->lmp->m_next = mp;
3106                                 adapter->lmp = adapter->lmp->m_next;
3107                                 adapter->fmp->m_pkthdr.len += len;
3108                         }
3109
3110                         if (eop) {
3111                                 adapter->fmp->m_pkthdr.rcvif = ifp;
3112                                 ifp->if_ipackets++;
3113
3114                                 em_receive_checksum(adapter, current_desc,
3115                                                     adapter->fmp);
3116                                 if (current_desc->status & E1000_RXD_STAT_VP) {
3117                                         VLAN_INPUT_TAG(adapter->fmp,
3118                                                        (current_desc->special & 
3119                                                         E1000_RXD_SPC_VLAN_MASK));
3120                                 } else {
3121 #ifdef ETHER_INPUT_CHAIN
3122                                         ether_input_chain(ifp, adapter->fmp,
3123                                                           chain);
3124 #else
3125                                         ifp->if_input(ifp, adapter->fmp);
3126 #endif
3127                                 }
3128                                 adapter->fmp = NULL;
3129                                 adapter->lmp = NULL;
3130                         }
3131                 } else {
3132                         adapter->dropped_pkts++;
3133                         em_get_buf(i, adapter, mp, MB_DONTWAIT);
3134                         if (adapter->fmp != NULL) 
3135                                 m_freem(adapter->fmp);
3136                         adapter->fmp = NULL;
3137                         adapter->lmp = NULL;
3138                 }
3139
3140 skip:
3141                 /* Zero out the receive descriptors status. */
3142                 current_desc->status = 0;
3143
3144                 /* Advance our pointers to the next descriptor. */
3145                 if (++i == adapter->num_rx_desc) {
3146                         i = 0;
3147                         current_desc = adapter->rx_desc_base;
3148                 } else {
3149                         current_desc++;
3150                 }
3151         }
3152
3153 #ifdef ETHER_INPUT_CHAIN
3154         ether_input_dispatch(chain);
3155 #endif
3156
3157         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3158                         BUS_DMASYNC_PREWRITE);
3159
3160         adapter->next_rx_desc_to_check = i;
3161
3162         /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
3163         if (--i < 0)
3164                 i = adapter->num_rx_desc - 1;
3165
3166         E1000_WRITE_REG(&adapter->hw, RDT, i);
3167 }
3168
3169 /*********************************************************************
3170  *
3171  *  Verify that the hardware indicated that the checksum is valid.
3172  *  Inform the stack about the status of checksum so that stack
3173  *  doesn't spend time verifying the checksum.
3174  *
3175  *********************************************************************/
3176 static void
3177 em_receive_checksum(struct adapter *adapter,
3178                     struct em_rx_desc *rx_desc,
3179                     struct mbuf *mp)
3180 {
3181         /* 82543 or newer only */
3182         if ((adapter->hw.mac_type < em_82543) ||
3183             /* Ignore Checksum bit is set */
3184             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3185                 mp->m_pkthdr.csum_flags = 0;
3186                 return;
3187         }
3188
3189         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3190                 /* Did it pass? */
3191                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3192                         /* IP Checksum Good */
3193                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3194                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3195                 } else {
3196                         mp->m_pkthdr.csum_flags = 0;
3197                 }
3198         }
3199
3200         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3201                 /* Did it pass? */
3202                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
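                        /*
                         * Per the usual BSD convention, CSUM_DATA_VALID plus
                         * CSUM_PSEUDO_HDR with csum_data set to 0xffff tells
                         * the stack the TCP/UDP checksum has already been
                         * verified and needs no further work.
                         */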
3203                         mp->m_pkthdr.csum_flags |=
3204                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
3205                          CSUM_FRAG_NOT_CHECKED);
3206                         mp->m_pkthdr.csum_data = htons(0xffff);
3207                 }
3208         }
3209 }
3210
3211
3212 static void 
3213 em_enable_vlans(struct adapter *adapter)
3214 {
3215         uint32_t ctrl;
3216
3217         E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3218
3219         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3220         ctrl |= E1000_CTRL_VME;
3221         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3222 }
3223
3224 static void
3225 em_disable_vlans(struct adapter *adapter)
3226 {
3227         uint32_t ctrl;
3228
3229         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3230         ctrl &= ~E1000_CTRL_VME;
3231         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3232 }
3233
3234 /*
3235  * note: we must call bus_enable_intr() prior to enabling the hardware
3236  * interrupt and bus_disable_intr() after disabling the hardware interrupt
3237  * in order to avoid handler execution races from scheduled interrupt
3238  * threads.
3239  */
3240 static void
3241 em_enable_intr(struct adapter *adapter)
3242 {
3243         struct ifnet *ifp = &adapter->interface_data.ac_if;
3244         
3245         if ((ifp->if_flags & IFF_POLLING) == 0) {
3246                 lwkt_serialize_handler_enable(ifp->if_serializer);
3247                 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3248         }
3249 }
3250
3251 static void
3252 em_disable_intr(struct adapter *adapter)
3253 {
3254         /*
3255          * The first revision of the 82542 had an erratum: when link was
3256          * forced, it would stay up even if the cable was disconnected.
3257          * Sequence errors were used to detect the disconnect and then the
3258          * driver would unforce the link.  That code is in the ISR.  For
3259          * this to work correctly the Sequence error interrupt had to be
3260          * enabled all the time.
3261          */
3262         if (adapter->hw.mac_type == em_82542_rev2_0) {
3263                 E1000_WRITE_REG(&adapter->hw, IMC,
3264                                 (0xffffffff & ~E1000_IMC_RXSEQ));
3265         } else {
3266                 E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
3267         }
3268
3269         lwkt_serialize_handler_disable(adapter->interface_data.ac_if.if_serializer);
3270 }
3271
3272 static int
3273 em_is_valid_ether_addr(uint8_t *addr)
3274 {
3275         static const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3276
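        /*
         * Reject addresses with the multicast/broadcast bit set in the first
         * octet as well as the all-zero address.
         */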
3277         if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3278                 return (FALSE);
3279         else
3280                 return (TRUE);
3281 }
3282
3283 void
3284 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3285 {
3286         pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
3287 }
3288
3289 void
3290 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3291 {
3292         *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
3293 }
3294
3295 void
3296 em_pci_set_mwi(struct em_hw *hw)
3297 {
3298         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
3299                          (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3300 }
3301
3302 void
3303 em_pci_clear_mwi(struct em_hw *hw)
3304 {
3305         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
3306                          (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3307 }
3308
3309 uint32_t
3310 em_io_read(struct em_hw *hw, unsigned long port)
3311 {
3312         struct em_osdep *io = hw->back;
3313
3314         return bus_space_read_4(io->io_bus_space_tag,
3315                                 io->io_bus_space_handle, port);
3316 }
3317
3318 void
3319 em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
3320 {
3321         struct em_osdep *io = hw->back;
3322
3323         bus_space_write_4(io->io_bus_space_tag,
3324                           io->io_bus_space_handle, port, value);
3325 }
3326
3327 /*
3328  * We may eventually really do this, but it's unnecessary
3329  * for now so we just return unsupported.
3330  */
3331 int32_t
3332 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3333 {
3334         return (0);
3335 }
3336
3337
3338 /*********************************************************************
3339  * 82544 Coexistence issue workaround.
3340  *    There are two issues:
3341  *      1. Transmit hang issue.
3342  *         To detect this issue, the following equation can be used:
3343  *             SIZE[3:0] + ADDR[2:0] = SUM[3:0]
3344  *         If SUM[3:0] is between 1 and 4, we will have this issue.
3345  *
3346  *      2. DAC issue.
3347  *         To detect this issue, the following equation can be used:
3348  *             SIZE[3:0] + ADDR[2:0] = SUM[3:0]
3349  *         If SUM[3:0] is between 9 and c, we will have this issue.
3350  *
3351  *
3352  *    WORKAROUND:
3353  *         Make sure the buffer does not end at an address whose low
3354  *         nibble is 1, 2, 3, 4 (hang) or 9, a, b, c (DAC).
3355  *
3356  *************************************************************************/
3357 static uint32_t
3358 em_fill_descriptors(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array)
3359 {
3360         /* The issue is sensitive to both the buffer length and address. */
3361         /* Buffers of four bytes or less never need to be split. */
3362         uint32_t safe_terminator;
3363         if (length <= 4) {
3364                 desc_array->descriptor[0].address = address;
3365                 desc_array->descriptor[0].length = length;
3366                 desc_array->elements = 1;
3367                 return (desc_array->elements);
3368         }
3369         safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
3370         /* If the terminator falls outside 0x1-0x4 and 0x9-0xC, no split is needed. */
3371         if (safe_terminator == 0 ||
3372             (safe_terminator > 4 && safe_terminator < 9) || 
3373             (safe_terminator > 0xC && safe_terminator <= 0xF)) {
3374                 desc_array->descriptor[0].address = address;
3375                 desc_array->descriptor[0].length = length;
3376                 desc_array->elements = 1;
3377                 return (desc_array->elements);
3378         }
3379
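        /*
         * Worked example (hypothetical values): with address 0x...06 and
         * length 0x0e, ((0x6 + 0xe) & 0xf) == 0x4 falls in the 1-4 hang
         * range above, so the transfer is split into a (length - 4) byte
         * descriptor followed by a trailing 4 byte descriptor.
         */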
3380         desc_array->descriptor[0].address = address;
3381         desc_array->descriptor[0].length = length - 4;
3382         desc_array->descriptor[1].address = address + (length - 4);
3383         desc_array->descriptor[1].length = 4;
3384         desc_array->elements = 2;
3385         return (desc_array->elements);
3386 }
3387
3388 /**********************************************************************
3389  *
3390  *  Update the board statistics counters.
3391  *
3392  **********************************************************************/
3393 static void
3394 em_update_stats_counters(struct adapter *adapter)
3395 {
3396         struct ifnet   *ifp;
3397
3398         if (adapter->hw.media_type == em_media_type_copper ||
3399             (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3400                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3401                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3402         }
3403         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3404         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3405         adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3406         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3407
3408         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3409         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3410         adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3411         adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3412         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3413         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3414         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3415         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3416         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3417         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3418         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3419         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3420         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3421         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3422         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3423         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3424         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3425         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3426         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3427         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3428
3429         /* For the 64-bit byte counters the low dword must be read first. */
3430         /* Both registers clear on the read of the high dword */
3431
3432         adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3433         adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3434         adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3435         adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3436
3437         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3438         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3439         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3440         adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3441         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3442
3443         adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3444         adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3445         adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3446         adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3447
3448         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3449         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3450         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3451         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3452         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3453         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3454         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3455         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3456         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3457         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3458
3459         if (adapter->hw.mac_type >= em_82543) {
3460                 adapter->stats.algnerrc += 
3461                     E1000_READ_REG(&adapter->hw, ALGNERRC);
3462                 adapter->stats.rxerrc += 
3463                     E1000_READ_REG(&adapter->hw, RXERRC);
3464                 adapter->stats.tncrs += 
3465                     E1000_READ_REG(&adapter->hw, TNCRS);
3466                 adapter->stats.cexterr += 
3467                     E1000_READ_REG(&adapter->hw, CEXTERR);
3468                 adapter->stats.tsctc += 
3469                     E1000_READ_REG(&adapter->hw, TSCTC);
3470                 adapter->stats.tsctfc += 
3471                     E1000_READ_REG(&adapter->hw, TSCTFC);
3472         }
3473         ifp = &adapter->interface_data.ac_if;
3474
3475         /* Fill out the OS statistics structure */
3476         ifp->if_collisions = adapter->stats.colc;
3477
3478         /* Rx Errors */
3479         ifp->if_ierrors =
3480                 adapter->dropped_pkts +
3481                 adapter->stats.rxerrc +
3482                 adapter->stats.crcerrs +
3483                 adapter->stats.algnerrc +
3484                 adapter->stats.ruc + adapter->stats.roc +
3485                 adapter->stats.mpc + adapter->stats.cexterr +
3486                 adapter->rx_overruns;
3487
3488         /* Tx Errors */
3489         ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3490                           adapter->watchdog_timeouts;
3491 }
3492
3493
3494 /**********************************************************************
3495  *
3496  *  This routine is called only when em_display_debug_stats is enabled.
3497  *  This routine provides a way to take a look at important statistics
3498  *  maintained by the driver and hardware.
3499  *
3500  **********************************************************************/
3501 static void
3502 em_print_debug_info(struct adapter *adapter)
3503 {
3504         device_t dev = adapter->dev;
3505         uint8_t *hw_addr = adapter->hw.hw_addr;
3506
3507         device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3508         device_printf(dev, "CTRL  = 0x%x RCTL = 0x%x\n",
3509                       E1000_READ_REG(&adapter->hw, CTRL),
3510                       E1000_READ_REG(&adapter->hw, RCTL));
3511         device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk\n",
3512                       ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
3513                       (E1000_READ_REG(&adapter->hw, PBA) & 0xffff));
3514         device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3515                       adapter->hw.fc_high_water, adapter->hw.fc_low_water);
3516         device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3517                       E1000_READ_REG(&adapter->hw, TIDV),
3518                       E1000_READ_REG(&adapter->hw, TADV));
3519         device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3520                       E1000_READ_REG(&adapter->hw, RDTR),
3521                       E1000_READ_REG(&adapter->hw, RADV));
3522         device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
3523                       (long long)adapter->tx_fifo_wrk_cnt,
3524                       (long long)adapter->tx_fifo_reset_cnt);
3525         device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3526                       E1000_READ_REG(&adapter->hw, TDH),
3527                       E1000_READ_REG(&adapter->hw, TDT));
3528         device_printf(dev, "Num Tx descriptors avail = %d\n",
3529                       adapter->num_tx_desc_avail);
3530         device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
3531                       adapter->no_tx_desc_avail1);
3532         device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
3533                       adapter->no_tx_desc_avail2);
3534         device_printf(dev, "Std mbuf failed = %ld\n",
3535                       adapter->mbuf_alloc_failed);
3536         device_printf(dev, "Std mbuf cluster failed = %ld\n",
3537                       adapter->mbuf_cluster_failed);
3538         device_printf(dev, "Driver dropped packets = %ld\n",
3539                       adapter->dropped_pkts);
3540 }
3541
3542 static void
3543 em_print_hw_stats(struct adapter *adapter)
3544 {
3545         device_t dev = adapter->dev;
3546
3547         device_printf(dev, "Excessive collisions = %lld\n",
3548                       (long long)adapter->stats.ecol);
3549         device_printf(dev, "Symbol errors = %lld\n",
3550                       (long long)adapter->stats.symerrs);
3551         device_printf(dev, "Sequence errors = %lld\n",
3552                       (long long)adapter->stats.sec);
3553         device_printf(dev, "Defer count = %lld\n",
3554                       (long long)adapter->stats.dc);
3555
3556         device_printf(dev, "Missed Packets = %lld\n",
3557                       (long long)adapter->stats.mpc);
3558         device_printf(dev, "Receive No Buffers = %lld\n",
3559                       (long long)adapter->stats.rnbc);
3560         /* RLEC is inaccurate on some hardware, calculate our own. */
3561         device_printf(dev, "Receive Length errors = %lld\n",
3562                       (long long)adapter->stats.roc +
3563                       (long long)adapter->stats.ruc);
3564         device_printf(dev, "Receive errors = %lld\n",
3565                       (long long)adapter->stats.rxerrc);
3566         device_printf(dev, "Crc errors = %lld\n",
3567                       (long long)adapter->stats.crcerrs);
3568         device_printf(dev, "Alignment errors = %lld\n",
3569                       (long long)adapter->stats.algnerrc);
3570         device_printf(dev, "Carrier extension errors = %lld\n",
3571                       (long long)adapter->stats.cexterr);
3572         device_printf(dev, "RX overruns = %lu\n", adapter->rx_overruns);
3573         device_printf(dev, "Watchdog timeouts = %lu\n",
3574                       adapter->watchdog_timeouts);
3575
3576         device_printf(dev, "XON Rcvd = %lld\n",
3577                       (long long)adapter->stats.xonrxc);
3578         device_printf(dev, "XON Xmtd = %lld\n",
3579                       (long long)adapter->stats.xontxc);
3580         device_printf(dev, "XOFF Rcvd = %lld\n",
3581                       (long long)adapter->stats.xoffrxc);
3582         device_printf(dev, "XOFF Xmtd = %lld\n",
3583                       (long long)adapter->stats.xofftxc);
3584
3585         device_printf(dev, "Good Packets Rcvd = %lld\n",
3586                       (long long)adapter->stats.gprc);
3587         device_printf(dev, "Good Packets Xmtd = %lld\n",
3588                       (long long)adapter->stats.gptc);
3589 }
3590
3591 static int
3592 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3593 {
3594         int error;
3595         int result;
3596         struct adapter *adapter;
3597
3598         result = -1;
3599         error = sysctl_handle_int(oidp, &result, 0, req);
3600
3601         if (error || !req->newptr)
3602                 return (error);
3603
3604         if (result == 1) {
3605                 adapter = (struct adapter *)arg1;
3606                 em_print_debug_info(adapter);
3607         }
3608
3609         return (error);
3610 }
3611
3612 static int
3613 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3614 {
3615         int error;
3616         int result;
3617         struct adapter *adapter;
3618
3619         result = -1;
3620         error = sysctl_handle_int(oidp, &result, 0, req);
3621
3622         if (error || !req->newptr)
3623                 return (error);
3624
3625         if (result == 1) {
3626                 adapter = (struct adapter *)arg1;
3627                 em_print_hw_stats(adapter);
3628         }
3629
3630         return (error);
3631 }
3632
3633 static int
3634 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3635 {
3636         struct em_int_delay_info *info;
3637         struct adapter *adapter;
3638         uint32_t regval;
3639         int error;
3640         int usecs;
3641         int ticks;
3642
3643         info = (struct em_int_delay_info *)arg1;
3644         adapter = info->adapter;
3645         usecs = info->value;
3646         error = sysctl_handle_int(oidp, &usecs, 0, req);
3647         if (error != 0 || req->newptr == NULL)
3648                 return (error);
3649         if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3650                 return (EINVAL);
3651         info->value = usecs;
3652         ticks = E1000_USECS_TO_TICKS(usecs);
3653
3654         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3655         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3656         regval = (regval & ~0xffff) | (ticks & 0xffff);
3657         /* Handle a few special cases. */
3658         switch (info->offset) {
3659         case E1000_RDTR:
3660         case E1000_82542_RDTR:
3661                 regval |= E1000_RDT_FPDB;
3662                 break;
3663         case E1000_TIDV:
3664         case E1000_82542_TIDV:
3665                 if (ticks == 0) {
3666                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3667                         /* Don't write 0 into the TIDV register. */
3668                         regval++;
3669                 } else
3670                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3671                 break;
3672         }
3673         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3674         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3675         return (0);
3676 }
3677
3678 static void
3679 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3680                         const char *description, struct em_int_delay_info *info,
3681                         int offset, int value)
3682 {
3683         info->adapter = adapter;
3684         info->offset = offset;
3685         info->value = value;
3686         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3687                         SYSCTL_CHILDREN(adapter->sysctl_tree),
3688                         OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3689                         info, 0, em_sysctl_int_delay, "I", description);
3690 }
3691
3692 static int
3693 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3694 {
3695         struct adapter *adapter = (void *)arg1;
3696         int error;
3697         int throttle;
3698
3699         throttle = em_int_throttle_ceil;
3700         error = sysctl_handle_int(oidp, &throttle, 0, req);
3701         if (error || req->newptr == NULL)
3702                 return error;
3703         if (throttle < 0 || throttle > 1000000000 / 256)
3704                 return EINVAL;
3705         if (throttle) {
3706                 /*
3707                  * Set the interrupt throttling rate in 256ns increments,
3708                  * recalculate sysctl value assignment to get exact frequency.
3709                  */
3710                 throttle = 1000000000 / 256 / throttle;
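                /*
                 * Example: requesting 10000 interrupts/sec gives an ITR
                 * value of 1000000000 / 256 / 10000 = 390 (256 ns units,
                 * i.e. ~100 us between interrupts); the ceiling is then
                 * recomputed as 3906250 / 390 = 10016 interrupts/sec.
                 */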
3711                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3712                 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3713                 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3714                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3715         } else {
3716                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3717                 em_int_throttle_ceil = 0;
3718                 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3719                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3720         }
3721         device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n", 
3722                         em_int_throttle_ceil);
3723         return 0;
3724 }