1 /*
2  *
3  * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4  *
5  * Copyright (c) 2001-2006, Intel Corporation
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  * 
11  *  1. Redistributions of source code must retain the above copyright notice,
12  *     this list of conditions and the following disclaimer.
13  * 
14  *  2. Redistributions in binary form must reproduce the above copyright
15  *     notice, this list of conditions and the following disclaimer in the
16  *     documentation and/or other materials provided with the distribution.
17  * 
18  *  3. Neither the name of the Intel Corporation nor the names of its
19  *     contributors may be used to endorse or promote products derived from
20  *     this software without specific prior written permission.
21  * 
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *
34  *
35  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
36  * 
37  * This code is derived from software contributed to The DragonFly Project
38  * by Matthew Dillon <dillon@backplane.com>
39  * 
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  * 3. Neither the name of The DragonFly Project nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific, prior written permission.
53  * 
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
57  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
58  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
60  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
61  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
62  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
63  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
64  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  * 
67  * $DragonFly: src/sys/dev/netif/em/if_em.c,v 1.60 2007/08/14 13:30:35 sephe Exp $
68  * $FreeBSD$
69  */
70 /*
71  * SERIALIZATION API RULES:
72  *
73  * - If the driver uses the same serializer for the interrupt as for the
74  *   ifnet, most of the serialization will be done automatically for the
75  *   driver.  
76  *
77  * - ifmedia entry points will be serialized by the ifmedia code using the
78  *   ifnet serializer.
79  *
80  * - if_* entry points except for if_input will be serialized by the IF
81  *   and protocol layers.
82  *
83  * - The device driver must be sure to serialize access from timeout code
84  *   installed by the device driver.
85  *
86  * - The device driver typically holds the serializer at the time it wishes
87  *   to call if_input.  If so, it should pass the serializer to if_input and
88  *   note that the serializer might be dropped temporarily by if_input 
89  *   (e.g. in case it has to bridge the packet to another interface).
90  *
91  *   NOTE!  Since callers into the device driver hold the ifnet serializer,
92  *   the device driver may be holding a serializer at the time it calls
93  *   if_input even if it is not serializer-aware.
94  */
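/*
 * Illustrative sketch only (not part of the original driver sources): a
 * timeout handler installed with callout_reset() must take the ifnet
 * serializer itself, since the callout code does not do it for the driver.
 * The names foo_softc and foo_timer below are hypothetical.
 *
 *	static void
 *	foo_timer(void *xsc)
 *	{
 *		struct foo_softc *sc = xsc;
 *		struct ifnet *ifp = &sc->arpcom.ac_if;
 *
 *		lwkt_serialize_enter(ifp->if_serializer);
 *		foo_do_periodic_work(sc);
 *		callout_reset(&sc->timer, hz, foo_timer, sc);
 *		lwkt_serialize_exit(ifp->if_serializer);
 *	}
 *
 * The em(4) timer callbacks are installed the same way via callout_reset()
 * further below (e.g. em_local_timer()).
 */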
95
96 #include "opt_polling.h"
97 #include "opt_inet.h"
98
99 #include <sys/param.h>
100 #include <sys/bus.h>
101 #include <sys/endian.h>
102 #include <sys/kernel.h>
103 #include <sys/ktr.h>
104 #include <sys/malloc.h>
105 #include <sys/mbuf.h>
106 #include <sys/module.h>
107 #include <sys/rman.h>
108 #include <sys/serialize.h>
109 #include <sys/socket.h>
110 #include <sys/sockio.h>
111 #include <sys/sysctl.h>
112
113 #include <net/bpf.h>
114 #include <net/ethernet.h>
115 #include <net/if.h>
116 #include <net/if_arp.h>
117 #include <net/if_dl.h>
118 #include <net/if_media.h>
119 #include <net/if_types.h>
120 #include <net/ifq_var.h>
121 #include <net/vlan/if_vlan_var.h>
122
123 #ifdef INET
124 #include <netinet/in.h>
125 #include <netinet/in_systm.h>
126 #include <netinet/in_var.h>
127 #include <netinet/ip.h>
128 #include <netinet/tcp.h>
129 #include <netinet/udp.h>
130 #endif
131
132 #include <dev/netif/em/if_em_hw.h>
133 #include <dev/netif/em/if_em.h>
134
135 #define EM_X60_WORKAROUND
136
137 /*********************************************************************
138  *  Set this to one to display debug statistics
139  *********************************************************************/
140 int     em_display_debug_stats = 0;
141
142 /*********************************************************************
143  *  Driver version
144  *********************************************************************/
145
146 char em_driver_version[] = "6.2.9";
147
148
149 /*********************************************************************
150  *  PCI Device ID Table
151  *
152  *  Used by probe to select which devices the driver attaches to
153  *  Last field stores an index into em_strings
154  *  Last entry must be all 0s
155  *
156  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
157  *********************************************************************/
158
159 static em_vendor_info_t em_vendor_info_array[] =
160 {
161         /* Intel(R) PRO/1000 Network Connection */
162         { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
163         { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
164         { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
165         { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
166         { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},
167
168         { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
169         { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
170         { 0x8086, E1000_DEV_ID_82541ER_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
171         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
172         { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
173         { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
174         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
175
176         { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},
177
178         { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
179         { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
180
181         { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
182         { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
183         { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
184         { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
185
186         { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
187         { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
188         { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
189         { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
190         { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
191
192         { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
193         { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
194         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
195         { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
196         { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
197         { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
198         { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
199         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
200         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
201                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
202
203         { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
204         { 0x8086, E1000_DEV_ID_82547EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
205         { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
206
207         { 0x8086, E1000_DEV_ID_82571EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
208         { 0x8086, E1000_DEV_ID_82571EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
209         { 0x8086, E1000_DEV_ID_82571EB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
210         { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
211                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
212         { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE,
213                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
214
215         { 0x8086, E1000_DEV_ID_82572EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
216         { 0x8086, E1000_DEV_ID_82572EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
217         { 0x8086, E1000_DEV_ID_82572EI_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
218         { 0x8086, E1000_DEV_ID_82572EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
219
220         { 0x8086, E1000_DEV_ID_82573E,          PCI_ANY_ID, PCI_ANY_ID, 0},
221         { 0x8086, E1000_DEV_ID_82573E_IAMT,     PCI_ANY_ID, PCI_ANY_ID, 0},
222         { 0x8086, E1000_DEV_ID_82573L,          PCI_ANY_ID, PCI_ANY_ID, 0},
223
224         { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
225                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
226         { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
227                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
228         { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
229                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
230         { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
231                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
232
233         { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,  PCI_ANY_ID, PCI_ANY_ID, 0},
234         { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,    PCI_ANY_ID, PCI_ANY_ID, 0},
235         { 0x8086, E1000_DEV_ID_ICH8_IGP_C,      PCI_ANY_ID, PCI_ANY_ID, 0},
236         { 0x8086, E1000_DEV_ID_ICH8_IFE,        PCI_ANY_ID, PCI_ANY_ID, 0},
237         { 0x8086, E1000_DEV_ID_ICH8_IFE_GT,     PCI_ANY_ID, PCI_ANY_ID, 0},
238         { 0x8086, E1000_DEV_ID_ICH8_IFE_G,      PCI_ANY_ID, PCI_ANY_ID, 0},
239         { 0x8086, E1000_DEV_ID_ICH8_IGP_M,      PCI_ANY_ID, PCI_ANY_ID, 0},
240
241         { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
242         { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
243         /* required last entry */
244         { 0, 0, 0, 0, 0}
245 };
246
247 /*********************************************************************
248  *  Table of branding strings for all supported NICs.
249  *********************************************************************/
250
251 static const char *em_strings[] = {
252         "Intel(R) PRO/1000 Network Connection"
253 };
254
255 /*********************************************************************
256  *  Function prototypes
257  *********************************************************************/
258 static int      em_probe(device_t);
259 static int      em_attach(device_t);
260 static int      em_detach(device_t);
261 static int      em_shutdown(device_t);
262 static void     em_intr(void *);
263 static int      em_suspend(device_t);
264 static int      em_resume(device_t);
265 static void     em_start(struct ifnet *);
266 static int      em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
267 static void     em_watchdog(struct ifnet *);
268 static void     em_init(void *);
269 static void     em_stop(void *);
270 static void     em_media_status(struct ifnet *, struct ifmediareq *);
271 static int      em_media_change(struct ifnet *);
272 static void     em_identify_hardware(struct adapter *);
273 static int      em_allocate_pci_resources(device_t);
274 static void     em_free_pci_resources(device_t);
275 static void     em_local_timer(void *);
276 static int      em_hardware_init(struct adapter *);
277 static void     em_setup_interface(device_t, struct adapter *);
278 static int      em_setup_transmit_structures(struct adapter *);
279 static void     em_initialize_transmit_unit(struct adapter *);
280 static int      em_setup_receive_structures(struct adapter *);
281 static void     em_initialize_receive_unit(struct adapter *);
282 static void     em_enable_intr(struct adapter *);
283 static void     em_disable_intr(struct adapter *);
284 static void     em_free_transmit_structures(struct adapter *);
285 static void     em_free_receive_structures(struct adapter *);
286 static void     em_update_stats_counters(struct adapter *);
287 static void     em_txeof(struct adapter *);
288 static int      em_allocate_receive_structures(struct adapter *);
289 static void     em_rxeof(struct adapter *, int);
290 static void     em_receive_checksum(struct adapter *, struct em_rx_desc *,
291                                     struct mbuf *);
292 static void     em_transmit_checksum_setup(struct adapter *, struct mbuf *,
293                                            uint32_t *, uint32_t *);
294 static void     em_set_promisc(struct adapter *);
295 static void     em_disable_promisc(struct adapter *);
296 static void     em_set_multi(struct adapter *);
297 static void     em_print_hw_stats(struct adapter *);
298 static void     em_update_link_status(struct adapter *);
299 static int      em_get_buf(int i, struct adapter *, struct mbuf *, int how);
300 static void     em_enable_vlans(struct adapter *);
301 static void     em_disable_vlans(struct adapter *);
302 static int      em_encap(struct adapter *, struct mbuf *);
303 static void     em_smartspeed(struct adapter *);
304 static int      em_82547_fifo_workaround(struct adapter *, int);
305 static void     em_82547_update_fifo_head(struct adapter *, int);
306 static int      em_82547_tx_fifo_reset(struct adapter *);
307 static void     em_82547_move_tail(void *);
308 static void     em_82547_move_tail_serialized(struct adapter *);
309 static int      em_dma_malloc(struct adapter *, bus_size_t,
310                               struct em_dma_alloc *);
311 static void     em_dma_free(struct adapter *, struct em_dma_alloc *);
312 static void     em_print_debug_info(struct adapter *);
313 static int      em_is_valid_ether_addr(uint8_t *);
314 static int      em_sysctl_stats(SYSCTL_HANDLER_ARGS);
315 static int      em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
316 static uint32_t em_fill_descriptors(bus_addr_t address, uint32_t length, 
317                                    PDESC_ARRAY desc_array);
318 static int      em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
319 static int      em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
320 static void     em_add_int_delay_sysctl(struct adapter *, const char *,
321                                         const char *,
322                                         struct em_int_delay_info *, int, int);
323
324 /*********************************************************************
325  *  FreeBSD Device Interface Entry Points
326  *********************************************************************/
327
328 static device_method_t em_methods[] = {
329         /* Device interface */
330         DEVMETHOD(device_probe, em_probe),
331         DEVMETHOD(device_attach, em_attach),
332         DEVMETHOD(device_detach, em_detach),
333         DEVMETHOD(device_shutdown, em_shutdown),
334         DEVMETHOD(device_suspend, em_suspend),
335         DEVMETHOD(device_resume, em_resume),
336         {0, 0}
337 };
338
339 static driver_t em_driver = {
340         "em", em_methods, sizeof(struct adapter),
341 };
342
343 static devclass_t em_devclass;
344
345 DECLARE_DUMMY_MODULE(if_em);
346 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
347
348 /*********************************************************************
349  *  Tunable default values.
350  *********************************************************************/
351
352 #define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
353 #define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)
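/*
 * The interrupt delay registers count in units of roughly 1.024 usec, which
 * is what the 1024/1000 scaling (with rounding) in the two macros above
 * encodes.  For example, E1000_USECS_TO_TICKS(66) = (1000*66 + 512) / 1024
 * = 64, and E1000_TICKS_TO_USECS(64) = (1024*64 + 500) / 1000 = 66.
 */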
354
355 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
356 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
357 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
358 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
359 static int em_int_throttle_ceil = 10000;
360 static int em_rxd = EM_DEFAULT_RXD;
361 static int em_txd = EM_DEFAULT_TXD;
362 static int em_smart_pwr_down = FALSE;
363
364 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
365 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
366 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
367 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
368 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
369 TUNABLE_INT("hw.em.rxd", &em_rxd);
370 TUNABLE_INT("hw.em.txd", &em_txd);
371 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
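/*
 * These defaults can be overridden from the loader environment before the
 * driver initializes, e.g. via loader.conf(5).  The values shown here are
 * purely illustrative:
 *
 *	hw.em.rxd=512
 *	hw.em.int_throttle_ceil=8000
 */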
372
373 /*
374  * Kernel trace for characterization of operations
375  */
376 #if !defined(KTR_IF_EM)
377 #define KTR_IF_EM       KTR_ALL
378 #endif
379 KTR_INFO_MASTER(if_em);
380 KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin", 0);
381 KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end", 0);
382 #ifdef DEVICE_POLLING
383 KTR_INFO(KTR_IF_EM, if_em, poll_beg, 2, "poll begin", 0);
384 KTR_INFO(KTR_IF_EM, if_em, poll_end, 3, "poll end", 0);
385 #endif
386 KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet", 0);
387 KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet", 0);
388 KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean", 0);
389 #define logif(name)     KTR_LOG(if_em_ ## name)
390
391 /*********************************************************************
392  *  Device identification routine
393  *
394  *  em_probe determines whether the driver should be loaded for an
395  *  adapter, based on the adapter's PCI vendor/device ID.
396  *
397  *  return 0 on success, positive on failure
398  *********************************************************************/
399
400 static int
401 em_probe(device_t dev)
402 {
403         em_vendor_info_t *ent;
404
405         uint16_t pci_vendor_id = 0;
406         uint16_t pci_device_id = 0;
407         uint16_t pci_subvendor_id = 0;
408         uint16_t pci_subdevice_id = 0;
409         char adapter_name[60];
410
411         INIT_DEBUGOUT("em_probe: begin");
412
413         pci_vendor_id = pci_get_vendor(dev);
414         if (pci_vendor_id != EM_VENDOR_ID)
415                 return (ENXIO);
416
417         pci_device_id = pci_get_device(dev);
418         pci_subvendor_id = pci_get_subvendor(dev);
419         pci_subdevice_id = pci_get_subdevice(dev);
420
421         ent = em_vendor_info_array;
422         while (ent->vendor_id != 0) {
423                 if ((pci_vendor_id == ent->vendor_id) &&
424                     (pci_device_id == ent->device_id) &&
425
426                     ((pci_subvendor_id == ent->subvendor_id) ||
427                      (ent->subvendor_id == PCI_ANY_ID)) &&
428
429                     ((pci_subdevice_id == ent->subdevice_id) ||
430                      (ent->subdevice_id == PCI_ANY_ID))) {
431                         ksnprintf(adapter_name, sizeof(adapter_name),
432                                  "%s, Version - %s",  em_strings[ent->index], 
433                                  em_driver_version);
434                         device_set_desc_copy(dev, adapter_name);
435                         device_set_async_attach(dev, TRUE);
436                         return (0);
437                 }
438                 ent++;
439         }
440
441         return (ENXIO);
442 }
443
444 /*********************************************************************
445  *  Device initialization routine
446  *
447  *  The attach entry point is called when the driver is being loaded.
448  *  This routine identifies the type of hardware, allocates all resources
449  *  and initializes the hardware.
450  *
451  *  return 0 on success, positive on failure
452  *********************************************************************/
453
454 static int
455 em_attach(device_t dev)
456 {
457         struct adapter *adapter;
458         int tsize, rsize;
459         int error = 0;
460
461         INIT_DEBUGOUT("em_attach: begin");
462
463         adapter = device_get_softc(dev);
464
465         callout_init(&adapter->timer);
466         callout_init(&adapter->tx_fifo_timer);
467
468         adapter->dev = dev;
469         adapter->osdep.dev = dev;
470
471         /* SYSCTL stuff */
472         sysctl_ctx_init(&adapter->sysctl_ctx);
473         adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
474                                                SYSCTL_STATIC_CHILDREN(_hw),
475                                                OID_AUTO, 
476                                                device_get_nameunit(dev),
477                                                CTLFLAG_RD,
478                                                0, "");
479
480         if (adapter->sysctl_tree == NULL) {
481                 device_printf(dev, "Unable to create sysctl tree\n");
482                 return EIO;
483         }
484
485         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
486                         SYSCTL_CHILDREN(adapter->sysctl_tree),
487                         OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, 
488                         (void *)adapter, 0,
489                         em_sysctl_debug_info, "I", "Debug Information");
490
491         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
492                         SYSCTL_CHILDREN(adapter->sysctl_tree),
493                         OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, 
494                         (void *)adapter, 0,
495                         em_sysctl_stats, "I", "Statistics");
496
497         /* Determine hardware revision */
498         em_identify_hardware(adapter);
499
500         /* Set up some sysctls for the tunable interrupt delays */
501         em_add_int_delay_sysctl(adapter, "rx_int_delay",
502                                 "receive interrupt delay in usecs",
503                                 &adapter->rx_int_delay,
504                                 E1000_REG_OFFSET(&adapter->hw, RDTR),
505                                 em_rx_int_delay_dflt);
506         em_add_int_delay_sysctl(adapter, "tx_int_delay",
507                                 "transmit interrupt delay in usecs",
508                                 &adapter->tx_int_delay,
509                                 E1000_REG_OFFSET(&adapter->hw, TIDV),
510                                 em_tx_int_delay_dflt);
511         if (adapter->hw.mac_type >= em_82540) {
512                 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
513                                         "receive interrupt delay limit in usecs",
514                                         &adapter->rx_abs_int_delay,
515                                         E1000_REG_OFFSET(&adapter->hw, RADV),
516                                         em_rx_abs_int_delay_dflt);
517                 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
518                                         "transmit interrupt delay limit in usecs",
519                                         &adapter->tx_abs_int_delay,
520                                         E1000_REG_OFFSET(&adapter->hw, TADV),
521                                         em_tx_abs_int_delay_dflt);
522                 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
523                         SYSCTL_CHILDREN(adapter->sysctl_tree),
524                         OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
525                         adapter, 0, em_sysctl_int_throttle, "I", NULL);
526         }
527
528         /*
529          * Validate the number of transmit and receive descriptors.  The
530          * count must not exceed the hardware maximum, and the resulting
531          * ring size must be a multiple of EM_DBA_ALIGN.
532          */
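        /*
         * Worked example (for illustration only): legacy tx/rx descriptors
         * are 16 bytes each, so a ring of 256 descriptors occupies 4096
         * bytes, which is a multiple of any power-of-two EM_DBA_ALIGN up
         * to 4096 and therefore passes the checks below.
         */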
533         if (((em_txd * sizeof(struct em_tx_desc)) % EM_DBA_ALIGN) != 0 ||
534             (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
535             (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
536             (em_txd < EM_MIN_TXD)) {
537                 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
538                               EM_DEFAULT_TXD, em_txd);
539                 adapter->num_tx_desc = EM_DEFAULT_TXD;
540         } else {
541                 adapter->num_tx_desc = em_txd;
542         }
543  
544         if (((em_rxd * sizeof(struct em_rx_desc)) % EM_DBA_ALIGN) != 0 ||
545             (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
546             (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
547             (em_rxd < EM_MIN_RXD)) {
548                 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
549                               EM_DEFAULT_RXD, em_rxd);
550                 adapter->num_rx_desc = EM_DEFAULT_RXD;
551         } else {
552                 adapter->num_rx_desc = em_rxd;
553         }
554
555         SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO, "rxd",
556                        CTLFLAG_RD, &adapter->num_rx_desc, 0, NULL);
557         SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(adapter->sysctl_tree), OID_AUTO, "txd",
558                        CTLFLAG_RD, &adapter->num_tx_desc, 0, NULL);
559
560         adapter->hw.autoneg = DO_AUTO_NEG;
561         adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
562         adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
563         adapter->hw.tbi_compatibility_en = TRUE;
564         adapter->rx_buffer_len = EM_RXBUFFER_2048;
565
566         adapter->hw.phy_init_script = 1;
567         adapter->hw.phy_reset_disable = FALSE;
568
569 #ifndef EM_MASTER_SLAVE
570         adapter->hw.master_slave = em_ms_hw_default;
571 #else
572         adapter->hw.master_slave = EM_MASTER_SLAVE;
573 #endif
574
575         /*
576          * Set the max frame size assuming standard ethernet
577          * sized frames.
578          */   
579         adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
580
581         adapter->hw.min_frame_size =
582             MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
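        /*
         * With the standard constants this is 1500 + 14 + 4 = 1518 bytes
         * (ETHER_MAX_LEN) for max_frame_size; assuming the conventional
         * 60-byte CRC-less minimum for MINIMUM_ETHERNET_PACKET_SIZE, the
         * minimum frame size comes to 64 bytes.
         */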
583
584         /*
585          * This controls when hardware reports transmit completion
586          * status.
587          */
588         adapter->hw.report_tx_early = 1;
589
590         error = em_allocate_pci_resources(dev);
591         if (error)
592                 goto fail;
593
594         /* Initialize eeprom parameters */
595         em_init_eeprom_params(&adapter->hw);
596
597         tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
598                          EM_DBA_ALIGN);
599
600         /* Allocate Transmit Descriptor ring */
601         error = em_dma_malloc(adapter, tsize, &adapter->txdma);
602         if (error) {
603                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
604                 goto fail;
605         }
606         adapter->tx_desc_base = (struct em_tx_desc *)adapter->txdma.dma_vaddr;
607
608         rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
609                          EM_DBA_ALIGN);
610
611         /* Allocate Receive Descriptor ring */
612         error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
613         if (error) {
614                 device_printf(dev, "Unable to allocate rx_desc memory\n");
615                 goto fail;
616         }
617         adapter->rx_desc_base = (struct em_rx_desc *)adapter->rxdma.dma_vaddr;
618
619         /* Initialize the hardware */
620         if (em_hardware_init(adapter)) {
621                 device_printf(dev, "Unable to initialize the hardware\n");
622                 error = EIO;
623                 goto fail;
624         }
625
626         /* Copy the permanent MAC address out of the EEPROM */
627         if (em_read_mac_addr(&adapter->hw) < 0) {
628                 device_printf(dev,
629                               "EEPROM read error while reading MAC address\n");
630                 error = EIO;
631                 goto fail;
632         }
633
634         if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
635                 device_printf(dev, "Invalid MAC address\n");
636                 error = EIO;
637                 goto fail;
638         }
639
640         /* Setup OS specific network interface */
641         em_setup_interface(dev, adapter);
642
643         /* Initialize statistics */
644         em_clear_hw_cntrs(&adapter->hw);
645         em_update_stats_counters(adapter);
646         adapter->hw.get_link_status = 1;
647         em_update_link_status(adapter);
648
649         /* Indicate SOL/IDER usage */
650         if (em_check_phy_reset_block(&adapter->hw)) {
651                 device_printf(dev, "PHY reset is blocked due to "
652                               "SOL/IDER session.\n");
653         }
654  
655         /* Identify 82544 on PCIX */
656         em_get_bus_info(&adapter->hw);
657         if (adapter->hw.bus_type == em_bus_type_pcix &&
658             adapter->hw.mac_type == em_82544)
659                 adapter->pcix_82544 = TRUE;
660         else
661                 adapter->pcix_82544 = FALSE;
662
663         error = bus_setup_intr(dev, adapter->res_interrupt, INTR_NETSAFE,
664                            em_intr, adapter,
665                            &adapter->int_handler_tag,
666                            adapter->interface_data.ac_if.if_serializer);
667         if (error) {
668                 device_printf(dev, "Error registering interrupt handler!\n");
669                 ether_ifdetach(&adapter->interface_data.ac_if);
670                 goto fail;
671         }
672
673         INIT_DEBUGOUT("em_attach: end");
674         return(0);
675
676 fail:
677         em_detach(dev);
678         return(error);
679 }
680
681 /*********************************************************************
682  *  Device removal routine
683  *
684  *  The detach entry point is called when the driver is being removed.
685  *  This routine stops the adapter and deallocates all the resources
686  *  that were allocated for driver operation.
687  *
688  *  return 0 on success, positive on failure
689  *********************************************************************/
690
691 static int
692 em_detach(device_t dev)
693 {
694         struct adapter *adapter = device_get_softc(dev);
695
696         INIT_DEBUGOUT("em_detach: begin");
697
698         if (device_is_attached(dev)) {
699                 struct ifnet *ifp = &adapter->interface_data.ac_if;
700
701                 lwkt_serialize_enter(ifp->if_serializer);
702                 adapter->in_detach = 1;
703                 em_stop(adapter);
704                 em_phy_hw_reset(&adapter->hw);
705                 bus_teardown_intr(dev, adapter->res_interrupt, 
706                                   adapter->int_handler_tag);
707                 lwkt_serialize_exit(ifp->if_serializer);
708
709                 ether_ifdetach(ifp);
710         }
711         bus_generic_detach(dev);
712
713         em_free_pci_resources(dev);
714
715         /* Free Transmit Descriptor ring */
716         if (adapter->tx_desc_base != NULL) {
717                 em_dma_free(adapter, &adapter->txdma);
718                 adapter->tx_desc_base = NULL;
719         }
720
721         /* Free Receive Descriptor ring */
722         if (adapter->rx_desc_base != NULL) {
723                 em_dma_free(adapter, &adapter->rxdma);
724                 adapter->rx_desc_base = NULL;
725         }
726
727         /* Free sysctl tree */
728         if (adapter->sysctl_tree != NULL) {
729                 adapter->sysctl_tree = NULL;
730                 sysctl_ctx_free(&adapter->sysctl_ctx);
731         }
732
733         return (0);
734 }
735
736 /*********************************************************************
737  *
738  *  Shutdown entry point
739  *
740  **********************************************************************/
741
742 static int
743 em_shutdown(device_t dev)
744 {
745         struct adapter *adapter = device_get_softc(dev);
746         struct ifnet *ifp = &adapter->interface_data.ac_if;
747
748         lwkt_serialize_enter(ifp->if_serializer);
749         em_stop(adapter);
750         lwkt_serialize_exit(ifp->if_serializer);
751
752         return (0);
753 }
754
755 /*
756  * Suspend/resume device methods.
757  */
758 static int
759 em_suspend(device_t dev)
760 {
761         struct adapter *adapter = device_get_softc(dev);
762         struct ifnet *ifp = &adapter->interface_data.ac_if;
763
764         lwkt_serialize_enter(ifp->if_serializer);
765         em_stop(adapter);
766         lwkt_serialize_exit(ifp->if_serializer);
767         return (0);
768 }
769
770 static int
771 em_resume(device_t dev)
772 {
773         struct adapter *adapter = device_get_softc(dev);
774         struct ifnet *ifp = &adapter->interface_data.ac_if;
775
776         lwkt_serialize_enter(ifp->if_serializer);
777         ifp->if_flags &= ~IFF_RUNNING;
778         em_init(adapter);
779         if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
780                 em_start(ifp);
781         lwkt_serialize_exit(ifp->if_serializer);
782
783         return bus_generic_resume(dev);
784 }
785
786 /*********************************************************************
787  *  Transmit entry point
788  *
789  *  em_start is called by the stack to initiate a transmit.
790  *  The driver will remain in this routine as long as there are
791  *  packets to transmit and transmit resources are available.
792  *  If transmit resources are not available, the stack is notified
793  *  and the packet is requeued.
794  **********************************************************************/
795
796 static void
797 em_start(struct ifnet *ifp)
798 {
799         struct mbuf *m_head;
800         struct adapter *adapter = ifp->if_softc;
801
802         ASSERT_SERIALIZED(ifp->if_serializer);
803
804         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
805                 return;
806         if (!adapter->link_active)
807                 return;
808         while (!ifq_is_empty(&ifp->if_snd)) {
809                 m_head = ifq_poll(&ifp->if_snd);
810
811                 if (m_head == NULL)
812                         break;
813
814                 logif(pkt_txqueue);
815                 if (em_encap(adapter, m_head)) {
816                         ifp->if_flags |= IFF_OACTIVE;
817                         break;
818                 }
819                 ifq_dequeue(&ifp->if_snd, m_head);
820
821                 /* Send a copy of the frame to the BPF listener */
822                 BPF_MTAP(ifp, m_head);
823
824                 /* Set timeout in case hardware has problems transmitting. */
825                 ifp->if_timer = EM_TX_TIMEOUT;
826         }
827 }
828
829 /*********************************************************************
830  *  Ioctl entry point
831  *
832  *  em_ioctl is called when the user wants to configure the
833  *  interface.
834  *
835  *  return 0 on success, positive on failure
836  **********************************************************************/
837
838 static int
839 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
840 {
841         int max_frame_size, mask, error = 0, reinit = 0;
842         struct ifreq *ifr = (struct ifreq *) data;
843         struct adapter *adapter = ifp->if_softc;
844         uint16_t eeprom_data = 0;
845
846         ASSERT_SERIALIZED(ifp->if_serializer);
847
848         if (adapter->in_detach)
849                 return 0;
850
851         switch (command) {
852         case SIOCSIFMTU:
853                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
854                 switch (adapter->hw.mac_type) {
855                 case em_82573:
856                         /*
857                          * 82573 only supports jumbo frames
858                          * if ASPM is disabled.
859                          */
860                         em_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3,
861                             1, &eeprom_data);
862                         if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
863                                 max_frame_size = ETHER_MAX_LEN;
864                                 break;
865                         }
866                         /* Allow Jumbo frames */
867                         /* FALLTHROUGH */
868                 case em_82571:
869                 case em_82572:
870                 case em_80003es2lan:    /* Limit Jumbo Frame size */
871                         max_frame_size = 9234;
872                         break;
873                 case em_ich8lan:
874                         /* ICH8 does not support jumbo frames */
875                         max_frame_size = ETHER_MAX_LEN;
876                         break;
877                 default:
878                         max_frame_size = MAX_JUMBO_FRAME_SIZE;
879                         break;
880                 }
881                 if (ifr->ifr_mtu >
882                         max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
883                         error = EINVAL;
884                 } else {
885                         ifp->if_mtu = ifr->ifr_mtu;
886                         adapter->hw.max_frame_size = 
887                         ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
888                         ifp->if_flags &= ~IFF_RUNNING;
889                         em_init(adapter);
890                 }
891                 break;
892         case SIOCSIFFLAGS:
893                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS "
894                                "(Set Interface Flags)");
895                 if (ifp->if_flags & IFF_UP) {
896                         if (!(ifp->if_flags & IFF_RUNNING)) {
897                                 em_init(adapter);
898                         } else if ((ifp->if_flags ^ adapter->if_flags) &
899                                    IFF_PROMISC) {
900                                 em_disable_promisc(adapter);
901                                 em_set_promisc(adapter);
902                         }
903                 } else {
904                         if (ifp->if_flags & IFF_RUNNING)
905                                 em_stop(adapter);
906                 }
907                 adapter->if_flags = ifp->if_flags;
908                 break;
909         case SIOCADDMULTI:
910         case SIOCDELMULTI:
911                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
912                 if (ifp->if_flags & IFF_RUNNING) {
913                         em_disable_intr(adapter);
914                         em_set_multi(adapter);
915                         if (adapter->hw.mac_type == em_82542_rev2_0)
916                                 em_initialize_receive_unit(adapter);
917 #ifdef DEVICE_POLLING
918                         /* Do not enable interrupt if polling(4) is enabled */
919                         if ((ifp->if_flags & IFF_POLLING) == 0)
920 #endif
921                         em_enable_intr(adapter);
922                 }
923                 break;
924         case SIOCSIFMEDIA:
925                 /* Check SOL/IDER usage */
926                 if (em_check_phy_reset_block(&adapter->hw)) {
927                         if_printf(ifp, "Media change is blocked due to "
928                                   "SOL/IDER session.\n");
929                         break;
930                 }
931                 /* FALLTHROUGH */
932         case SIOCGIFMEDIA:
933                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA "
934                                "(Get/Set Interface Media)");
935                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
936                 break;
937         case SIOCSIFCAP:
938                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
939                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
940                 if (mask & IFCAP_HWCSUM) {
941                         ifp->if_capenable ^= IFCAP_HWCSUM;
942                         reinit = 1;
943                 }
944                 if (mask & IFCAP_VLAN_HWTAGGING) {
945                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
946                         reinit = 1;
947                 }
948                 if (reinit && (ifp->if_flags & IFF_RUNNING)) {
949                         ifp->if_flags &= ~IFF_RUNNING;
950                         em_init(adapter);
951                 }
952                 break;
953         default:
954                 error = ether_ioctl(ifp, command, data);
955                 break;
956         }
957
958         return (error);
959 }
960
961 /*********************************************************************
962  *  Watchdog entry point
963  *
964  *  This routine is called whenever hardware quits transmitting.
965  *
966  **********************************************************************/
967
968 static void
969 em_watchdog(struct ifnet *ifp)
970 {
971         struct adapter *adapter = ifp->if_softc;
972
973         /*
974          * If we are in this routine because of pause frames, then
975          * don't reset the hardware.
976          */
977         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
978                 ifp->if_timer = EM_TX_TIMEOUT;
979                 return;
980         }
981
982         if (em_check_for_link(&adapter->hw) == 0)
983                 if_printf(ifp, "watchdog timeout -- resetting\n");
984
985         ifp->if_flags &= ~IFF_RUNNING;
986         em_init(adapter);
987
988         adapter->watchdog_timeouts++;
989 }
990
991 /*********************************************************************
992  *  Init entry point
993  *
994  *  This routine is used in two ways.  It is used by the stack as the
995  *  init entry point in the network interface structure.  It is also
996  *  used by the driver as a hw/sw initialization routine to get to a
997  *  consistent state.
998  *
999  *  Returns nothing; failures are reported via if_printf().
1000  **********************************************************************/
1001
1002 static void
1003 em_init(void *arg)
1004 {
1005         struct adapter *adapter = arg;
1006         uint32_t pba;
1007         struct ifnet *ifp = &adapter->interface_data.ac_if;
1008
1009         ASSERT_SERIALIZED(ifp->if_serializer);
1010
1011         INIT_DEBUGOUT("em_init: begin");
1012
1013         if (ifp->if_flags & IFF_RUNNING)
1014                 return;
1015
1016         em_stop(adapter);
1017
1018         /*
1019          * Packet Buffer Allocation (PBA)
1020          * Writing PBA sets the receive portion of the buffer;
1021          * the remainder is used for the transmit buffer.
1022          *
1023          * Devices before the 82547 had a Packet Buffer of 64K.
1024          *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1025          * After the 82547 the buffer was reduced to 40K.
1026          *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1027          *   Note: default does not leave enough room for Jumbo Frame >10k.
1028          */
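        /*
         * Worked example, taken from the cases below: an 82547 running
         * with standard frames gets pba = E1000_PBA_30K, i.e. 30K of the
         * 40K packet buffer for receive, and tx_fifo_size then evaluates
         * to (E1000_PBA_40K - E1000_PBA_30K) << EM_PBA_BYTES_SHIFT, the
         * remaining 10K, for transmit.
         */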
1029         switch (adapter->hw.mac_type) {
1030         case em_82547:
1031         case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1032                 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
1033                         pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1034                 else
1035                         pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1036
1037                 adapter->tx_fifo_head = 0;
1038                 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1039                 adapter->tx_fifo_size =
1040                         (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1041                 break;
1042         /* Total Packet Buffer on these is 48K */
1043         case em_82571:
1044         case em_82572:
1045         case em_80003es2lan:
1046                 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1047                 break;
1048         case em_82573: /* 82573: Total Packet Buffer is 32K */
1049                 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1050                 break;
1051         case em_ich8lan:
1052                 pba = E1000_PBA_8K;
1053                 break;
1054         default:
1055                 /* Devices before 82547 had a Packet Buffer of 64K.   */
1056                 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
1057                         pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1058                 else
1059                         pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1060         }
1061
1062         INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1063         E1000_WRITE_REG(&adapter->hw, PBA, pba);
1064
1065         /* Get the latest mac address, User can use a LAA */
1066         bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
1067               ETHER_ADDR_LEN);
1068
1069         /* Initialize the hardware */
1070         if (em_hardware_init(adapter)) {
1071                 if_printf(ifp, "Unable to initialize the hardware\n");
1072                 return;
1073         }
1074         em_update_link_status(adapter);
1075
1076         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1077                 em_enable_vlans(adapter);
1078
1079         /* Set hardware offload abilities */
1080         if (adapter->hw.mac_type >= em_82543) {
1081                 if (ifp->if_capenable & IFCAP_TXCSUM)
1082                         ifp->if_hwassist = EM_CHECKSUM_FEATURES;
1083                 else
1084                         ifp->if_hwassist = 0;
1085         }
1086
1087         /* Prepare transmit descriptors and buffers */
1088         if (em_setup_transmit_structures(adapter)) {
1089                 if_printf(ifp, "Could not setup transmit structures\n");
1090                 em_stop(adapter);
1091                 return;
1092         }
1093         em_initialize_transmit_unit(adapter);
1094
1095         /* Setup Multicast table */
1096         em_set_multi(adapter);
1097
1098         /* Prepare receive descriptors and buffers */
1099         if (em_setup_receive_structures(adapter)) {
1100                 if_printf(ifp, "Could not setup receive structures\n");
1101                 em_stop(adapter);
1102                 return;
1103         }
1104         em_initialize_receive_unit(adapter);
1105
1106         /* Don't lose promiscuous settings */
1107         em_set_promisc(adapter);
1108
1109         ifp->if_flags |= IFF_RUNNING;
1110         ifp->if_flags &= ~IFF_OACTIVE;
1111
1112         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1113         em_clear_hw_cntrs(&adapter->hw);
1114
1115 #ifdef DEVICE_POLLING
1116         /* Do not enable interrupt if polling(4) is enabled */
1117         if (ifp->if_flags & IFF_POLLING)
1118                 em_disable_intr(adapter);
1119         else
1120 #endif
1121         em_enable_intr(adapter);
1122
1123         /* Don't reset the phy next time init gets called */
1124         adapter->hw.phy_reset_disable = TRUE;
1125 }
1126
1127 #ifdef DEVICE_POLLING
1128
1129 static void
1130 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1131 {
1132         struct adapter *adapter = ifp->if_softc;
1133         uint32_t reg_icr;
1134
1135         logif(poll_beg);
1136
1137         ASSERT_SERIALIZED(ifp->if_serializer);
1138
1139         switch(cmd) {
1140         case POLL_REGISTER:
1141                 em_disable_intr(adapter);
1142                 break;
1143         case POLL_DEREGISTER:
1144                 em_enable_intr(adapter);
1145                 break;
1146         case POLL_AND_CHECK_STATUS:
1147                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1148                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1149                         callout_stop(&adapter->timer);
1150                         adapter->hw.get_link_status = 1;
1151                         em_check_for_link(&adapter->hw);
1152                         em_update_link_status(adapter);
1153                         callout_reset(&adapter->timer, hz, em_local_timer,
1154                                       adapter);
1155                 }
1156                 /* fall through */
1157         case POLL_ONLY:
1158                 if (ifp->if_flags & IFF_RUNNING) {
1159                         em_rxeof(adapter, count);
1160                         em_txeof(adapter);
1161
1162                         if (!ifq_is_empty(&ifp->if_snd))
1163                                 em_start(ifp);
1164                 }
1165                 break;
1166         }
1167         logif(poll_end);
1168 }
1169
1170 #endif /* DEVICE_POLLING */
1171
1172 /*********************************************************************
1173  *
1174  *  Interrupt Service routine
1175  *
1176  *********************************************************************/
1177 static void
1178 em_intr(void *arg)
1179 {
1180         uint32_t reg_icr;
1181         struct ifnet *ifp;
1182         struct adapter *adapter = arg;
1183
1184         ifp = &adapter->interface_data.ac_if;  
1185
1186         logif(intr_beg);
1187         ASSERT_SERIALIZED(ifp->if_serializer);
1188
1189         reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1190         if ((adapter->hw.mac_type >= em_82571 &&
1191              (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
1192             reg_icr == 0) {
1193                 logif(intr_end);
1194                 return;
1195         }
1196
1197         /*
1198          * XXX: some laptops trigger several spurious interrupts on em(4)
1199          * when in the resume cycle.  The ICR register reports an all-ones
1200          * value in this case. Processing such interrupts would lead to
1201          * a freeze. I don't know why.
1202          */
1203         if (reg_icr == 0xffffffff) {
1204                 logif(intr_end);
1205                 return;
1206         }
1207
1208         /*
1209          * note: do not attempt to improve efficiency by looping.  This 
1210          * only results in unnecessary piecemeal collection of received
1211          * packets and unnecessary piecemeal cleanups of the transmit ring.
1212          */
1213         if (ifp->if_flags & IFF_RUNNING) {
1214                 em_rxeof(adapter, -1);
1215                 em_txeof(adapter);
1216         }
1217
1218         /* Link status change */
1219         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1220                 callout_stop(&adapter->timer);
1221                 adapter->hw.get_link_status = 1;
1222                 em_check_for_link(&adapter->hw);
1223                 em_update_link_status(adapter);
1224                 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1225         }
1226
1227         if (reg_icr & E1000_ICR_RXO)
1228                 adapter->rx_overruns++;
1229
1230         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
1231                 em_start(ifp);
1232         logif(intr_end);
1233 }
1234
1235 /*********************************************************************
1236  *
1237  *  Media Ioctl callback
1238  *
1239  *  This routine is called whenever the user queries the status of
1240  *  the interface using ifconfig.
1241  *
1242  **********************************************************************/
1243 static void
1244 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1245 {
1246         struct adapter *adapter = ifp->if_softc;
1247         u_char fiber_type = IFM_1000_SX;
1248
1249         INIT_DEBUGOUT("em_media_status: begin");
1250
1251         ASSERT_SERIALIZED(ifp->if_serializer);
1252
1253         em_check_for_link(&adapter->hw);
1254         em_update_link_status(adapter);
1255
1256         ifmr->ifm_status = IFM_AVALID;
1257         ifmr->ifm_active = IFM_ETHER;
1258
1259         if (!adapter->link_active)
1260                 return;
1261
1262         ifmr->ifm_status |= IFM_ACTIVE;
1263
1264         if (adapter->hw.media_type == em_media_type_fiber ||
1265             adapter->hw.media_type == em_media_type_internal_serdes) {
1266                 if (adapter->hw.mac_type == em_82545)
1267                         fiber_type = IFM_1000_LX;
1268                 ifmr->ifm_active |= fiber_type | IFM_FDX;
1269         } else {
1270                 switch (adapter->link_speed) {
1271                 case 10:
1272                         ifmr->ifm_active |= IFM_10_T;
1273                         break;
1274                 case 100:
1275                         ifmr->ifm_active |= IFM_100_TX;
1276                         break;
1277                 case 1000:
1278                         ifmr->ifm_active |= IFM_1000_T;
1279                         break;
1280                 }
1281                 if (adapter->link_duplex == FULL_DUPLEX)
1282                         ifmr->ifm_active |= IFM_FDX;
1283                 else
1284                         ifmr->ifm_active |= IFM_HDX;
1285         }
1286 }
1287
1288 /*********************************************************************
1289  *
1290  *  Media Ioctl callback
1291  *
1292  *  This routine is called when the user changes speed/duplex using
1293  *  the media/mediaopt options of ifconfig.
1294  *
1295  **********************************************************************/
1296 static int
1297 em_media_change(struct ifnet *ifp)
1298 {
1299         struct adapter *adapter = ifp->if_softc;
1300         struct ifmedia *ifm = &adapter->media;
1301
1302         INIT_DEBUGOUT("em_media_change: begin");
1303
1304         ASSERT_SERIALIZED(ifp->if_serializer);
1305
1306         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1307                 return (EINVAL);
1308
1309         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1310         case IFM_AUTO:
1311                 adapter->hw.autoneg = DO_AUTO_NEG;
1312                 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1313                 break;
1314         case IFM_1000_LX:
1315         case IFM_1000_SX:
1316         case IFM_1000_T:
1317                 adapter->hw.autoneg = DO_AUTO_NEG;
1318                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1319                 break;
1320         case IFM_100_TX:
1321                 adapter->hw.autoneg = FALSE;
1322                 adapter->hw.autoneg_advertised = 0;
1323                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1324                         adapter->hw.forced_speed_duplex = em_100_full;
1325                 else
1326                         adapter->hw.forced_speed_duplex = em_100_half;
1327                 break;
1328         case IFM_10_T:
1329                 adapter->hw.autoneg = FALSE;
1330                 adapter->hw.autoneg_advertised = 0;
1331                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1332                         adapter->hw.forced_speed_duplex = em_10_full;
1333                 else
1334                         adapter->hw.forced_speed_duplex = em_10_half;
1335                 break;
1336         default:
1337                 if_printf(ifp, "Unsupported media type\n");
1338         }
1339         /*
1340          * As the speed/duplex settings may have changed, we need to
1341          * reset the PHY.
1342          */
1343         adapter->hw.phy_reset_disable = FALSE;
1344
1345         ifp->if_flags &= ~IFF_RUNNING;
1346         em_init(adapter);
1347
1348         return(0);
1349 }
1350
1351 static void
1352 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1353          int error)
1354 {
1355         struct em_q *q = arg;
1356
1357         if (error)
1358                 return;
1359         KASSERT(nsegs <= EM_MAX_SCATTER,
1360                 ("Too many DMA segments returned when mapping tx packet"));
1361         q->nsegs = nsegs;
1362         bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1363 }
1364
1365 /*********************************************************************
1366  *
1367  *  This routine maps the mbufs to tx descriptors.
1368  *
1369  *  return 0 on success, positive on failure
1370  **********************************************************************/
1371 static int
1372 em_encap(struct adapter *adapter, struct mbuf *m_head)
1373 {
1374         uint32_t txd_upper = 0, txd_lower = 0, txd_used = 0, txd_saved = 0;
1375         int i, j, error, last = 0;
1376
1377         struct ifvlan *ifv = NULL;
1378         struct em_q q;
1379         struct em_buffer *tx_buffer = NULL, *tx_buffer_first;
1380         bus_dmamap_t map;
1381         struct em_tx_desc *current_tx_desc = NULL;
1382         struct ifnet *ifp = &adapter->interface_data.ac_if;
1383
1384         /*
1385          * Force a cleanup if the number of available TX
1386          * descriptors hits the threshold.
1387          */
1388         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1389                 em_txeof(adapter);
1390                 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1391                         adapter->no_tx_desc_avail1++;
1392                         return (ENOBUFS);
1393                 }
1394         }
1395
1396         /*
1397          * Capture the first descriptor index; this descriptor will hold
1398          * the index of the EOP descriptor, which is now the only one
1399          * that gets a DONE bit writeback.
1400          */
1401         tx_buffer_first = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
1402
1403         /*
1404          * Map the packet for DMA.
1405          */
1406         map = tx_buffer_first->map;
1407         error = bus_dmamap_load_mbuf(adapter->txtag, map, m_head, em_tx_cb,
1408                                      &q, BUS_DMA_NOWAIT);
1409         if (error != 0) {
1410                 adapter->no_tx_dma_setup++;
1411                 return (error);
1412         }
1413         KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1414
1415         if (q.nsegs > (adapter->num_tx_desc_avail - 2)) {
1416                 adapter->no_tx_desc_avail2++;
1417                 error = ENOBUFS;
1418                 goto fail;
1419         }
1420
1421         if (ifp->if_hwassist > 0) {
1422                 em_transmit_checksum_setup(adapter,  m_head,
1423                                            &txd_upper, &txd_lower);
1424         }
1425
1426         /* Find out if we are in vlan mode */
1427         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1428             m_head->m_pkthdr.rcvif != NULL &&
1429             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1430                 ifv = m_head->m_pkthdr.rcvif->if_softc;
1431
1432         i = adapter->next_avail_tx_desc;
1433         if (adapter->pcix_82544)
1434                 txd_saved = i;
1435
1436         /* Set up our transmit descriptors */
1437         for (j = 0; j < q.nsegs; j++) {
1438                 /* If adapter is 82544 and on PCIX bus */
1439                 if (adapter->pcix_82544) {
1440                         DESC_ARRAY desc_array;
1441                         uint32_t array_elements, counter;
1442
1443                         /* 
1444                          * Check the Address and Length combination and
1445                          * split the data accordingly
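                         * (the 82544 on a PCI-X bus cannot DMA certain
                         * address/length combinations, so one DMA segment
                         * may have to be split across several descriptors)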
1446                          */
1447                         array_elements = em_fill_descriptors(q.segs[j].ds_addr,
1448                                                 q.segs[j].ds_len, &desc_array);
1449                         for (counter = 0; counter < array_elements; counter++) {
1450                                 if (txd_used == adapter->num_tx_desc_avail) {
1451                                         adapter->next_avail_tx_desc = txd_saved;
1452                                         adapter->no_tx_desc_avail2++;
1453                                         error = ENOBUFS;
1454                                         goto fail;
1455                                 }
1456                                 tx_buffer = &adapter->tx_buffer_area[i];
1457                                 current_tx_desc = &adapter->tx_desc_base[i];
1458                                 current_tx_desc->buffer_addr = htole64(
1459                                         desc_array.descriptor[counter].address);
1460                                 current_tx_desc->lower.data = htole32(
1461                                         adapter->txd_cmd | txd_lower |
1462                                         (uint16_t)desc_array.descriptor[counter].length);
1463                                 current_tx_desc->upper.data = htole32(txd_upper);
1464
1465                                 last = i;
1466                                 if (++i == adapter->num_tx_desc)
1467                                         i = 0;
1468
1469                                 tx_buffer->m_head = NULL;
1470                                 tx_buffer->next_eop = -1;
1471                                 txd_used++;
1472                         }
1473                 } else {
1474                         tx_buffer = &adapter->tx_buffer_area[i];
1475                         current_tx_desc = &adapter->tx_desc_base[i];
1476
1477                         current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1478                         current_tx_desc->lower.data = htole32(
1479                                 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1480                         current_tx_desc->upper.data = htole32(txd_upper);
1481
1482                         last = i;
1483                         if (++i == adapter->num_tx_desc)
1484                                 i = 0;
1485
1486                         tx_buffer->m_head = NULL;
1487                         tx_buffer->next_eop = -1;
1488                 }
1489         }
1490
1491         adapter->next_avail_tx_desc = i;
1492         if (adapter->pcix_82544)
1493                 adapter->num_tx_desc_avail -= txd_used;
1494         else
1495                 adapter->num_tx_desc_avail -= q.nsegs;
1496
1497         if (ifv != NULL) {
1498                 /* Set the vlan id */
1499                 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1500
1501                 /* Tell hardware to add tag */
1502                 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1503         }
1504
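        /*
         * The dmamap that was actually loaded belongs to the first buffer
         * slot, but the mbuf is stored on the last slot used.  Swap the
         * maps so the loaded map stays with the mbuf and is unloaded when
         * the EOP descriptor is cleaned up in em_txeof().
         */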
1505         tx_buffer->m_head = m_head;
1506         tx_buffer_first->map = tx_buffer->map;
1507         tx_buffer->map = map;
1508         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1509
1510         /*
1511          * Last Descriptor of Packet needs End Of Packet (EOP)
1512          * and Report Status (RS)
1513          */
1514         current_tx_desc->lower.data |=
1515                 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1516
1517         /*
1518          * Keep track in the first buffer which descriptor will be
1519          * written back.
1520          */
1521         tx_buffer_first->next_eop = last;
1522
1523         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1524                         BUS_DMASYNC_PREWRITE);
1525
1526         /* 
1527          * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
1528          * that this frame is available to transmit.
1529          */
1530         if (adapter->hw.mac_type == em_82547 &&
1531             adapter->link_duplex == HALF_DUPLEX) {
1532                 em_82547_move_tail_serialized(adapter);
1533         } else {
1534                 E1000_WRITE_REG(&adapter->hw, TDT, i);
1535                 if (adapter->hw.mac_type == em_82547) {
1536                         em_82547_update_fifo_head(adapter,
1537                                                   m_head->m_pkthdr.len);
1538                 }
1539         }
1540
1541         return (0);
1542 fail:
1543         bus_dmamap_unload(adapter->txtag, map);
1544         return error;
1545 }
1546
1547 /*********************************************************************
1548  *
1549  * 82547 workaround to avoid controller hang in half-duplex environment.
1550  * The workaround is to avoid queuing a large packet that would span
1551  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1552  * in this case. We do that only when the FIFO is quiescent.
1553  *
1554  **********************************************************************/
1555 static void
1556 em_82547_move_tail(void *arg)
1557 {
1558         struct adapter *adapter = arg;
1559         struct ifnet *ifp = &adapter->interface_data.ac_if;
1560
1561         lwkt_serialize_enter(ifp->if_serializer);
1562         em_82547_move_tail_serialized(adapter);
1563         lwkt_serialize_exit(ifp->if_serializer);
1564 }
1565
1566 static void
1567 em_82547_move_tail_serialized(struct adapter *adapter)
1568 {
1569         uint16_t hw_tdt;
1570         uint16_t sw_tdt;
1571         struct em_tx_desc *tx_desc;
1572         uint16_t length = 0;
1573         boolean_t eop = 0;
1574
1575         hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1576         sw_tdt = adapter->next_avail_tx_desc;
1577
1578         while (hw_tdt != sw_tdt) {
1579                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1580                 length += tx_desc->lower.flags.length;
1581                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1582                 if (++hw_tdt == adapter->num_tx_desc)
1583                         hw_tdt = 0;
1584
1585                 if (eop) {
1586                         if (em_82547_fifo_workaround(adapter, length)) {
1587                                 adapter->tx_fifo_wrk_cnt++;
1588                                 callout_reset(&adapter->tx_fifo_timer, 1,
1589                                         em_82547_move_tail, adapter);
1590                                 break;
1591                         }
1592                         E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1593                         em_82547_update_fifo_head(adapter, length);
1594                         length = 0;
1595                 }
1596         }       
1597 }
1598
1599 static int
1600 em_82547_fifo_workaround(struct adapter *adapter, int len)
1601 {       
1602         int fifo_space, fifo_pkt_len;
1603
1604         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1605
1606         if (adapter->link_duplex == HALF_DUPLEX) {
1607                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1608
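                /*
                 * The padded packet would wrap past the end of the FIFO
                 * by at least EM_82547_PKT_THRESH bytes.  Try to reset
                 * the FIFO pointers; if the FIFO has not drained yet,
                 * return 1 so the caller retries via a callout.
                 */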
1609                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1610                         if (em_82547_tx_fifo_reset(adapter))
1611                                 return (0);
1612                         else
1613                                 return (1);
1614                 }
1615         }
1616
1617         return (0);
1618 }
1619
1620 static void
1621 em_82547_update_fifo_head(struct adapter *adapter, int len)
1622 {
1623         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1624
1625         /* tx_fifo_head is always 16 byte aligned */
1626         adapter->tx_fifo_head += fifo_pkt_len;
1627         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1628                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1629 }
1630
1631 static int
1632 em_82547_tx_fifo_reset(struct adapter *adapter)
1633 {
1634         uint32_t tctl;
1635
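        /*
         * Only reset when everything is idle: the descriptor ring is empty
         * (TDT == TDH), the internal FIFO head/tail registers and their
         * saved copies agree, and the FIFO packet count (TDFPC) is zero.
         */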
1636         if (E1000_READ_REG(&adapter->hw, TDT) == E1000_READ_REG(&adapter->hw, TDH) &&
1637             E1000_READ_REG(&adapter->hw, TDFT) == E1000_READ_REG(&adapter->hw, TDFH) &&
1638             E1000_READ_REG(&adapter->hw, TDFTS) == E1000_READ_REG(&adapter->hw, TDFHS) &&
1639             E1000_READ_REG(&adapter->hw, TDFPC) == 0) {
1640                 /* Disable TX unit */
1641                 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1642                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1643
1644                 /* Reset FIFO pointers */
1645                 E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1646                 E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1647                 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1648                 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1649
1650                 /* Re-enable TX unit */
1651                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1652                 E1000_WRITE_FLUSH(&adapter->hw);
1653
1654                 adapter->tx_fifo_head = 0;
1655                 adapter->tx_fifo_reset_cnt++;
1656
1657                 return (TRUE);
1658         } else {
1659                 return (FALSE);
1660         }
1661 }
1662
1663 static void
1664 em_set_promisc(struct adapter *adapter)
1665 {
1666         uint32_t reg_rctl;
1667         struct ifnet *ifp = &adapter->interface_data.ac_if;
1668
1669         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1670
1671         adapter->em_insert_vlan_header = 0;
1672         if (ifp->if_flags & IFF_PROMISC) {
1673                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1674                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1675
1676                 /*
1677                  * Disable VLAN stripping in promiscuous mode.
1678                  * This allows VLAN-tagged frames to be bridged and
1679                  * makes VLAN tags visible in tcpdump.
1680                  */
1681                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1682                         em_disable_vlans(adapter);
1683                 adapter->em_insert_vlan_header = 1;
1684         } else if (ifp->if_flags & IFF_ALLMULTI) {
1685                 reg_rctl |= E1000_RCTL_MPE;
1686                 reg_rctl &= ~E1000_RCTL_UPE;
1687                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1688         }
1689 }
1690
1691 static void
1692 em_disable_promisc(struct adapter *adapter)
1693 {
1694         struct ifnet *ifp = &adapter->interface_data.ac_if;
1695
1696         uint32_t reg_rctl;
1697
1698         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1699
1700         reg_rctl &= (~E1000_RCTL_UPE);
1701         reg_rctl &= (~E1000_RCTL_MPE);
1702         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1703
1704         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1705                 em_enable_vlans(adapter);
1706         adapter->em_insert_vlan_header = 0;
1707 }
1708
1709 /*********************************************************************
1710  *  Multicast Update
1711  *
1712  *  This routine is called whenever the multicast address list is updated.
1713  *
1714  **********************************************************************/
1715
1716 static void
1717 em_set_multi(struct adapter *adapter)
1718 {
1719         uint32_t reg_rctl = 0;
1720         uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1721         struct ifmultiaddr *ifma;
1722         int mcnt = 0;
1723         struct ifnet *ifp = &adapter->interface_data.ac_if;
1724
1725         IOCTL_DEBUGOUT("em_set_multi: begin");
1726
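        /*
         * The 82542 rev 2.0 is placed in receiver reset (RCTL_RST), with
         * memory-write-invalidate disabled, while the multicast table
         * array is rewritten; it is taken back out of reset below.
         */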
1727         if (adapter->hw.mac_type == em_82542_rev2_0) {
1728                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1729                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1730                         em_pci_clear_mwi(&adapter->hw);
1731                 reg_rctl |= E1000_RCTL_RST;
1732                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1733                 msec_delay(5);
1734         }
1735
1736         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1737                 if (ifma->ifma_addr->sa_family != AF_LINK)
1738                         continue;
1739
1740                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1741                         break;
1742
1743                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1744                       &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1745                 mcnt++;
1746         }
1747
1748         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1749                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1750                 reg_rctl |= E1000_RCTL_MPE;
1751                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1752         } else {
1753                 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1754         }
1755
1756         if (adapter->hw.mac_type == em_82542_rev2_0) {
1757                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1758                 reg_rctl &= ~E1000_RCTL_RST;
1759                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1760                 msec_delay(5);
1761                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1762                         em_pci_set_mwi(&adapter->hw);
1763         }
1764 }
1765
1766 /*********************************************************************
1767  *  Timer routine
1768  *
1769  *  This routine checks for link status and updates statistics.
1770  *
1771  **********************************************************************/
1772
1773 static void
1774 em_local_timer(void *arg)
1775 {
1776         struct ifnet *ifp;
1777         struct adapter *adapter = arg;
1778         ifp = &adapter->interface_data.ac_if;
1779
1780         lwkt_serialize_enter(ifp->if_serializer);
1781
1782         em_check_for_link(&adapter->hw);
1783         em_update_link_status(adapter);
1784         em_update_stats_counters(adapter);
1785         if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1786                 em_print_hw_stats(adapter);
1787         em_smartspeed(adapter);
1788
1789         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1790
1791         lwkt_serialize_exit(ifp->if_serializer);
1792 }
1793
1794 static void
1795 em_update_link_status(struct adapter *adapter)
1796 {
1797         struct ifnet *ifp;
1798         ifp = &adapter->interface_data.ac_if;
1799
1800         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1801                 if (adapter->link_active == 0) {
1802                         em_get_speed_and_duplex(&adapter->hw, 
1803                                                 &adapter->link_speed, 
1804                                                 &adapter->link_duplex);
1805                         /* Check if we may set SPEED_MODE bit on PCI-E */
1806                         if (adapter->link_speed == SPEED_1000 &&
1807                             (adapter->hw.mac_type == em_82571 ||
1808                              adapter->hw.mac_type == em_82572)) {
1809                                 int tarc0;
1810
1811                                 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
1812                                 tarc0 |= SPEED_MODE_BIT;
1813                                 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
1814                         }
1815                         if (bootverbose) {
1816                                 if_printf(&adapter->interface_data.ac_if,
1817                                           "Link is up %d Mbps %s\n",
1818                                           adapter->link_speed,
1819                                           adapter->link_duplex == FULL_DUPLEX ?
1820                                                 "Full Duplex" : "Half Duplex");
1821                         }
1822                         adapter->link_active = 1;
1823                         adapter->smartspeed = 0;
1824                         ifp->if_baudrate = adapter->link_speed * 1000000;
1825                         ifp->if_link_state = LINK_STATE_UP;
1826                         if_link_state_change(ifp);
1827                 }
1828         } else {
1829                 if (adapter->link_active == 1) {
1830                         ifp->if_baudrate = 0;
1831                         adapter->link_speed = 0;
1832                         adapter->link_duplex = 0;
1833                         if (bootverbose) {
1834                                 if_printf(&adapter->interface_data.ac_if,
1835                                           "Link is Down\n");
1836                         }
1837                         adapter->link_active = 0;
1838                         ifp->if_link_state = LINK_STATE_DOWN;
1839                         if_link_state_change(ifp);
1840                 }
1841         }
1842 }
1843
1844 /*********************************************************************
1845  *
1846  *  This routine disables all traffic on the adapter by issuing a
1847  *  global reset on the MAC and deallocates TX/RX buffers.
1848  *
1849  **********************************************************************/
1850
1851 static void
1852 em_stop(void *arg)
1853 {
1854         struct ifnet   *ifp;
1855         struct adapter * adapter = arg;
1856         ifp = &adapter->interface_data.ac_if;
1857
1858         ASSERT_SERIALIZED(ifp->if_serializer);
1859
1860         INIT_DEBUGOUT("em_stop: begin");
1861         em_disable_intr(adapter);
1862         em_reset_hw(&adapter->hw);
1863         callout_stop(&adapter->timer);
1864         callout_stop(&adapter->tx_fifo_timer);
1865         em_free_transmit_structures(adapter);
1866         em_free_receive_structures(adapter);
1867
1868         /* Tell the stack that the interface is no longer active */
1869         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1870         ifp->if_timer = 0;
1871 }
1872
1873 /*********************************************************************
1874  *
1875  *  Determine hardware revision.
1876  *
1877  **********************************************************************/
1878 static void
1879 em_identify_hardware(struct adapter *adapter)
1880 {
1881         device_t dev = adapter->dev;
1882
1883         /* Make sure our PCI config space has the necessary stuff set */
1884         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1885         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1886               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1887                 device_printf(dev, "Memory Access and/or Bus Master bits "
1888                               "were not set!\n");
1889                 adapter->hw.pci_cmd_word |= PCIM_CMD_BUSMASTEREN |
1890                                             PCIM_CMD_MEMEN;
1891                 pci_write_config(dev, PCIR_COMMAND,
1892                                  adapter->hw.pci_cmd_word, 2);
1893         }
1894
1895         /* Save off the information about this board */
1896         adapter->hw.vendor_id = pci_get_vendor(dev);
1897         adapter->hw.device_id = pci_get_device(dev);
1898         adapter->hw.revision_id = pci_get_revid(dev);
1899         adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1900         adapter->hw.subsystem_id = pci_get_subdevice(dev);
1901
1902         /* Identify the MAC */
1903         if (em_set_mac_type(&adapter->hw))
1904                 device_printf(dev, "Unknown MAC Type\n");
1905
1906         if (adapter->hw.mac_type == em_82541 ||
1907             adapter->hw.mac_type == em_82541_rev_2 ||
1908             adapter->hw.mac_type == em_82547 ||
1909             adapter->hw.mac_type == em_82547_rev_2)
1910                 adapter->hw.phy_init_script = TRUE;
1911 }
1912
1913 static int
1914 em_allocate_pci_resources(device_t dev)
1915 {
1916         struct adapter *adapter = device_get_softc(dev);
1917         int rid;
1918
1919         rid = PCIR_BAR(0);
1920         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1921                                                      &rid, RF_ACTIVE);
1922         if (adapter->res_memory == NULL) {
1923                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1924                 return ENXIO;
1925         }
1926         adapter->osdep.mem_bus_space_tag =
1927                 rman_get_bustag(adapter->res_memory);
1928         adapter->osdep.mem_bus_space_handle =
1929             rman_get_bushandle(adapter->res_memory);
1930         adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
1931
1932         if (adapter->hw.mac_type > em_82543) {
1933                 /* Figure out where our I/O BAR is. */
1934                 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1935                         uint32_t val;
1936
1937                         val = pci_read_config(dev, rid, 4);
1938                         if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
1939                                 adapter->io_rid = rid;
1940                                 break;
1941                         }
1942                         rid += 4;
1943                         /* a 64-bit memory BAR occupies two dwords; skip the high half */
1944                         if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
1945                                 rid += 4;
1946                 }
1947                 if (rid >= PCIR_CIS) {
1948                         device_printf(dev, "Unable to locate IO BAR\n");
1949                         return (ENXIO);
1950                 }
1951
1952                 adapter->res_ioport = bus_alloc_resource_any(dev,
1953                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
1954                 if (!(adapter->res_ioport)) {
1955                         device_printf(dev, "Unable to allocate bus resource: "
1956                                       "ioport\n");
1957                         return ENXIO;
1958                 }
1959                 adapter->hw.io_base = 0;
1960                 adapter->osdep.io_bus_space_tag =
1961                         rman_get_bustag(adapter->res_ioport);
1962                 adapter->osdep.io_bus_space_handle =
1963                         rman_get_bushandle(adapter->res_ioport);
1964         }
1965
1966         /* For ICH8 we need to find the flash memory. */
1967         if (adapter->hw.mac_type == em_ich8lan) {
1968                 rid = EM_FLASH;
1969                 adapter->flash_mem = bus_alloc_resource_any(dev,
1970                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
1971                 if (adapter->flash_mem == NULL) {
1972                         device_printf(dev, "Unable to allocate bus resource: "
1973                                       "flash memory\n");
1974                         return ENXIO;
1975                 }
1976                 adapter->osdep.flash_bus_space_tag =
1977                     rman_get_bustag(adapter->flash_mem);
1978                 adapter->osdep.flash_bus_space_handle =
1979                     rman_get_bushandle(adapter->flash_mem);
1980         }
1981
1982         rid = 0x0;
1983         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1984             &rid, RF_SHAREABLE | RF_ACTIVE);
1985         if (adapter->res_interrupt == NULL) {
1986                 device_printf(dev, "Unable to allocate bus resource: "
1987                               "interrupt\n");
1988                 return ENXIO;
1989         }
1990
1991         adapter->hw.back = &adapter->osdep;
1992
1993         return 0;
1994 }
1995
1996 static void
1997 em_free_pci_resources(device_t dev)
1998 {
1999         struct adapter *adapter = device_get_softc(dev);
2000
2001         if (adapter->res_interrupt != NULL) {
2002                 bus_release_resource(dev, SYS_RES_IRQ, 0, 
2003                                      adapter->res_interrupt);
2004         }
2005         if (adapter->res_memory != NULL) {
2006                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 
2007                                      adapter->res_memory);
2008         }
2009
2010         if (adapter->res_ioport != NULL) {
2011                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 
2012                                      adapter->res_ioport);
2013         }
2014
2015         if (adapter->flash_mem != NULL) {
2016                 bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH,
2017                                      adapter->flash_mem);
2018         }
2019 }
2020
2021 /*********************************************************************
2022  *
2023  *  Initialize the hardware to a configuration as specified by the
2024  *  adapter structure. The controller is reset, the EEPROM is
2025  *  verified, the MAC address is set, then the shared initialization
2026  *  routines are called.
2027  *
2028  **********************************************************************/
2029 static int
2030 em_hardware_init(struct adapter *adapter)
2031 {
2032         uint16_t        rx_buffer_size;
2033
2034         INIT_DEBUGOUT("em_hardware_init: begin");
2035         /* Issue a global reset */
2036         em_reset_hw(&adapter->hw);
2037
2038         /* When hardware is reset, fifo_head is also reset */
2039         adapter->tx_fifo_head = 0;
2040
2041         /* Make sure we have a good EEPROM before we read from it */
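        /*
         * The checksum is validated twice: presumably the first attempt
         * can fail transiently right after a reset, so only a repeated
         * failure is treated as a real EEPROM problem.
         */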
2042         if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2043                 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2044                         device_printf(adapter->dev,
2045                                       "The EEPROM Checksum Is Not Valid\n");
2046                         return (EIO);
2047                 }
2048         }
2049
2050         if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
2051                 device_printf(adapter->dev,
2052                               "EEPROM read error while reading part number\n");
2053                 return (EIO);
2054         }
2055
2056         /* Smart power down is disabled by default on newer adapters. */
2057         if (!em_smart_pwr_down &&
2058             (adapter->hw.mac_type == em_82571 ||
2059              adapter->hw.mac_type == em_82572)) {
2060                 uint16_t phy_tmp = 0;
2061
2062                 /* Speed up time to link by disabling smart power down. */
2063                 em_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
2064                                 &phy_tmp);
2065                 phy_tmp &= ~IGP02E1000_PM_SPD;
2066                 em_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
2067                                  phy_tmp);
2068         }
2069
2070         /*
2071          * These parameters control the automatic generation (Tx) and
2072          * response (Rx) to Ethernet PAUSE frames.
2073          * - High water mark should allow for at least two frames to be
2074          *   received after sending an XOFF.
2075          * - Low water mark works best when it is very near the high water mark.
2076          *   This allows the receiver to restart by sending XON when it has
2077          *   drained a bit.  Here we use an arbitrary value of 1500, which will
2078          *   restart after one full frame is pulled from the buffer.  There
2079          *   could be several smaller frames in the buffer and if so they will
2080          *   not trigger the XON until their total number reduces the buffer
2081          *   by 1500.
2082          * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2083          */
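        /* The low 16 bits of PBA give the RX packet buffer size in KB. */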
2084         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10);
2085
2086         adapter->hw.fc_high_water =
2087             rx_buffer_size - roundup2(adapter->hw.max_frame_size, 1024); 
2088         adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
2089         if (adapter->hw.mac_type == em_80003es2lan)
2090                 adapter->hw.fc_pause_time = 0xFFFF;
2091         else
2092                 adapter->hw.fc_pause_time = 0x1000;
2093         adapter->hw.fc_send_xon = TRUE;
2094         adapter->hw.fc = E1000_FC_FULL;
2095
2096         if (em_init_hw(&adapter->hw) < 0) {
2097                 device_printf(adapter->dev, "Hardware Initialization Failed\n");
2098                 return (EIO);
2099         }
2100
2101         em_check_for_link(&adapter->hw);
2102
2103         return (0);
2104 }
2105
2106 /*********************************************************************
2107  *
2108  *  Setup networking device structure and register an interface.
2109  *
2110  **********************************************************************/
2111 static void
2112 em_setup_interface(device_t dev, struct adapter *adapter)
2113 {
2114         struct ifnet *ifp;
2115         u_char fiber_type = IFM_1000_SX;        /* default type */
2116         INIT_DEBUGOUT("em_setup_interface: begin");
2117
2118         ifp = &adapter->interface_data.ac_if;
2119         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2120         ifp->if_mtu = ETHERMTU;
2121         ifp->if_baudrate = 1000000000;
2122         ifp->if_init =  em_init;
2123         ifp->if_softc = adapter;
2124         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2125         ifp->if_ioctl = em_ioctl;
2126         ifp->if_start = em_start;
2127 #ifdef DEVICE_POLLING
2128         ifp->if_poll = em_poll;
2129 #endif
2130         ifp->if_watchdog = em_watchdog;
2131         ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
2132         ifq_set_ready(&ifp->if_snd);
2133
2134         if (adapter->hw.mac_type >= em_82543)
2135                 ifp->if_capabilities |= IFCAP_HWCSUM;
2136
2137         ifp->if_capenable = ifp->if_capabilities;
2138
2139         ether_ifattach(ifp, adapter->hw.mac_addr, NULL);
2140
2141         /*
2142          * Tell the upper layer(s) we support long frames.
2143          */
2144         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2145         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2146 #if 0
2147         ifp->if_capenable |= IFCAP_VLAN_MTU;
2148 #endif
2149
2150         /*
2151          * Specify the media types supported by this adapter and register
2152          * callbacks to update media and link information
2153          */
2154         ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
2155                      em_media_status);
2156         if (adapter->hw.media_type == em_media_type_fiber ||
2157             adapter->hw.media_type == em_media_type_internal_serdes) {
2158                 if (adapter->hw.mac_type == em_82545)
2159                         fiber_type = IFM_1000_LX;
2160                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
2161                             0, NULL);
2162                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2163         } else {
2164                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2165                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2166                             0, NULL);
2167                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2168                             0, NULL);
2169                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2170                             0, NULL);
2171                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
2172                             0, NULL);
2173                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2174         }
2175         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2176         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2177 }
2178
2179 /*********************************************************************
2180  *
2181  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2182  *
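 *  SmartSpeed is the PHY's automatic downshift to a slower speed when a
 *  1000Base-T link repeatedly fails to come up.  The code below toggles
 *  the manual master/slave setting and restarts autonegotiation to give
 *  the link another chance to come up at full speed.
 *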
2183  **********************************************************************/
2184 static void
2185 em_smartspeed(struct adapter *adapter)
2186 {
2187         uint16_t phy_tmp;
2188
2189         if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
2190             !adapter->hw.autoneg ||
2191             !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2192                 return;
2193
2194         if (adapter->smartspeed == 0) {
2195                 /*
2196                  * If Master/Slave config fault is asserted twice,
2197                  * we assume back-to-back.
2198                  */
2199                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2200                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2201                         return;
2202                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2203                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2204                         em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2205                         if (phy_tmp & CR_1000T_MS_ENABLE) {
2206                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
2207                                 em_write_phy_reg(&adapter->hw,
2208                                                  PHY_1000T_CTRL, phy_tmp);
2209                                 adapter->smartspeed++;
2210                                 if (adapter->hw.autoneg &&
2211                                     !em_phy_setup_autoneg(&adapter->hw) &&
2212                                     !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2213                                                      &phy_tmp)) {
2214                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2215                                                     MII_CR_RESTART_AUTO_NEG);
2216                                         em_write_phy_reg(&adapter->hw,
2217                                                          PHY_CTRL, phy_tmp);
2218                                 }
2219                         }
2220                 }
2221                 return;
2222         } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2223                 /* If there is still no link, perhaps a 2/3 pair cable is in use */
2224                 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2225                 phy_tmp |= CR_1000T_MS_ENABLE;
2226                 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2227                 if (adapter->hw.autoneg &&
2228                     !em_phy_setup_autoneg(&adapter->hw) &&
2229                     !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2230                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2231                                     MII_CR_RESTART_AUTO_NEG);
2232                         em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2233                 }
2234         }
2235         /* Restart process after EM_SMARTSPEED_MAX iterations */
2236         if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2237                 adapter->smartspeed = 0;
2238 }
2239
2240 /*
2241  * Manage DMA'able memory.
2242  */
2243 static void
2244 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2245 {
2246         if (error)
2247                 return;
2248         *(bus_addr_t *)arg = segs->ds_addr;
2249 }
2250
2251 static int
2252 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2253               struct em_dma_alloc *dma)
2254 {
2255         device_t dev = adapter->dev;
2256         int error;
2257
2258         error = bus_dma_tag_create(NULL,                /* parent */
2259                                    EM_DBA_ALIGN, 0,     /* alignment, bounds */
2260                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2261                                    BUS_SPACE_MAXADDR,   /* highaddr */
2262                                    NULL, NULL,          /* filter, filterarg */
2263                                    size,                /* maxsize */
2264                                    1,                   /* nsegments */
2265                                    size,                /* maxsegsize */
2266                                    0,                   /* flags */
2267                                    &dma->dma_tag);
2268         if (error) {
2269                 device_printf(dev, "%s: bus_dma_tag_create failed; error %d\n",
2270                               __func__, error);
2271                 return error;
2272         }
2273
2274         error = bus_dmamem_alloc(dma->dma_tag, (void**)&dma->dma_vaddr,
2275                                  BUS_DMA_WAITOK, &dma->dma_map);
2276         if (error) {
2277                 device_printf(dev, "%s: bus_dmamem_alloc failed; "
2278                               "size %ju, error %d\n",
2279                               __func__, (uintmax_t)size, error);
2280                 goto fail;
2281         }
2282
2283         error = bus_dmamap_load(dma->dma_tag, dma->dma_map,
2284                                 dma->dma_vaddr, size,
2285                                 em_dmamap_cb, &dma->dma_paddr,
2286                                 BUS_DMA_WAITOK);
2287         if (error) {
2288                 device_printf(dev, "%s: bus_dmamap_load failed; error %u\n",
2289                               __func__, error);
2290                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2291                 goto fail;
2292         }
2293
2294         return 0;
2295 fail:
2296         bus_dma_tag_destroy(dma->dma_tag);
2297         dma->dma_tag = NULL;
2298         return error;
2299 }
2300
2301 static void
2302 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2303 {
2304         if (dma->dma_tag != NULL) {
2305                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2306                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2307                 bus_dma_tag_destroy(dma->dma_tag);
2308                 dma->dma_tag = NULL;
2309         }
2310 }
2311
2312 /*********************************************************************
2313  *
2314  *  Allocate and initialize transmit structures.
2315  *
2316  **********************************************************************/
2317 static int
2318 em_setup_transmit_structures(struct adapter *adapter)
2319 {
2320         struct em_buffer *tx_buffer;
2321         bus_size_t size;
2322         int error, i;
2323
2324         /*
2325          * Create the DMA tag used to map transmit packet buffers.
2326          */
2327         size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
2328         if (bus_dma_tag_create(NULL,                    /* parent */
2329                                1, 0,                    /* alignment, bounds */
2330                                BUS_SPACE_MAXADDR,       /* lowaddr */ 
2331                                BUS_SPACE_MAXADDR,       /* highaddr */
2332                                NULL, NULL,              /* filter, filterarg */
2333                                size,                    /* maxsize */
2334                                EM_MAX_SCATTER,          /* nsegments */
2335                                size,                    /* maxsegsize */
2336                                0,                       /* flags */ 
2337                                &adapter->txtag)) {
2338                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
2339                 return(ENOMEM);
2340         }
2341
2342         adapter->tx_buffer_area =
2343                 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc,
2344                         M_DEVBUF, M_WAITOK | M_ZERO);
2345
2346         bzero(adapter->tx_desc_base,
2347               sizeof(struct em_tx_desc) * adapter->num_tx_desc);
2348         tx_buffer = adapter->tx_buffer_area;
2349         for (i = 0; i < adapter->num_tx_desc; i++) {
2350                 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2351                 if (error) {
2352                         device_printf(adapter->dev,
2353                                       "Unable to create TX DMA map\n");
2354                         goto fail;
2355                 }
2356                 tx_buffer++;
2357         }
2358
2359         adapter->next_avail_tx_desc = 0;
2360         adapter->next_tx_to_clean = 0;
2361
2362         /* Set number of descriptors available */
2363         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2364
2365         /* Set checksum context */
2366         adapter->active_checksum_context = OFFLOAD_NONE;
2367
2368         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2369                         BUS_DMASYNC_PREWRITE);
2370
2371         return (0);
2372 fail:
2373         em_free_transmit_structures(adapter);
2374         return (error);
2375 }
2376
2377 /*********************************************************************
2378  *
2379  *  Enable transmit unit.
2380  *
2381  **********************************************************************/
2382 static void
2383 em_initialize_transmit_unit(struct adapter *adapter)
2384 {
2385         uint32_t reg_tctl;
2386         uint32_t reg_tipg = 0;
2387         uint64_t bus_addr;
2388
2389         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2390
2391         /* Setup the Base and Length of the Tx Descriptor Ring */
2392         bus_addr = adapter->txdma.dma_paddr;
2393         E1000_WRITE_REG(&adapter->hw, TDLEN,
2394                         adapter->num_tx_desc * sizeof(struct em_tx_desc));
2395         E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
2396         E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
2397
2398         /* Setup the HW Tx Head and Tail descriptor pointers */
2399         E1000_WRITE_REG(&adapter->hw, TDT, 0);
2400         E1000_WRITE_REG(&adapter->hw, TDH, 0);
2401
2402         HW_DEBUGOUT2("Base = %x, Length = %x\n",
2403                      E1000_READ_REG(&adapter->hw, TDBAL),
2404                      E1000_READ_REG(&adapter->hw, TDLEN));
2405
2406         /* Set the default values for the Tx Inter Packet Gap timer */
2407         switch (adapter->hw.mac_type) {
2408         case em_82542_rev2_0:
2409         case em_82542_rev2_1:
2410                 reg_tipg = DEFAULT_82542_TIPG_IPGT;
2411                 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2412                 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2413                 break;
2414         case em_80003es2lan:
2415                 reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2416                 reg_tipg |=
2417                     DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2418                 break;
2419         default:
2420                 if (adapter->hw.media_type == em_media_type_fiber ||
2421                     adapter->hw.media_type == em_media_type_internal_serdes)
2422                         reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2423                 else
2424                         reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2425                 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2426                 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2427         }
2428
2429         E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2430         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2431         if (adapter->hw.mac_type >= em_82540) {
2432                 E1000_WRITE_REG(&adapter->hw, TADV,
2433                                 adapter->tx_abs_int_delay.value);
2434         }
2435
2436         /* Program the Transmit Control Register */
2437         reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2438                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2439         if (adapter->hw.mac_type >= em_82571)
2440                 reg_tctl |= E1000_TCTL_MULR;
2441         if (adapter->link_duplex == 1)
2442                 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2443         else
2444                 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2445
2446         /* This write will effectively turn on the transmit unit. */
2447         E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2448
2449         /* Setup Transmit Descriptor Base Settings */
2450         adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2451
2452         if (adapter->tx_int_delay.value > 0)
2453                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2454 }
2455
2456 /*********************************************************************
2457  *
2458  *  Free all transmit related data structures.
2459  *
2460  **********************************************************************/
2461 static void
2462 em_free_transmit_structures(struct adapter *adapter)
2463 {
2464         struct em_buffer *tx_buffer;
2465         int i;
2466
2467         INIT_DEBUGOUT("free_transmit_structures: begin");
2468
2469         if (adapter->tx_buffer_area != NULL) {
2470                 tx_buffer = adapter->tx_buffer_area;
2471                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2472                         if (tx_buffer->m_head != NULL) {
2473                                 bus_dmamap_unload(adapter->txtag,
2474                                                   tx_buffer->map);
2475                                 m_freem(tx_buffer->m_head);
2476                         }
2477
2478                         if (tx_buffer->map != NULL) {
2479                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2480                                 tx_buffer->map = NULL;
2481                         }
2482                         tx_buffer->m_head = NULL;
2483                 }
2484         }
2485         if (adapter->tx_buffer_area != NULL) {
2486                 kfree(adapter->tx_buffer_area, M_DEVBUF);
2487                 adapter->tx_buffer_area = NULL;
2488         }
2489         if (adapter->txtag != NULL) {
2490                 bus_dma_tag_destroy(adapter->txtag);
2491                 adapter->txtag = NULL;
2492         }
2493 }
2494
2495 /*********************************************************************
2496  *
2497  *  The offload context needs to be set when we transfer the first
2498  *  packet of a particular protocol (TCP/UDP). We change the
2499  *  context only if the protocol type changes.
2500  *
2501  **********************************************************************/
2502 static void
2503 em_transmit_checksum_setup(struct adapter *adapter,
2504                            struct mbuf *mp,
2505                            uint32_t *txd_upper,
2506                            uint32_t *txd_lower) 
2507 {
2508         struct em_context_desc *TXD;
2509         struct em_buffer *tx_buffer;
2510         int curr_txd;
2511
2512         if (mp->m_pkthdr.csum_flags) {
2513                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2514                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2515                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2516                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2517                                 return;
2518                         else
2519                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2520                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2521                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2522                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2523                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2524                                 return;
2525                         else
2526                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2527                 } else {
2528                         *txd_upper = 0;
2529                         *txd_lower = 0;
2530                         return;
2531                 }
2532         } else {
2533                 *txd_upper = 0;
2534                 *txd_lower = 0;
2535                 return;
2536         }
2537
2538         /*
2539          * If we reach this point, the checksum offload context
2540          * needs to be reset.
2541          */
2542         curr_txd = adapter->next_avail_tx_desc;
2543         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2544         TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2545
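        /*
         * The context descriptor describes the checksum layout to the
         * hardware: *css is the offset where checksumming starts, *cso is
         * where the result is inserted, and *cse is where it ends (0 means
         * "to the end of the packet").  Offsets are from the frame start.
         */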
2546         TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2547         TXD->lower_setup.ip_fields.ipcso =
2548             ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2549         TXD->lower_setup.ip_fields.ipcse =
2550             htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2551
2552         TXD->upper_setup.tcp_fields.tucss =
2553             ETHER_HDR_LEN + sizeof(struct ip);
2554         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2555
2556         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2557                 TXD->upper_setup.tcp_fields.tucso =
2558                         ETHER_HDR_LEN + sizeof(struct ip) +
2559                         offsetof(struct tcphdr, th_sum);
2560         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2561                 TXD->upper_setup.tcp_fields.tucso =
2562                         ETHER_HDR_LEN + sizeof(struct ip) +
2563                         offsetof(struct udphdr, uh_sum);
2564         }
2565
2566         TXD->tcp_seg_setup.data = htole32(0);
2567         TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2568
2569         tx_buffer->m_head = NULL;
2570         tx_buffer->next_eop = -1;
2571
2572         if (++curr_txd == adapter->num_tx_desc)
2573                 curr_txd = 0;
2574
2575         adapter->num_tx_desc_avail--;
2576         adapter->next_avail_tx_desc = curr_txd;
2577 }
2578
2579 /**********************************************************************
2580  *
2581  *  Examine each tx_buffer in the used queue. If the hardware is done
2582  *  processing the packet then free associated resources. The
2583  *  tx_buffer is put back on the free queue.
2584  *
2585  **********************************************************************/
2586
2587 static void
2588 em_txeof(struct adapter *adapter)
2589 {
2590         int first, last, done, num_avail;
2591         struct em_buffer *tx_buffer;
2592         struct em_tx_desc *tx_desc, *eop_desc;
2593         struct ifnet *ifp = &adapter->interface_data.ac_if;
2594
2595         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2596                 return;
2597
2598         num_avail = adapter->num_tx_desc_avail; 
2599         first = adapter->next_tx_to_clean;
2600         tx_desc = &adapter->tx_desc_base[first];
2601         tx_buffer = &adapter->tx_buffer_area[first];
2602         last = tx_buffer->next_eop;
2603         KKASSERT(last >= 0 && last < adapter->num_tx_desc);
2604         eop_desc = &adapter->tx_desc_base[last];
2605
2606         /*
2607          * Now calculate the terminating index for the cleanup loop below
2608          */
2609         if (++last == adapter->num_tx_desc)
2610                 last = 0;
2611         done = last;
2612
2613         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2614                         BUS_DMASYNC_POSTREAD);
2615
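        /*
         * Only the EOP descriptor of each packet carries the RS bit, so
         * only it gets a DD (descriptor done) writeback; once the EOP
         * shows DD, every descriptor of that packet can be reclaimed.
         */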
2616         while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2617                 while (first != done) {
2618                         tx_desc->upper.data = 0;
2619                         tx_desc->lower.data = 0;
2620                         num_avail++;
2621
2622                         logif(pkt_txclean);
2623
2624                         if (tx_buffer->m_head) {
2625                                 ifp->if_opackets++;
2626                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2627                                                 BUS_DMASYNC_POSTWRITE);
2628                                 bus_dmamap_unload(adapter->txtag,
2629                                                   tx_buffer->map);
2630
2631                                 m_freem(tx_buffer->m_head);
2632                                 tx_buffer->m_head = NULL;
2633                         }
2634                         tx_buffer->next_eop = -1;
2635
2636                         if (++first == adapter->num_tx_desc)
2637                                 first = 0;
2638
2639                         tx_buffer = &adapter->tx_buffer_area[first];
2640                         tx_desc = &adapter->tx_desc_base[first];
2641                 }
2642                 /* See if we can continue to the next packet */
2643                 last = tx_buffer->next_eop;
2644                 if (last != -1) {
2645                         KKASSERT(last >= 0 && last < adapter->num_tx_desc);
2646                         eop_desc = &adapter->tx_desc_base[last];
2647                         if (++last == adapter->num_tx_desc)
2648                                 last = 0;
2649                         done = last;
2650                 } else {
2651                         break;
2652                 }
2653         }
2654
2655         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2656                         BUS_DMASYNC_PREWRITE);
2657
2658         adapter->next_tx_to_clean = first;
2659
2660         /*
2661          * If we have enough room, clear IFF_OACTIVE to tell the stack
2662          * that it is OK to send packets.
2663          * If there are no pending descriptors, clear the timeout. Otherwise,
2664          * if some descriptors have been freed, restart the timeout.
2665          */
2666         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2667                 ifp->if_flags &= ~IFF_OACTIVE;
2668                 if (num_avail == adapter->num_tx_desc)
2669                         ifp->if_timer = 0;
2670                 else if (num_avail != adapter->num_tx_desc_avail)
2671                         ifp->if_timer = EM_TX_TIMEOUT;
2672         }
2673         adapter->num_tx_desc_avail = num_avail;
2674 }
2675
2676 /*********************************************************************
2677  *
2678  *  Get a buffer from system mbuf buffer pool.
2679  *
2680  **********************************************************************/
2681 static int
2682 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2683 {
2684         struct mbuf *mp = nmp;
2685         struct em_buffer *rx_buffer;
2686         struct ifnet *ifp;
2687         bus_addr_t paddr;
2688         int error;
2689
2690         ifp = &adapter->interface_data.ac_if;
2691
2692         if (mp == NULL) {
2693                 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2694                 if (mp == NULL) {
2695                         adapter->mbuf_cluster_failed++;
2696                         return (ENOBUFS);
2697                 }
2698                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2699         } else {
2700                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2701                 mp->m_data = mp->m_ext.ext_buf;
2702                 mp->m_next = NULL;
2703         }
2704
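        /*
         * For standard-size frames, strip ETHER_ALIGN (2) bytes from the
         * front of the cluster so that the IP header following the
         * 14-byte Ethernet header ends up 32-bit aligned.
         */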
2705         if (ifp->if_mtu <= ETHERMTU)
2706                 m_adj(mp, ETHER_ALIGN);
2707
2708         rx_buffer = &adapter->rx_buffer_area[i];
2709
2710         /*
2711          * Using memory from the mbuf cluster pool, invoke the
2712          * bus_dma machinery to arrange the memory mapping.
2713          */
2714         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2715                                 mtod(mp, void *), mp->m_len,
2716                                 em_dmamap_cb, &paddr, 0);
2717         if (error) {
2718                 m_free(mp);
2719                 return (error);
2720         }
2721         rx_buffer->m_head = mp;
2722         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2723         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2724
2725         return (0);
2726 }
2727
2728 /*********************************************************************
2729  *
2730  *  Allocate memory for rx_buffer structures. Since we use one
2731  *  rx_buffer per received packet, the maximum number of rx_buffers
2732  *  that we'll need is equal to the number of receive descriptors
2733  *  that we've allocated.
2734  *
2735  **********************************************************************/
2736 static int
2737 em_allocate_receive_structures(struct adapter *adapter)
2738 {
2739         int i, error, size;
2740         struct em_buffer *rx_buffer;
2741
2742         size = adapter->num_rx_desc * sizeof(struct em_buffer);
2743         adapter->rx_buffer_area = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2744
2745         error = bus_dma_tag_create(NULL,                /* parent */
2746                                    1, 0,                /* alignment, bounds */
2747                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2748                                    BUS_SPACE_MAXADDR,   /* highaddr */
2749                                    NULL, NULL,          /* filter, filterarg */
2750                                    MCLBYTES,            /* maxsize */
2751                                    1,                   /* nsegments */
2752                                    MCLBYTES,            /* maxsegsize */
2753                                    0,                   /* flags */
2754                                    &adapter->rxtag);
2755         if (error) {
2756                 device_printf(adapter->dev, "%s: bus_dma_tag_create failed; "
2757                               "error %u\n", __func__, error);
2758                 goto fail;
2759         }
2760  
2761         rx_buffer = adapter->rx_buffer_area;
2762         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2763                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2764                                           &rx_buffer->map);
2765                 if (error) {
2766                         device_printf(adapter->dev,
2767                                       "%s: bus_dmamap_create failed; "
2768                                       "error %u\n", __func__, error);
2769                         goto fail;
2770                 }
2771         }
2772
2773         for (i = 0; i < adapter->num_rx_desc; i++) {
2774                 error = em_get_buf(i, adapter, NULL, MB_DONTWAIT);
2775                 if (error)
2776                         goto fail;
2777         }
2778
2779         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2780                         BUS_DMASYNC_PREWRITE);
2781
2782         return (0);
2783 fail:
2784         em_free_receive_structures(adapter);
2785         return (error);
2786 }
2787
2788 /*********************************************************************
2789  *
2790  *  Allocate and initialize receive structures.
2791  *
2792  **********************************************************************/
2793 static int
2794 em_setup_receive_structures(struct adapter *adapter)
2795 {
2796         int error;
2797
2798         bzero(adapter->rx_desc_base,
2799               sizeof(struct em_rx_desc) * adapter->num_rx_desc);
2800
2801         error = em_allocate_receive_structures(adapter);
2802         if (error)
2803                 return (error);
2804
2805         /* Setup our descriptor pointers */
2806         adapter->next_rx_desc_to_check = 0;
2807
2808         return (0);
2809 }
2810
2811 /*********************************************************************
2812  *
2813  *  Enable receive unit.
2814  *
2815  **********************************************************************/
2816 static void
2817 em_initialize_receive_unit(struct adapter *adapter)
2818 {
2819         uint32_t reg_rctl;
2820         uint32_t reg_rxcsum;
2821         struct ifnet *ifp;
2822         uint64_t bus_addr;
2823  
2824         INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2825
2826         ifp = &adapter->interface_data.ac_if;
2827
2828         /*
2829          * Make sure receives are disabled while setting
2830          * up the descriptor ring
2831          */
2832         E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2833
2834         /* Set the Receive Delay Timer Register */
2835         E1000_WRITE_REG(&adapter->hw, RDTR, 
2836                         adapter->rx_int_delay.value | E1000_RDT_FPDB);
2837
2838         if (adapter->hw.mac_type >= em_82540) {
2839                 E1000_WRITE_REG(&adapter->hw, RADV,
2840                                 adapter->rx_abs_int_delay.value);
2841
2842                 /* Set the interrupt throttling rate in 256ns increments */  
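                /*
                 * Example: em_int_throttle_ceil = 10000 interrupts/sec
                 * programs ITR = 1000000000 / 256 / 10000 = 390, i.e. a
                 * minimum of 390 * 256ns (roughly 100us) between interrupts.
                 */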
2843                 if (em_int_throttle_ceil) {
2844                         E1000_WRITE_REG(&adapter->hw, ITR,
2845                                 1000000000 / 256 / em_int_throttle_ceil);
2846                 } else {
2847                         E1000_WRITE_REG(&adapter->hw, ITR, 0);
2848                 }
2849         }
2850
2851         /* Setup the Base and Length of the Rx Descriptor Ring */
2852         bus_addr = adapter->rxdma.dma_paddr;
2853         E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2854                         sizeof(struct em_rx_desc));
2855         E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2856         E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2857
2858         /* Setup the Receive Control Register */
2859         reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2860                    E1000_RCTL_RDMTS_HALF |
2861                    (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2862
2863         if (adapter->hw.tbi_compatibility_on == TRUE)
2864                 reg_rctl |= E1000_RCTL_SBP;
2865
2866         switch (adapter->rx_buffer_len) {
2867         default:
2868         case EM_RXBUFFER_2048:
2869                 reg_rctl |= E1000_RCTL_SZ_2048;
2870                 break;
2871         case EM_RXBUFFER_4096:
2872                 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX |
2873                             E1000_RCTL_LPE;
2874                 break;            
2875         case EM_RXBUFFER_8192:
2876                 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX |
2877                             E1000_RCTL_LPE;
2878                 break;
2879         case EM_RXBUFFER_16384:
2880                 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX |
2881                             E1000_RCTL_LPE;
2882                 break;
2883         }
2884
2885         if (ifp->if_mtu > ETHERMTU)
2886                 reg_rctl |= E1000_RCTL_LPE;
2887
2888         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2889         if ((adapter->hw.mac_type >= em_82543) &&
2890             (ifp->if_capenable & IFCAP_RXCSUM)) {
2891                 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2892                 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2893                 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2894         }
2895
2896 #ifdef EM_X60_WORKAROUND
2897         if (adapter->hw.mac_type == em_82573)
2898                 E1000_WRITE_REG(&adapter->hw, RDTR, 32);
2899 #endif
2900
2901         /* Enable Receives */
2902         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2903
2904         /* Setup the HW Rx Head and Tail Descriptor Pointers */
2905         E1000_WRITE_REG(&adapter->hw, RDH, 0);
2906         E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2907 }
2908
2909 /*********************************************************************
2910  *
2911  *  Free receive related data structures.
2912  *
2913  **********************************************************************/
2914 static void
2915 em_free_receive_structures(struct adapter *adapter)
2916 {
2917         struct em_buffer *rx_buffer;
2918         int i;
2919
2920         INIT_DEBUGOUT("free_receive_structures: begin");
2921
2922         if (adapter->rx_buffer_area != NULL) {
2923                 rx_buffer = adapter->rx_buffer_area;
2924                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2925                         if (rx_buffer->m_head != NULL) {
2926                                 bus_dmamap_unload(adapter->rxtag,
2927                                                   rx_buffer->map);
2928                                 m_freem(rx_buffer->m_head);
2929                                 rx_buffer->m_head = NULL;
2930                         }
2931                         if (rx_buffer->map != NULL) {
2932                                 bus_dmamap_destroy(adapter->rxtag,
2933                                                    rx_buffer->map);
2934                                 rx_buffer->map = NULL;
2935                         }
2936                 }
2937         }
2938         if (adapter->rx_buffer_area != NULL) {
2939                 kfree(adapter->rx_buffer_area, M_DEVBUF);
2940                 adapter->rx_buffer_area = NULL;
2941         }
2942         if (adapter->rxtag != NULL) {
2943                 bus_dma_tag_destroy(adapter->rxtag);
2944                 adapter->rxtag = NULL;
2945         }
2946 }
2947
2948 /*********************************************************************
2949  *
2950  *  This routine executes in interrupt context. It replenishes
2951  *  the mbufs in the descriptor ring and passes data which has been
2952  *  DMA'd into host memory up to the upper layer.
2953  *
2954  *  We loop at most count times if count is > 0, or until done if
2955  *  count < 0.
2956  *
2957  *********************************************************************/
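/*
 * adapter->fmp and adapter->lmp track the first and last mbuf of a frame
 * that spans multiple receive descriptors; the assembled chain is handed
 * up only when a descriptor with the EOP bit set is processed.
 */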
2958 static void
2959 em_rxeof(struct adapter *adapter, int count)
2960 {
2961         struct ifnet *ifp;
2962         struct mbuf *mp;
2963         uint8_t accept_frame = 0;
2964         uint8_t eop = 0;
2965         uint16_t len, desc_len, prev_len_adj;
2966         int i;
2967
2968         /* Pointer to the receive descriptor being examined. */
2969         struct em_rx_desc *current_desc;
2970
2971         ifp = &adapter->interface_data.ac_if;
2972         i = adapter->next_rx_desc_to_check;
2973         current_desc = &adapter->rx_desc_base[i];
2974
2975         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2976                         BUS_DMASYNC_POSTREAD);
2977
2978         if (!(current_desc->status & E1000_RXD_STAT_DD))
2979                 return;
2980
2981         while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) {
2982                 logif(pkt_receive);
2983                 mp = adapter->rx_buffer_area[i].m_head;
2984                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2985                                 BUS_DMASYNC_POSTREAD);
2986                 bus_dmamap_unload(adapter->rxtag,
2987                                   adapter->rx_buffer_area[i].map);
2988
2989                 accept_frame = 1;
2990                 prev_len_adj = 0;
2991                 desc_len = le16toh(current_desc->length);
2992                 if (current_desc->status & E1000_RXD_STAT_EOP) {
2993                         count--;
2994                         eop = 1;
2995                         if (desc_len < ETHER_CRC_LEN) {
2996                                 len = 0;
2997                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
2998                         } else {
2999                                 len = desc_len - ETHER_CRC_LEN;
3000                         }
3001                 } else {
3002                         eop = 0;
3003                         len = desc_len;
3004                 }
3005
3006                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3007                         uint8_t last_byte;
3008                         uint32_t pkt_len = desc_len;
3009
3010                         if (adapter->fmp != NULL)
3011                                 pkt_len += adapter->fmp->m_pkthdr.len; 
3012
3013                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3014
3015                         if (TBI_ACCEPT(&adapter->hw, current_desc->status, 
3016                                        current_desc->errors, 
3017                                        pkt_len, last_byte)) {
3018                                 em_tbi_adjust_stats(&adapter->hw, 
3019                                                     &adapter->stats, 
3020                                                     pkt_len, 
3021                                                     adapter->hw.mac_addr);
3022                                 if (len > 0)
3023                                         len--;
3024                         } else {
3025                                 accept_frame = 0;
3026                         }
3027                 }
3028
3029                 if (accept_frame) {
3030                         if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
3031                                 adapter->dropped_pkts++;
3032                                 em_get_buf(i, adapter, mp, MB_DONTWAIT);
3033                                 if (adapter->fmp != NULL)
3034                                         m_freem(adapter->fmp);
3035                                 adapter->fmp = NULL;
3036                                 adapter->lmp = NULL;
3037                                 goto skip;
3038                         }
3039
3040                         /* Assign correct length to the current fragment */
3041                         mp->m_len = len;
3042
3043                         if (adapter->fmp == NULL) {
3044                                 mp->m_pkthdr.len = len;
3045                                 adapter->fmp = mp;       /* Store the first mbuf */
3046                                 adapter->lmp = mp;
3047                         } else {
3048                                 /* Chain mbufs together */
3049                                 /* 
3050                                  * Adjust length of previous mbuf in chain if
3051                                  * we received less than 4 bytes in the last
3052                                  * descriptor.
3053                                  */
3054                                 if (prev_len_adj > 0) {
3055                                         adapter->lmp->m_len -= prev_len_adj;
3056                                         adapter->fmp->m_pkthdr.len -= prev_len_adj;
3057                                 }
3058                                 adapter->lmp->m_next = mp;
3059                                 adapter->lmp = adapter->lmp->m_next;
3060                                 adapter->fmp->m_pkthdr.len += len;
3061                         }
3062
3063                         if (eop) {
3064                                 adapter->fmp->m_pkthdr.rcvif = ifp;
3065                                 ifp->if_ipackets++;
3066
3067                                 em_receive_checksum(adapter, current_desc,
3068                                                     adapter->fmp);
3069                                 if (current_desc->status & E1000_RXD_STAT_VP) {
3070                                         VLAN_INPUT_TAG(adapter->fmp,
3071                                                        (current_desc->special & 
3072                                                         E1000_RXD_SPC_VLAN_MASK));
3073                                 } else {
3074                                         ifp->if_input(ifp, adapter->fmp);
3075                                 }
3076                                 adapter->fmp = NULL;
3077                                 adapter->lmp = NULL;
3078                         }
3079                 } else {
3080                         adapter->dropped_pkts++;
3081                         em_get_buf(i, adapter, mp, MB_DONTWAIT);
3082                         if (adapter->fmp != NULL) 
3083                                 m_freem(adapter->fmp);
3084                         adapter->fmp = NULL;
3085                         adapter->lmp = NULL;
3086                 }
3087
3088 skip:
3089                 /* Zero out the receive descriptors status. */
3090                 current_desc->status = 0;
3091
3092                 /* Advance our pointers to the next descriptor. */
3093                 if (++i == adapter->num_rx_desc) {
3094                         i = 0;
3095                         current_desc = adapter->rx_desc_base;
3096                 } else {
3097                         current_desc++;
3098                 }
3099         }
3100
3101         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3102                         BUS_DMASYNC_PREWRITE);
3103
3104         adapter->next_rx_desc_to_check = i;
3105
3106         /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
3107         if (--i < 0)
3108                 i = adapter->num_rx_desc - 1;
3109
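        /*
         * RDT is left one descriptor behind next_rx_desc_to_check so the
         * hardware never advances onto the descriptor the driver will
         * examine next.
         */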
3110         E1000_WRITE_REG(&adapter->hw, RDT, i);
3111 }
3112
3113 /*********************************************************************
3114  *
3115  *  Verify that the hardware indicated that the checksum is valid.
3116  *  Inform the stack about the status of checksum so that stack
3117  *  doesn't spend time verifying the checksum.
3118  *
3119  *********************************************************************/
3120 static void
3121 em_receive_checksum(struct adapter *adapter,
3122                     struct em_rx_desc *rx_desc,
3123                     struct mbuf *mp)
3124 {
3125         /* 82543 or newer only */
3126         if ((adapter->hw.mac_type < em_82543) ||
3127             /* Ignore Checksum bit is set */
3128             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3129                 mp->m_pkthdr.csum_flags = 0;
3130                 return;
3131         }
3132
3133         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3134                 /* Did it pass? */
3135                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3136                         /* IP Checksum Good */
3137                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3138                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3139                 } else {
3140                         mp->m_pkthdr.csum_flags = 0;
3141                 }
3142         }
3143
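        /*
         * CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data set to 0xffff
         * tells the stack that the TCP/UDP checksum, including the pseudo
         * header, has already been verified by the hardware.
         */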
3144         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3145                 /* Did it pass? */
3146                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3147                         mp->m_pkthdr.csum_flags |=
3148                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
3149                          CSUM_FRAG_NOT_CHECKED);
3150                         mp->m_pkthdr.csum_data = htons(0xffff);
3151                 }
3152         }
3153 }
3154
3155
3156 static void 
3157 em_enable_vlans(struct adapter *adapter)
3158 {
3159         uint32_t ctrl;
3160
3161         E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3162
3163         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3164         ctrl |= E1000_CTRL_VME;
3165         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3166 }
3167
3168 static void
3169 em_disable_vlans(struct adapter *adapter)
3170 {
3171         uint32_t ctrl;
3172
3173         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3174         ctrl &= ~E1000_CTRL_VME;
3175         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3176 }
3177
3178 /*
3179  * note: we must call bus_enable_intr() prior to enabling the hardware
3180  * interrupt and bus_disable_intr() after disabling the hardware interrupt
3181  * in order to avoid handler execution races from scheduled interrupt
3182  * threads.
3183  */
3184 static void
3185 em_enable_intr(struct adapter *adapter)
3186 {
3187         struct ifnet *ifp = &adapter->interface_data.ac_if;
3188         
3189         if ((ifp->if_flags & IFF_POLLING) == 0) {
3190                 lwkt_serialize_handler_enable(ifp->if_serializer);
3191                 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3192         }
3193 }
3194
3195 static void
3196 em_disable_intr(struct adapter *adapter)
3197 {
3198         /*
3199          * The first version of the 82542 had an erratum: when link was forced
3200          * it would stay up even if the cable was disconnected.
3201          * Sequence errors were used to detect the disconnect and then the
3202          * driver would unforce the link.  This code is in the ISR.  For
3203          * this to work correctly the Sequence error interrupt had to be
3204          * enabled all the time.
3205          */
3206         if (adapter->hw.mac_type == em_82542_rev2_0) {
3207                 E1000_WRITE_REG(&adapter->hw, IMC,
3208                                 (0xffffffff & ~E1000_IMC_RXSEQ));
3209         } else {
3210                 E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
3211         }
3212
3213         lwkt_serialize_handler_disable(adapter->interface_data.ac_if.if_serializer);
3214 }
3215
3216 static int
3217 em_is_valid_ether_addr(uint8_t *addr)
3218 {
3219         static const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3220
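        /* Reject multicast addresses (I/G bit set) and the all-zero address. */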
3221         if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3222                 return (FALSE);
3223         else
3224                 return (TRUE);
3225 }
3226
3227 void
3228 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3229 {
3230         pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
3231 }
3232
3233 void
3234 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3235 {
3236         *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
3237 }
3238
3239 void
3240 em_pci_set_mwi(struct em_hw *hw)
3241 {
3242         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
3243                          (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3244 }
3245
3246 void
3247 em_pci_clear_mwi(struct em_hw *hw)
3248 {
3249         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
3250                          (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3251 }
3252
3253 uint32_t
3254 em_io_read(struct em_hw *hw, unsigned long port)
3255 {
3256         struct em_osdep *io = hw->back;
3257
3258         return bus_space_read_4(io->io_bus_space_tag,
3259                                 io->io_bus_space_handle, port);
3260 }
3261
3262 void
3263 em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
3264 {
3265         struct em_osdep *io = hw->back;
3266
3267         bus_space_write_4(io->io_bus_space_tag,
3268                           io->io_bus_space_handle, port, value);
3269 }
3270
3271 /*
3272  * We may eventually really do this, but it's unnecessary
3273  * for now, so we just return unsupported.
3274  */
3275 int32_t
3276 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3277 {
3278         return (0);
3279 }
3280
3281
3282 /*********************************************************************
3283  * 82544 Coexistence issue workaround.
3284  *    There are 2 issues.
3285  *      1. Transmit Hang issue.
3286  *    To detect this issue, the following equation can be used:
3287  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3288  *          If SUM[3:0] is between 1 and 4, we will have this issue.
3289  *
3290  *      2. DAC issue.
3291  *    To detect this issue, the following equation can be used:
3292  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3293  *          If SUM[3:0] is between 9 and c, we will have this issue.
3294  *
3295  *
3296  *    WORKAROUND:
3297  *          Make sure we do not have an ending address whose SUM[3:0] is
3298  *          1,2,3,4 (hang) or 9,a,b,c (DAC).
3299  *
3300  *************************************************************************/
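/*
 * Worked example (hypothetical segment): a buffer whose address ends in
 * 0x4 with length 8 gives SUM[3:0] = (0x4 + 0x8) & 0xF = 0xC, which lands
 * in the DAC window, so the segment is split into a (length - 4)-byte
 * descriptor followed by a trailing 4-byte descriptor.
 */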
3301 static uint32_t
3302 em_fill_descriptors(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array)
3303 {
3304         /* The issue is sensitive to both length and address. */
3305         /* Check the address first. */
3306         uint32_t safe_terminator;
3307         if (length <= 4) {
3308                 desc_array->descriptor[0].address = address;
3309                 desc_array->descriptor[0].length = length;
3310                 desc_array->elements = 1;
3311                 return (desc_array->elements);
3312         }
3313         safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
3314         /* If it falls outside 0x1-0x4 and 0x9-0xC, one descriptor is safe. */
3315         if (safe_terminator == 0 ||
3316             (safe_terminator > 4 && safe_terminator < 9) || 
3317             (safe_terminator > 0xC && safe_terminator <= 0xF)) {
3318                 desc_array->descriptor[0].address = address;
3319                 desc_array->descriptor[0].length = length;
3320                 desc_array->elements = 1;
3321                 return (desc_array->elements);
3322         }
3323
3324         desc_array->descriptor[0].address = address;
3325         desc_array->descriptor[0].length = length - 4;
3326         desc_array->descriptor[1].address = address + (length - 4);
3327         desc_array->descriptor[1].length = 4;
3328         desc_array->elements = 2;
3329         return (desc_array->elements);
3330 }
3331
3332 /**********************************************************************
3333  *
3334  *  Update the board statistics counters.
3335  *
3336  **********************************************************************/
3337 static void
3338 em_update_stats_counters(struct adapter *adapter)
3339 {
3340         struct ifnet   *ifp;
3341
3342         if (adapter->hw.media_type == em_media_type_copper ||
3343             (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3344                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3345                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3346         }
3347         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3348         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3349         adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3350         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3351
3352         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3353         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3354         adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3355         adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3356         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3357         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3358         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3359         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3360         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3361         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3362         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3363         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3364         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3365         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3366         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3367         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3368         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3369         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3370         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3371         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3372
3373         /* For the 64-bit byte counters the low dword must be read first. */
3374         /* Both registers clear on the read of the high dword */
3375
3376         adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3377         adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3378         adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3379         adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3380
3381         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3382         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3383         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3384         adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3385         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3386
3387         adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3388         adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3389         adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3390         adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3391
3392         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3393         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3394         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3395         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3396         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3397         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3398         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3399         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3400         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3401         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3402
3403         if (adapter->hw.mac_type >= em_82543) {
3404                 adapter->stats.algnerrc += 
3405                     E1000_READ_REG(&adapter->hw, ALGNERRC);
3406                 adapter->stats.rxerrc += 
3407                     E1000_READ_REG(&adapter->hw, RXERRC);
3408                 adapter->stats.tncrs += 
3409                     E1000_READ_REG(&adapter->hw, TNCRS);
3410                 adapter->stats.cexterr += 
3411                     E1000_READ_REG(&adapter->hw, CEXTERR);
3412                 adapter->stats.tsctc += 
3413                     E1000_READ_REG(&adapter->hw, TSCTC);
3414                 adapter->stats.tsctfc += 
3415                     E1000_READ_REG(&adapter->hw, TSCTFC);
3416         }
3417         ifp = &adapter->interface_data.ac_if;
3418
3419         /* Fill out the OS statistics structure */
3420         ifp->if_collisions = adapter->stats.colc;
3421
3422         /* Rx Errors */
3423         ifp->if_ierrors =
3424                 adapter->dropped_pkts +
3425                 adapter->stats.rxerrc +
3426                 adapter->stats.crcerrs +
3427                 adapter->stats.algnerrc +
3428                 adapter->stats.ruc + adapter->stats.roc +
3429                 adapter->stats.mpc + adapter->stats.cexterr +
3430                 adapter->rx_overruns;
3431
3432         /* Tx Errors */
3433         ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3434                           adapter->watchdog_timeouts;
3435 }
3436
3437
3438 /**********************************************************************
3439  *
3440  *  This routine is called only when em_display_debug_stats is enabled.
3441  *  This routine provides a way to take a look at important statistics
3442  *  maintained by the driver and hardware.
3443  *
3444  **********************************************************************/
3445 static void
3446 em_print_debug_info(struct adapter *adapter)
3447 {
3448         device_t dev = adapter->dev;
3449         uint8_t *hw_addr = adapter->hw.hw_addr;
3450
3451         device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3452         device_printf(dev, "CTRL  = 0x%x RCTL = 0x%x\n",
3453                       E1000_READ_REG(&adapter->hw, CTRL),
3454                       E1000_READ_REG(&adapter->hw, RCTL));
3455         device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk\n",
3456                       ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
3457                       (E1000_READ_REG(&adapter->hw, PBA) & 0xffff));
3458         device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3459                       adapter->hw.fc_high_water, adapter->hw.fc_low_water);
3460         device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3461                       E1000_READ_REG(&adapter->hw, TIDV),
3462                       E1000_READ_REG(&adapter->hw, TADV));
3463         device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3464                       E1000_READ_REG(&adapter->hw, RDTR),
3465                       E1000_READ_REG(&adapter->hw, RADV));
3466         device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
3467                       (long long)adapter->tx_fifo_wrk_cnt,
3468                       (long long)adapter->tx_fifo_reset_cnt);
3469         device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3470                       E1000_READ_REG(&adapter->hw, TDH),
3471                       E1000_READ_REG(&adapter->hw, TDT));
3472         device_printf(dev, "Num Tx descriptors avail = %d\n",
3473                       adapter->num_tx_desc_avail);
3474         device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
3475                       adapter->no_tx_desc_avail1);
3476         device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
3477                       adapter->no_tx_desc_avail2);
3478         device_printf(dev, "Std mbuf failed = %ld\n",
3479                       adapter->mbuf_alloc_failed);
3480         device_printf(dev, "Std mbuf cluster failed = %ld\n",
3481                       adapter->mbuf_cluster_failed);
3482         device_printf(dev, "Driver dropped packets = %ld\n",
3483                       adapter->dropped_pkts);
3484 }
3485
3486 static void
3487 em_print_hw_stats(struct adapter *adapter)
3488 {
3489         device_t dev = adapter->dev;
3490
3491         device_printf(dev, "Excessive collisions = %lld\n",
3492                       (long long)adapter->stats.ecol);
3493         device_printf(dev, "Symbol errors = %lld\n",
3494                       (long long)adapter->stats.symerrs);
3495         device_printf(dev, "Sequence errors = %lld\n",
3496                       (long long)adapter->stats.sec);
3497         device_printf(dev, "Defer count = %lld\n",
3498                       (long long)adapter->stats.dc);
3499
3500         device_printf(dev, "Missed Packets = %lld\n",
3501                       (long long)adapter->stats.mpc);
3502         device_printf(dev, "Receive No Buffers = %lld\n",
3503                       (long long)adapter->stats.rnbc);
3504         /* RLEC is inaccurate on some hardware, so calculate our own. */
3505         device_printf(dev, "Receive Length errors = %lld\n",
3506                       (long long)adapter->stats.roc +
3507                       (long long)adapter->stats.ruc);
3508         device_printf(dev, "Receive errors = %lld\n",
3509                       (long long)adapter->stats.rxerrc);
3510         device_printf(dev, "Crc errors = %lld\n",
3511                       (long long)adapter->stats.crcerrs);
3512         device_printf(dev, "Alignment errors = %lld\n",
3513                       (long long)adapter->stats.algnerrc);
3514         device_printf(dev, "Carrier extension errors = %lld\n",
3515                       (long long)adapter->stats.cexterr);
3516         device_printf(dev, "RX overruns = %lu\n", adapter->rx_overruns);
3517         device_printf(dev, "Watchdog timeouts = %lu\n",
3518                       adapter->watchdog_timeouts);
3519
3520         device_printf(dev, "XON Rcvd = %lld\n",
3521                       (long long)adapter->stats.xonrxc);
3522         device_printf(dev, "XON Xmtd = %lld\n",
3523                       (long long)adapter->stats.xontxc);
3524         device_printf(dev, "XOFF Rcvd = %lld\n",
3525                       (long long)adapter->stats.xoffrxc);
3526         device_printf(dev, "XOFF Xmtd = %lld\n",
3527                       (long long)adapter->stats.xofftxc);
3528
3529         device_printf(dev, "Good Packets Rcvd = %lld\n",
3530                       (long long)adapter->stats.gprc);
3531         device_printf(dev, "Good Packets Xmtd = %lld\n",
3532                       (long long)adapter->stats.gptc);
3533 }
3534
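/*
 * Sysctl handler: writing the value 1 to the node triggers
 * em_print_debug_info() for the associated adapter.
 */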
3535 static int
3536 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3537 {
3538         int error;
3539         int result;
3540         struct adapter *adapter;
3541
3542         result = -1;
3543         error = sysctl_handle_int(oidp, &result, 0, req);
3544
3545         if (error || !req->newptr)
3546                 return (error);
3547
3548         if (result == 1) {
3549                 adapter = (struct adapter *)arg1;
3550                 em_print_debug_info(adapter);
3551         }
3552
3553         return (error);
3554 }
3555
3556 static int
3557 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3558 {
3559         int error;
3560         int result;
3561         struct adapter *adapter;
3562
3563         result = -1;
3564         error = sysctl_handle_int(oidp, &result, 0, req);
3565
3566         if (error || !req->newptr)
3567                 return (error);
3568
3569         if (result == 1) {
3570                 adapter = (struct adapter *)arg1;
3571                 em_print_hw_stats(adapter);
3572         }
3573
3574         return (error);
3575 }
3576
3577 static int
3578 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3579 {
3580         struct em_int_delay_info *info;
3581         struct adapter *adapter;
3582         uint32_t regval;
3583         int error;
3584         int usecs;
3585         int ticks;
3586
3587         info = (struct em_int_delay_info *)arg1;
3588         adapter = info->adapter;
3589         usecs = info->value;
3590         error = sysctl_handle_int(oidp, &usecs, 0, req);
3591         if (error != 0 || req->newptr == NULL)
3592                 return (error);
3593         if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3594                 return (EINVAL);
3595         info->value = usecs;
3596         ticks = E1000_USECS_TO_TICKS(usecs);
3597
3598         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3599         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3600         regval = (regval & ~0xffff) | (ticks & 0xffff);
3601         /* Handle a few special cases. */
3602         switch (info->offset) {
3603         case E1000_RDTR:
3604         case E1000_82542_RDTR:
3605                 regval |= E1000_RDT_FPDB;
3606                 break;
3607         case E1000_TIDV:
3608         case E1000_82542_TIDV:
3609                 if (ticks == 0) {
3610                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3611                         /* Don't write 0 into the TIDV register. */
3612                         regval++;
3613                 } else
3614                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3615                 break;
3616         }
3617         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3618         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3619         return (0);
3620 }
3621
3622 static void
3623 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3624                         const char *description, struct em_int_delay_info *info,
3625                         int offset, int value)
3626 {
3627         info->adapter = adapter;
3628         info->offset = offset;
3629         info->value = value;
3630         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3631                         SYSCTL_CHILDREN(adapter->sysctl_tree),
3632                         OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3633                         info, 0, em_sysctl_int_delay, "I", description);
3634 }
3635
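/*
 * Sysctl handler for the interrupt throttle ceiling.  Assuming the node
 * is attached under the adapter's sysctl tree as something like
 * int_throttle_ceil, a hypothetical
 *
 *     sysctl hw.em0.int_throttle_ceil=10000
 *
 * would cap the adapter at roughly 10000 interrupts per second, while
 * writing 0 disables interrupt throttling entirely.
 */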
3636 static int
3637 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3638 {
3639         struct adapter *adapter = (void *)arg1;
3640         int error;
3641         int throttle;
3642
3643         throttle = em_int_throttle_ceil;
3644         error = sysctl_handle_int(oidp, &throttle, 0, req);
3645         if (error || req->newptr == NULL)
3646                 return error;
3647         if (throttle < 0 || throttle > 1000000000 / 256)
3648                 return EINVAL;
3649         if (throttle) {
3650                 /*
3651                  * Set the interrupt throttling rate in 256ns increments, then
3652                  * recalculate the sysctl value to reflect the exact rate programmed.
3653                  */
3654                 throttle = 1000000000 / 256 / throttle;
3655                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3656                 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3657                 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3658                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3659         } else {
3660                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3661                 em_int_throttle_ceil = 0;
3662                 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3663                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3664         }
3665         device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n", 
3666                         em_int_throttle_ceil);
3667         return 0;
3668 }