1 /*
2  *
3  * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4  *
5  * Copyright (c) 2001-2006, Intel Corporation
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  * 
11  *  1. Redistributions of source code must retain the above copyright notice,
12  *     this list of conditions and the following disclaimer.
13  * 
14  *  2. Redistributions in binary form must reproduce the above copyright
15  *     notice, this list of conditions and the following disclaimer in the
16  *     documentation and/or other materials provided with the distribution.
17  * 
18  *  3. Neither the name of the Intel Corporation nor the names of its
19  *     contributors may be used to endorse or promote products derived from
20  *     this software without specific prior written permission.
21  * 
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *
34  *
35  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
36  * 
37  * This code is derived from software contributed to The DragonFly Project
38  * by Matthew Dillon <dillon@backplane.com>
39  * 
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  * 3. Neither the name of The DragonFly Project nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific, prior written permission.
53  * 
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
57  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
58  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
60  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
61  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
62  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
63  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
64  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  * 
67  * $DragonFly: src/sys/dev/netif/em/if_em.c,v 1.56 2007/03/24 05:57:49 sephe Exp $
68  * $FreeBSD$
69  */
70 /*
71  * SERIALIZATION API RULES:
72  *
73  * - If the driver uses the same serializer for the interrupt as for the
74  *   ifnet, most of the serialization will be done automatically for the
75  *   driver.  
76  *
77  * - ifmedia entry points will be serialized by the ifmedia code using the
78  *   ifnet serializer.
79  *
80  * - if_* entry points except for if_input will be serialized by the IF
81  *   and protocol layers.
82  *
83  * - The device driver must be sure to serialize access from timeout code
84  *   installed by the device driver.
85  *
86  * - The device driver typically holds the serializer at the time it wishes
87  *   to call if_input.  If so, it should pass the serializer to if_input and
88  *   note that the serializer might be dropped temporarily by if_input 
89  *   (e.g. in case it has to bridge the packet to another interface).
90  *
91  *   NOTE!  Since callers into the device driver hold the ifnet serializer,
92  *   the device driver may be holding a serializer at the time it calls
93  *   if_input even if it is not serializer-aware.
94  */
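
/*
 * Illustrative sketch only (not part of the driver): the timeout rule
 * above means a callout handler must take the ifnet serializer itself.
 * Assuming a hypothetical handler example_timer() that rearms the
 * driver's timer, the usual pattern looks like:
 *
 *      static void
 *      example_timer(void *xadapter)
 *      {
 *              struct adapter *adapter = xadapter;
 *              struct ifnet *ifp = &adapter->interface_data.ac_if;
 *
 *              lwkt_serialize_enter(ifp->if_serializer);
 *              ... inspect or update shared adapter state ...
 *              callout_reset(&adapter->timer, hz, example_timer, adapter);
 *              lwkt_serialize_exit(ifp->if_serializer);
 *      }
 */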
95
96 #include "opt_polling.h"
97 #include "opt_inet.h"
98
99 #include <sys/param.h>
100 #include <sys/bus.h>
101 #include <sys/endian.h>
102 #include <sys/kernel.h>
103 #include <sys/ktr.h>
104 #include <sys/malloc.h>
105 #include <sys/mbuf.h>
106 #include <sys/module.h>
107 #include <sys/rman.h>
108 #include <sys/serialize.h>
109 #include <sys/socket.h>
110 #include <sys/sockio.h>
111 #include <sys/sysctl.h>
112
113 #include <net/bpf.h>
114 #include <net/ethernet.h>
115 #include <net/if.h>
116 #include <net/if_arp.h>
117 #include <net/if_dl.h>
118 #include <net/if_media.h>
119 #include <net/if_types.h>
120 #include <net/ifq_var.h>
121 #include <net/vlan/if_vlan_var.h>
122
123 #ifdef INET
124 #include <netinet/in.h>
125 #include <netinet/in_systm.h>
126 #include <netinet/in_var.h>
127 #include <netinet/ip.h>
128 #include <netinet/tcp.h>
129 #include <netinet/udp.h>
130 #endif
131
132 #include <dev/netif/em/if_em_hw.h>
133 #include <dev/netif/em/if_em.h>
134
135 #define EM_X60_WORKAROUND
136
137 /*********************************************************************
138  *  Set this to one to display debug statistics
139  *********************************************************************/
140 int     em_display_debug_stats = 0;
141
142 /*********************************************************************
143  *  Driver version
144  *********************************************************************/
145
146 char em_driver_version[] = "6.2.9";
147
148
149 /*********************************************************************
150  *  PCI Device ID Table
151  *
152  *  Used by probe to select devices to load on
153  *  Last field stores an index into em_strings
154  *  Last entry must be all 0s
155  *
156  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
157  *********************************************************************/
158
159 static em_vendor_info_t em_vendor_info_array[] =
160 {
161         /* Intel(R) PRO/1000 Network Connection */
162         { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
163         { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
164         { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
165         { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
166         { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},
167
168         { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
169         { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
170         { 0x8086, E1000_DEV_ID_82541ER_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
171         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
172         { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
173         { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
174         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
175
176         { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},
177
178         { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
179         { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
180
181         { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
182         { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
183         { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
184         { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
185
186         { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
187         { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
188         { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
189         { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
190         { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
191
192         { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
193         { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
194         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
195         { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
196         { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
197         { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
198         { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
199         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
200         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
201                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
202
203         { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
204         { 0x8086, E1000_DEV_ID_82547EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
205         { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
206
207         { 0x8086, E1000_DEV_ID_82571EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
208         { 0x8086, E1000_DEV_ID_82571EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
209         { 0x8086, E1000_DEV_ID_82571EB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
210         { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
211                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
212         { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE,
213                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
214
215         { 0x8086, E1000_DEV_ID_82572EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
216         { 0x8086, E1000_DEV_ID_82572EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
217         { 0x8086, E1000_DEV_ID_82572EI_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
218         { 0x8086, E1000_DEV_ID_82572EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
219
220         { 0x8086, E1000_DEV_ID_82573E,          PCI_ANY_ID, PCI_ANY_ID, 0},
221         { 0x8086, E1000_DEV_ID_82573E_IAMT,     PCI_ANY_ID, PCI_ANY_ID, 0},
222         { 0x8086, E1000_DEV_ID_82573L,          PCI_ANY_ID, PCI_ANY_ID, 0},
223
224         { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
225                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
226         { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
227                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
228         { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
229                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
230         { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
231                                                 PCI_ANY_ID, PCI_ANY_ID, 0},
232
233         { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,  PCI_ANY_ID, PCI_ANY_ID, 0},
234         { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT,    PCI_ANY_ID, PCI_ANY_ID, 0},
235         { 0x8086, E1000_DEV_ID_ICH8_IGP_C,      PCI_ANY_ID, PCI_ANY_ID, 0},
236         { 0x8086, E1000_DEV_ID_ICH8_IFE,        PCI_ANY_ID, PCI_ANY_ID, 0},
237         { 0x8086, E1000_DEV_ID_ICH8_IFE_GT,     PCI_ANY_ID, PCI_ANY_ID, 0},
238         { 0x8086, E1000_DEV_ID_ICH8_IFE_G,      PCI_ANY_ID, PCI_ANY_ID, 0},
239         { 0x8086, E1000_DEV_ID_ICH8_IGP_M,      PCI_ANY_ID, PCI_ANY_ID, 0},
240
241         { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
242         { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
243         /* required last entry */
244         { 0, 0, 0, 0, 0}
245 };
246
247 /*********************************************************************
248  *  Table of branding strings for all supported NICs.
249  *********************************************************************/
250
251 static const char *em_strings[] = {
252         "Intel(R) PRO/1000 Network Connection"
253 };
254
255 /*********************************************************************
256  *  Function prototypes
257  *********************************************************************/
258 static int      em_probe(device_t);
259 static int      em_attach(device_t);
260 static int      em_detach(device_t);
261 static int      em_shutdown(device_t);
262 static void     em_intr(void *);
263 static int      em_suspend(device_t);
264 static int      em_resume(device_t);
265 static void     em_start(struct ifnet *);
266 static int      em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
267 static void     em_watchdog(struct ifnet *);
268 static void     em_init(void *);
269 static void     em_stop(void *);
270 static void     em_media_status(struct ifnet *, struct ifmediareq *);
271 static int      em_media_change(struct ifnet *);
272 static void     em_identify_hardware(struct adapter *);
273 static int      em_allocate_pci_resources(device_t);
274 static void     em_free_pci_resources(device_t);
275 static void     em_local_timer(void *);
276 static int      em_hardware_init(struct adapter *);
277 static void     em_setup_interface(device_t, struct adapter *);
278 static int      em_setup_transmit_structures(struct adapter *);
279 static void     em_initialize_transmit_unit(struct adapter *);
280 static int      em_setup_receive_structures(struct adapter *);
281 static void     em_initialize_receive_unit(struct adapter *);
282 static void     em_enable_intr(struct adapter *);
283 static void     em_disable_intr(struct adapter *);
284 static void     em_free_transmit_structures(struct adapter *);
285 static void     em_free_receive_structures(struct adapter *);
286 static void     em_update_stats_counters(struct adapter *);
287 static void     em_txeof(struct adapter *);
288 static int      em_allocate_receive_structures(struct adapter *);
289 static void     em_rxeof(struct adapter *, int);
290 static void     em_receive_checksum(struct adapter *, struct em_rx_desc *,
291                                     struct mbuf *);
292 static void     em_transmit_checksum_setup(struct adapter *, struct mbuf *,
293                                            uint32_t *, uint32_t *);
294 static void     em_set_promisc(struct adapter *);
295 static void     em_disable_promisc(struct adapter *);
296 static void     em_set_multi(struct adapter *);
297 static void     em_print_hw_stats(struct adapter *);
298 static void     em_update_link_status(struct adapter *);
299 static int      em_get_buf(int i, struct adapter *, struct mbuf *, int how);
300 static void     em_enable_vlans(struct adapter *);
301 static void     em_disable_vlans(struct adapter *);
302 static int      em_encap(struct adapter *, struct mbuf *);
303 static void     em_smartspeed(struct adapter *);
304 static int      em_82547_fifo_workaround(struct adapter *, int);
305 static void     em_82547_update_fifo_head(struct adapter *, int);
306 static int      em_82547_tx_fifo_reset(struct adapter *);
307 static void     em_82547_move_tail(void *);
308 static void     em_82547_move_tail_serialized(struct adapter *);
309 static int      em_dma_malloc(struct adapter *, bus_size_t,
310                               struct em_dma_alloc *);
311 static void     em_dma_free(struct adapter *, struct em_dma_alloc *);
312 static void     em_print_debug_info(struct adapter *);
313 static int      em_is_valid_ether_addr(uint8_t *);
314 static int      em_sysctl_stats(SYSCTL_HANDLER_ARGS);
315 static int      em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
316 static uint32_t em_fill_descriptors(bus_addr_t address, uint32_t length, 
317                                    PDESC_ARRAY desc_array);
318 static int      em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
319 static int      em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
320 static void     em_add_int_delay_sysctl(struct adapter *, const char *,
321                                         const char *,
322                                         struct em_int_delay_info *, int, int);
323
324 /*********************************************************************
325  *  FreeBSD Device Interface Entry Points
326  *********************************************************************/
327
328 static device_method_t em_methods[] = {
329         /* Device interface */
330         DEVMETHOD(device_probe, em_probe),
331         DEVMETHOD(device_attach, em_attach),
332         DEVMETHOD(device_detach, em_detach),
333         DEVMETHOD(device_shutdown, em_shutdown),
334         DEVMETHOD(device_suspend, em_suspend),
335         DEVMETHOD(device_resume, em_resume),
336         {0, 0}
337 };
338
339 static driver_t em_driver = {
340         "em", em_methods, sizeof(struct adapter),
341 };
342
343 static devclass_t em_devclass;
344
345 DECLARE_DUMMY_MODULE(if_em);
346 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
347
348 /*********************************************************************
349  *  Tunable default values.
350  *********************************************************************/
351
352 #define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
353 #define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)
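
/*
 * The interrupt delay registers (TIDV/RDTR/TADV/RADV) count in units of
 * 1.024 microseconds, so the macros above convert between register ticks
 * and microseconds, rounding to the nearest value.  For example,
 * E1000_USECS_TO_TICKS(66) = (66000 + 512) / 1024 = 64 ticks, and
 * E1000_TICKS_TO_USECS(64) = (65536 + 500) / 1000 = 66 usecs.
 */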
354
355 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
356 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
357 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
358 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
359 static int em_int_throttle_ceil = 10000;
360 static int em_rxd = EM_DEFAULT_RXD;
361 static int em_txd = EM_DEFAULT_TXD;
362 static int em_smart_pwr_down = FALSE;
363
364 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
365 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
366 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
367 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
368 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
369 TUNABLE_INT("hw.em.rxd", &em_rxd);
370 TUNABLE_INT("hw.em.txd", &em_txd);
371 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
372
373 /*
374  * Kernel trace for characterization of operations
375  */
376 #if !defined(KTR_IF_EM)
377 #define KTR_IF_EM       KTR_ALL
378 #endif
379 KTR_INFO_MASTER(if_em);
380 KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin", 0);
381 KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end", 0);
382 #ifdef DEVICE_POLLING
383 KTR_INFO(KTR_IF_EM, if_em, poll_beg, 2, "poll begin", 0);
384 KTR_INFO(KTR_IF_EM, if_em, poll_end, 3, "poll end", 0);
385 #endif
386 KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet", 0);
387 KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet", 0);
388 KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean", 0);
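/* logif(name) logs the if_em_<name> KTR event declared above. */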
389 #define logif(name)     KTR_LOG(if_em_ ## name)
390
391 /*********************************************************************
392  *  Device identification routine
393  *
394  *  em_probe determines if the driver should be loaded on an
395  *  adapter based on the PCI vendor/device ID of the adapter.
396  *
397  *  return 0 on success, positive on failure
398  *********************************************************************/
399
400 static int
401 em_probe(device_t dev)
402 {
403         em_vendor_info_t *ent;
404
405         uint16_t pci_vendor_id = 0;
406         uint16_t pci_device_id = 0;
407         uint16_t pci_subvendor_id = 0;
408         uint16_t pci_subdevice_id = 0;
409         char adapter_name[60];
410
411         INIT_DEBUGOUT("em_probe: begin");
412
413         pci_vendor_id = pci_get_vendor(dev);
414         if (pci_vendor_id != EM_VENDOR_ID)
415                 return (ENXIO);
416
417         pci_device_id = pci_get_device(dev);
418         pci_subvendor_id = pci_get_subvendor(dev);
419         pci_subdevice_id = pci_get_subdevice(dev);
420
421         ent = em_vendor_info_array;
422         while (ent->vendor_id != 0) {
423                 if ((pci_vendor_id == ent->vendor_id) &&
424                     (pci_device_id == ent->device_id) &&
425
426                     ((pci_subvendor_id == ent->subvendor_id) ||
427                      (ent->subvendor_id == PCI_ANY_ID)) &&
428
429                     ((pci_subdevice_id == ent->subdevice_id) ||
430                      (ent->subdevice_id == PCI_ANY_ID))) {
431                         ksnprintf(adapter_name, sizeof(adapter_name),
432                                  "%s, Version - %s",  em_strings[ent->index], 
433                                  em_driver_version);
434                         device_set_desc_copy(dev, adapter_name);
435                         return (0);
436                 }
437                 ent++;
438         }
439
440         return (ENXIO);
441 }
442
443 /*********************************************************************
444  *  Device initialization routine
445  *
446  *  The attach entry point is called when the driver is being loaded.
447  *  This routine identifies the type of hardware, allocates all resources
448  *  and initializes the hardware.
449  *
450  *  return 0 on success, positive on failure
451  *********************************************************************/
452
453 static int
454 em_attach(device_t dev)
455 {
456         struct adapter *adapter;
457         int tsize, rsize;
458         int error = 0;
459
460         INIT_DEBUGOUT("em_attach: begin");
461
462         adapter = device_get_softc(dev);
463
464         callout_init(&adapter->timer);
465         callout_init(&adapter->tx_fifo_timer);
466
467         adapter->dev = dev;
468         adapter->osdep.dev = dev;
469
470         /* SYSCTL stuff */
471         sysctl_ctx_init(&adapter->sysctl_ctx);
472         adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
473                                                SYSCTL_STATIC_CHILDREN(_hw),
474                                                OID_AUTO, 
475                                                device_get_nameunit(dev),
476                                                CTLFLAG_RD,
477                                                0, "");
478
479         if (adapter->sysctl_tree == NULL) {
480                 device_printf(dev, "Unable to create sysctl tree\n");
481                 return EIO;
482         }
483
484         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
485                         SYSCTL_CHILDREN(adapter->sysctl_tree),
486                         OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, 
487                         (void *)adapter, 0,
488                         em_sysctl_debug_info, "I", "Debug Information");
489
490         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
491                         SYSCTL_CHILDREN(adapter->sysctl_tree),
492                         OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, 
493                         (void *)adapter, 0,
494                         em_sysctl_stats, "I", "Statistics");
495
496         /* Determine hardware revision */
497         em_identify_hardware(adapter);
498
499         /* Set up some sysctls for the tunable interrupt delays */
500         em_add_int_delay_sysctl(adapter, "rx_int_delay",
501                                 "receive interrupt delay in usecs",
502                                 &adapter->rx_int_delay,
503                                 E1000_REG_OFFSET(&adapter->hw, RDTR),
504                                 em_rx_int_delay_dflt);
505         em_add_int_delay_sysctl(adapter, "tx_int_delay",
506                                 "transmit interrupt delay in usecs",
507                                 &adapter->tx_int_delay,
508                                 E1000_REG_OFFSET(&adapter->hw, TIDV),
509                                 em_tx_int_delay_dflt);
510         if (adapter->hw.mac_type >= em_82540) {
511                 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
512                                         "receive interrupt delay limit in usecs",
513                                         &adapter->rx_abs_int_delay,
514                                         E1000_REG_OFFSET(&adapter->hw, RADV),
515                                         em_rx_abs_int_delay_dflt);
516                 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
517                                         "transmit interrupt delay limit in usecs",
518                                         &adapter->tx_abs_int_delay,
519                                         E1000_REG_OFFSET(&adapter->hw, TADV),
520                                         em_tx_abs_int_delay_dflt);
521                 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
522                         SYSCTL_CHILDREN(adapter->sysctl_tree),
523                         OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
524                         adapter, 0, em_sysctl_int_throttle, "I", NULL);
525         }
526
527         /*
528          * Validate the number of transmit and receive descriptors. The
529          * count must not exceed the hardware maximum, and the resulting
530          * ring size must be a multiple of EM_DBA_ALIGN.
531          */
532         if (((em_txd * sizeof(struct em_tx_desc)) % EM_DBA_ALIGN) != 0 ||
533             (adapter->hw.mac_type >= em_82544 && em_txd > EM_MAX_TXD) ||
534             (adapter->hw.mac_type < em_82544 && em_txd > EM_MAX_TXD_82543) ||
535             (em_txd < EM_MIN_TXD)) {
536                 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
537                               EM_DEFAULT_TXD, em_txd);
538                 adapter->num_tx_desc = EM_DEFAULT_TXD;
539         } else {
540                 adapter->num_tx_desc = em_txd;
541         }
542  
543         if (((em_rxd * sizeof(struct em_rx_desc)) % EM_DBA_ALIGN) != 0 ||
544             (adapter->hw.mac_type >= em_82544 && em_rxd > EM_MAX_RXD) ||
545             (adapter->hw.mac_type < em_82544 && em_rxd > EM_MAX_RXD_82543) ||
546             (em_rxd < EM_MIN_RXD)) {
547                 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
548                               EM_DEFAULT_RXD, em_rxd);
549                 adapter->num_rx_desc = EM_DEFAULT_RXD;
550         } else {
551                 adapter->num_rx_desc = em_rxd;
552         }
553
554         adapter->hw.autoneg = DO_AUTO_NEG;
555         adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
556         adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
557         adapter->hw.tbi_compatibility_en = TRUE;
558         adapter->rx_buffer_len = EM_RXBUFFER_2048;
559
560         adapter->hw.phy_init_script = 1;
561         adapter->hw.phy_reset_disable = FALSE;
562
563 #ifndef EM_MASTER_SLAVE
564         adapter->hw.master_slave = em_ms_hw_default;
565 #else
566         adapter->hw.master_slave = EM_MASTER_SLAVE;
567 #endif
568
569         /*
570          * Set the max frame size assuming standard ethernet
571          * sized frames.
572          */   
573         adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
574
575         adapter->hw.min_frame_size =
576             MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
577
578         /*
579          * This controls when hardware reports transmit completion
580          * status.
581          */
582         adapter->hw.report_tx_early = 1;
583
584         error = em_allocate_pci_resources(dev);
585         if (error)
586                 goto fail;
587
588         /* Initialize eeprom parameters */
589         em_init_eeprom_params(&adapter->hw);
590
591         tsize = roundup2(adapter->num_tx_desc * sizeof(struct em_tx_desc),
592                          EM_DBA_ALIGN);
593
594         /* Allocate Transmit Descriptor ring */
595         error = em_dma_malloc(adapter, tsize, &adapter->txdma);
596         if (error) {
597                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
598                 goto fail;
599         }
600         adapter->tx_desc_base = (struct em_tx_desc *)adapter->txdma.dma_vaddr;
601
602         rsize = roundup2(adapter->num_rx_desc * sizeof(struct em_rx_desc),
603                          EM_DBA_ALIGN);
604
605         /* Allocate Receive Descriptor ring */
606         error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
607         if (error) {
608                 device_printf(dev, "Unable to allocate rx_desc memory\n");
609                 goto fail;
610         }
611         adapter->rx_desc_base = (struct em_rx_desc *)adapter->rxdma.dma_vaddr;
612
613         /* Initialize the hardware */
614         if (em_hardware_init(adapter)) {
615                 device_printf(dev, "Unable to initialize the hardware\n");
616                 error = EIO;
617                 goto fail;
618         }
619
620         /* Copy the permanent MAC address out of the EEPROM */
621         if (em_read_mac_addr(&adapter->hw) < 0) {
622                 device_printf(dev,
623                               "EEPROM read error while reading MAC address\n");
624                 error = EIO;
625                 goto fail;
626         }
627
628         if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
629                 device_printf(dev, "Invalid MAC address\n");
630                 error = EIO;
631                 goto fail;
632         }
633
634         /* Setup OS specific network interface */
635         em_setup_interface(dev, adapter);
636
637         /* Initialize statistics */
638         em_clear_hw_cntrs(&adapter->hw);
639         em_update_stats_counters(adapter);
640         adapter->hw.get_link_status = 1;
641         em_update_link_status(adapter);
642
643         /* Indicate SOL/IDER usage */
644         if (em_check_phy_reset_block(&adapter->hw)) {
645                 device_printf(dev, "PHY reset is blocked due to "
646                               "SOL/IDER session.\n");
647         }
648  
649         /* Identify 82544 on PCIX */
650         em_get_bus_info(&adapter->hw);
651         if (adapter->hw.bus_type == em_bus_type_pcix &&
652             adapter->hw.mac_type == em_82544)
653                 adapter->pcix_82544 = TRUE;
654         else
655                 adapter->pcix_82544 = FALSE;
656
657         error = bus_setup_intr(dev, adapter->res_interrupt, INTR_NETSAFE,
658                            em_intr, adapter,
659                            &adapter->int_handler_tag,
660                            adapter->interface_data.ac_if.if_serializer);
661         if (error) {
662                 device_printf(dev, "Error registering interrupt handler!\n");
663                 ether_ifdetach(&adapter->interface_data.ac_if);
664                 goto fail;
665         }
666
667         INIT_DEBUGOUT("em_attach: end");
668         return(0);
669
670 fail:
671         em_detach(dev);
672         return(error);
673 }
674
675 /*********************************************************************
676  *  Device removal routine
677  *
678  *  The detach entry point is called when the driver is being removed.
679  *  This routine stops the adapter and deallocates all the resources
680  *  that were allocated for driver operation.
681  *
682  *  return 0 on success, positive on failure
683  *********************************************************************/
684
685 static int
686 em_detach(device_t dev)
687 {
688         struct adapter *adapter = device_get_softc(dev);
689
690         INIT_DEBUGOUT("em_detach: begin");
691
692         if (device_is_attached(dev)) {
693                 struct ifnet *ifp = &adapter->interface_data.ac_if;
694
695                 lwkt_serialize_enter(ifp->if_serializer);
696                 adapter->in_detach = 1;
697                 em_stop(adapter);
698                 em_phy_hw_reset(&adapter->hw);
699                 bus_teardown_intr(dev, adapter->res_interrupt, 
700                                   adapter->int_handler_tag);
701                 lwkt_serialize_exit(ifp->if_serializer);
702
703                 ether_ifdetach(ifp);
704         }
705         bus_generic_detach(dev);
706
707         em_free_pci_resources(dev);
708
709         /* Free Transmit Descriptor ring */
710         if (adapter->tx_desc_base != NULL) {
711                 em_dma_free(adapter, &adapter->txdma);
712                 adapter->tx_desc_base = NULL;
713         }
714
715         /* Free Receive Descriptor ring */
716         if (adapter->rx_desc_base != NULL) {
717                 em_dma_free(adapter, &adapter->rxdma);
718                 adapter->rx_desc_base = NULL;
719         }
720
721         /* Free sysctl tree */
722         if (adapter->sysctl_tree != NULL) {
723                 adapter->sysctl_tree = NULL;
724                 sysctl_ctx_free(&adapter->sysctl_ctx);
725         }
726
727         return (0);
728 }
729
730 /*********************************************************************
731  *
732  *  Shutdown entry point
733  *
734  **********************************************************************/
735
736 static int
737 em_shutdown(device_t dev)
738 {
739         struct adapter *adapter = device_get_softc(dev);
740         struct ifnet *ifp = &adapter->interface_data.ac_if;
741
742         lwkt_serialize_enter(ifp->if_serializer);
743         em_stop(adapter);
744         lwkt_serialize_exit(ifp->if_serializer);
745
746         return (0);
747 }
748
749 /*
750  * Suspend/resume device methods.
751  */
752 static int
753 em_suspend(device_t dev)
754 {
755         struct adapter *adapter = device_get_softc(dev);
756         struct ifnet *ifp = &adapter->interface_data.ac_if;
757
758         lwkt_serialize_enter(ifp->if_serializer);
759         em_stop(adapter);
760         lwkt_serialize_exit(ifp->if_serializer);
761         return (0);
762 }
763
764 static int
765 em_resume(device_t dev)
766 {
767         struct adapter *adapter = device_get_softc(dev);
768         struct ifnet *ifp = &adapter->interface_data.ac_if;
769
770         lwkt_serialize_enter(ifp->if_serializer);
771         ifp->if_flags &= ~IFF_RUNNING;
772         em_init(adapter);
773         if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
774                 em_start(ifp);
775         lwkt_serialize_exit(ifp->if_serializer);
776
777         return bus_generic_resume(dev);
778 }
779
780 /*********************************************************************
781  *  Transmit entry point
782  *
783  *  em_start is called by the stack to initiate a transmit.
784  *  The driver will remain in this routine as long as there are
785  *  packets to transmit and transmit resources are available.
786  *  If resources are not available, the stack is notified and
787  *  the packet is requeued.
788  **********************************************************************/
789
790 static void
791 em_start(struct ifnet *ifp)
792 {
793         struct mbuf *m_head;
794         struct adapter *adapter = ifp->if_softc;
795
796         ASSERT_SERIALIZED(ifp->if_serializer);
797
798         if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
799                 return;
800         if (!adapter->link_active)
801                 return;
802         while (!ifq_is_empty(&ifp->if_snd)) {
803                 m_head = ifq_poll(&ifp->if_snd);
804
805                 if (m_head == NULL)
806                         break;
807
808                 logif(pkt_txqueue);
809                 if (em_encap(adapter, m_head)) {
810                         ifp->if_flags |= IFF_OACTIVE;
811                         break;
812                 }
813                 ifq_dequeue(&ifp->if_snd, m_head);
814
815                 /* Send a copy of the frame to the BPF listener */
816                 BPF_MTAP(ifp, m_head);
817
818                 /* Set timeout in case hardware has problems transmitting. */
819                 ifp->if_timer = EM_TX_TIMEOUT;
820         }
821 }
822
823 /*********************************************************************
824  *  Ioctl entry point
825  *
826  *  em_ioctl is called when the user wants to configure the
827  *  interface.
828  *
829  *  return 0 on success, positive on failure
830  **********************************************************************/
831
832 static int
833 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
834 {
835         int max_frame_size, mask, error = 0, reinit = 0;
836         struct ifreq *ifr = (struct ifreq *) data;
837         struct adapter *adapter = ifp->if_softc;
838         uint16_t eeprom_data = 0;
839
840         ASSERT_SERIALIZED(ifp->if_serializer);
841
842         if (adapter->in_detach)
843                 return 0;
844
845         switch (command) {
846         case SIOCSIFMTU:
847                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
848                 switch (adapter->hw.mac_type) {
849                 case em_82573:
850                         /*
851                          * 82573 only supports jumbo frames
852                          * if ASPM is disabled.
853                          */
854                         em_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3,
855                             1, &eeprom_data);
856                         if (eeprom_data & EEPROM_WORD1A_ASPM_MASK) {
857                                 max_frame_size = ETHER_MAX_LEN;
858                                 break;
859                         }
860                         /* Allow Jumbo frames */
861                         /* FALLTHROUGH */
862                 case em_82571:
863                 case em_82572:
864                 case em_80003es2lan:    /* Limit Jumbo Frame size */
865                         max_frame_size = 9234;
866                         break;
867                 case em_ich8lan:
868                         /* ICH8 does not support jumbo frames */
869                         max_frame_size = ETHER_MAX_LEN;
870                         break;
871                 default:
872                         max_frame_size = MAX_JUMBO_FRAME_SIZE;
873                         break;
874                 }
875                 if (ifr->ifr_mtu >
876                         max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
877                         error = EINVAL;
878                 } else {
879                         ifp->if_mtu = ifr->ifr_mtu;
880                         adapter->hw.max_frame_size = 
881                         ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
882                         ifp->if_flags &= ~IFF_RUNNING;
883                         em_init(adapter);
884                 }
885                 break;
886         case SIOCSIFFLAGS:
887                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS "
888                                "(Set Interface Flags)");
889                 if (ifp->if_flags & IFF_UP) {
890                         if (!(ifp->if_flags & IFF_RUNNING)) {
891                                 em_init(adapter);
892                         } else if ((ifp->if_flags ^ adapter->if_flags) &
893                                    IFF_PROMISC) {
894                                 em_disable_promisc(adapter);
895                                 em_set_promisc(adapter);
896                         }
897                 } else {
898                         if (ifp->if_flags & IFF_RUNNING)
899                                 em_stop(adapter);
900                 }
901                 adapter->if_flags = ifp->if_flags;
902                 break;
903         case SIOCADDMULTI:
904         case SIOCDELMULTI:
905                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
906                 if (ifp->if_flags & IFF_RUNNING) {
907                         em_disable_intr(adapter);
908                         em_set_multi(adapter);
909                         if (adapter->hw.mac_type == em_82542_rev2_0)
910                                 em_initialize_receive_unit(adapter);
911 #ifdef DEVICE_POLLING
912                         /* Do not enable interrupt if polling(4) is enabled */
913                         if ((ifp->if_flags & IFF_POLLING) == 0)
914 #endif
915                         em_enable_intr(adapter);
916                 }
917                 break;
918         case SIOCSIFMEDIA:
919                 /* Check SOL/IDER usage */
920                 if (em_check_phy_reset_block(&adapter->hw)) {
921                         if_printf(ifp, "Media change is blocked due to "
922                                   "SOL/IDER session.\n");
923                         break;
924                 }
925                 /* FALLTHROUGH */
926         case SIOCGIFMEDIA:
927                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA "
928                                "(Get/Set Interface Media)");
929                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
930                 break;
931         case SIOCSIFCAP:
932                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
933                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
934                 if (mask & IFCAP_HWCSUM) {
935                         ifp->if_capenable ^= IFCAP_HWCSUM;
936                         reinit = 1;
937                 }
938                 if (mask & IFCAP_VLAN_HWTAGGING) {
939                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
940                         reinit = 1;
941                 }
942                 if (reinit && (ifp->if_flags & IFF_RUNNING)) {
943                         ifp->if_flags &= ~IFF_RUNNING;
944                         em_init(adapter);
945                 }
946                 break;
947         default:
948                 error = ether_ioctl(ifp, command, data);
949                 break;
950         }
951
952         return (error);
953 }
954
955 /*********************************************************************
956  *  Watchdog entry point
957  *
958  *  This routine is called whenever the hardware stops transmitting.
959  *
960  **********************************************************************/
961
962 static void
963 em_watchdog(struct ifnet *ifp)
964 {
965         struct adapter *adapter = ifp->if_softc;
966
967         /*
968          * If we are in this routine because of pause frames, then
969          * don't reset the hardware.
970          */
971         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
972                 ifp->if_timer = EM_TX_TIMEOUT;
973                 return;
974         }
975
976         if (em_check_for_link(&adapter->hw) == 0)
977                 if_printf(ifp, "watchdog timeout -- resetting\n");
978
979         ifp->if_flags &= ~IFF_RUNNING;
980         em_init(adapter);
981
982         adapter->watchdog_timeouts++;
983 }
984
985 /*********************************************************************
986  *  Init entry point
987  *
988  *  This routine is used in two ways. It is used by the stack as the
989  *  init entry point in the network interface structure. It is also used
990  *  by the driver as a hw/sw initialization routine to get to a
991  *  consistent state.
992  *
994  **********************************************************************/
995
996 static void
997 em_init(void *arg)
998 {
999         struct adapter *adapter = arg;
1000         uint32_t pba;
1001         struct ifnet *ifp = &adapter->interface_data.ac_if;
1002
1003         ASSERT_SERIALIZED(ifp->if_serializer);
1004
1005         INIT_DEBUGOUT("em_init: begin");
1006
1007         if (ifp->if_flags & IFF_RUNNING)
1008                 return;
1009
1010         em_stop(adapter);
1011
1012         /*
1013          * Packet Buffer Allocation (PBA)
1014  * Writing PBA sets the receive portion of the buffer;
1015          * the remainder is used for the transmit buffer.
1016          *
1017          * Devices before the 82547 had a Packet Buffer of 64K.
1018          *   Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1019  * Beginning with the 82547, the buffer was reduced to 40K.
1020          *   Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1021          *   Note: default does not leave enough room for Jumbo Frame >10k.
1022          */
1023         switch (adapter->hw.mac_type) {
1024         case em_82547:
1025         case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1026                 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
1027                         pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1028                 else
1029                         pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1030
1031                 adapter->tx_fifo_head = 0;
1032                 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1033                 adapter->tx_fifo_size =
1034                         (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
1035                 break;
1036         /* Total Packet Buffer on these is 48K */
1037         case em_82571:
1038         case em_82572:
1039         case em_80003es2lan:
1040                 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1041                 break;
1042         case em_82573: /* 82573: Total Packet Buffer is 32K */
1043                 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1044                 break;
1045         case em_ich8lan:
1046                 pba = E1000_PBA_8K;
1047                 break;
1048         default:
1049                 /* Devices before 82547 had a Packet Buffer of 64K.   */
1050                 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
1051                         pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1052                 else
1053                         pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1054         }
1055
1056         INIT_DEBUGOUT1("em_init: pba=%dK",pba);
1057         E1000_WRITE_REG(&adapter->hw, PBA, pba);
1058
1059         /* Get the latest mac address, User can use a LAA */
1060         bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
1061               ETHER_ADDR_LEN);
1062
1063         /* Initialize the hardware */
1064         if (em_hardware_init(adapter)) {
1065                 if_printf(ifp, "Unable to initialize the hardware\n");
1066                 return;
1067         }
1068         em_update_link_status(adapter);
1069
1070         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1071                 em_enable_vlans(adapter);
1072
1073         /* Set hardware offload abilities */
1074         if (adapter->hw.mac_type >= em_82543) {
1075                 if (ifp->if_capenable & IFCAP_TXCSUM)
1076                         ifp->if_hwassist = EM_CHECKSUM_FEATURES;
1077                 else
1078                         ifp->if_hwassist = 0;
1079         }
1080
1081         /* Prepare transmit descriptors and buffers */
1082         if (em_setup_transmit_structures(adapter)) {
1083                 if_printf(ifp, "Could not setup transmit structures\n");
1084                 em_stop(adapter);
1085                 return;
1086         }
1087         em_initialize_transmit_unit(adapter);
1088
1089         /* Setup Multicast table */
1090         em_set_multi(adapter);
1091
1092         /* Prepare receive descriptors and buffers */
1093         if (em_setup_receive_structures(adapter)) {
1094                 if_printf(ifp, "Could not setup receive structures\n");
1095                 em_stop(adapter);
1096                 return;
1097         }
1098         em_initialize_receive_unit(adapter);
1099
1100         /* Don't lose promiscuous settings */
1101         em_set_promisc(adapter);
1102
1103         ifp->if_flags |= IFF_RUNNING;
1104         ifp->if_flags &= ~IFF_OACTIVE;
1105
1106         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1107         em_clear_hw_cntrs(&adapter->hw);
1108
1109 #ifdef DEVICE_POLLING
1110         /* Do not enable interrupt if polling(4) is enabled */
1111         if (ifp->if_flags & IFF_POLLING)
1112                 em_disable_intr(adapter);
1113         else
1114 #endif
1115         em_enable_intr(adapter);
1116
1117         /* Don't reset the phy next time init gets called */
1118         adapter->hw.phy_reset_disable = TRUE;
1119 }
1120
1121 #ifdef DEVICE_POLLING
1122
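/*
 * polling(4) handler: called by the kernel polling code in place of the
 * interrupt handler while IFF_POLLING is active.  POLL_REGISTER and
 * POLL_DEREGISTER mask/unmask the hardware interrupt, and
 * POLL_AND_CHECK_STATUS additionally samples ICR to catch link state
 * changes; both status and plain polls then run the rx/tx cleanup paths.
 */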
1123 static void
1124 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1125 {
1126         struct adapter *adapter = ifp->if_softc;
1127         uint32_t reg_icr;
1128
1129         logif(poll_beg);
1130
1131         ASSERT_SERIALIZED(ifp->if_serializer);
1132
1133         switch(cmd) {
1134         case POLL_REGISTER:
1135                 em_disable_intr(adapter);
1136                 break;
1137         case POLL_DEREGISTER:
1138                 em_enable_intr(adapter);
1139                 break;
1140         case POLL_AND_CHECK_STATUS:
1141                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1142                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1143                         callout_stop(&adapter->timer);
1144                         adapter->hw.get_link_status = 1;
1145                         em_check_for_link(&adapter->hw);
1146                         em_update_link_status(adapter);
1147                         callout_reset(&adapter->timer, hz, em_local_timer,
1148                                       adapter);
1149                 }
1150                 /* fall through */
1151         case POLL_ONLY:
1152                 if (ifp->if_flags & IFF_RUNNING) {
1153                         em_rxeof(adapter, count);
1154                         em_txeof(adapter);
1155
1156                         if (!ifq_is_empty(&ifp->if_snd))
1157                                 em_start(ifp);
1158                 }
1159                 break;
1160         }
1161         logif(poll_end);
1162 }
1163
1164 #endif /* DEVICE_POLLING */
1165
1166 /*********************************************************************
1167  *
1168  *  Interrupt Service routine
1169  *
1170  *********************************************************************/
1171 static void
1172 em_intr(void *arg)
1173 {
1174         uint32_t reg_icr;
1175         struct ifnet *ifp;
1176         struct adapter *adapter = arg;
1177
1178         ifp = &adapter->interface_data.ac_if;  
1179
1180         logif(intr_beg);
1181         ASSERT_SERIALIZED(ifp->if_serializer);
1182
1183         reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1184         if ((adapter->hw.mac_type >= em_82571 &&
1185              (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
1186             reg_icr == 0) {
1187                 logif(intr_end);
1188                 return;
1189         }
1190
1191         /*
1192          * XXX: some laptops trigger several spurious interrupts on em(4)
1193  * when in the resume cycle. The ICR register reports an all-ones
1194  * value in this case. Processing such interrupts would lead to
1195          * a freeze. I don't know why.
1196          */
1197         if (reg_icr == 0xffffffff) {
1198                 logif(intr_end);
1199                 return;
1200         }
1201
1202         /*
1203          * note: do not attempt to improve efficiency by looping.  This 
1204          * only results in unnecessary piecemeal collection of received
1205          * packets and unnecessary piecemeal cleanups of the transmit ring.
1206          */
1207         if (ifp->if_flags & IFF_RUNNING) {
1208                 em_rxeof(adapter, -1);
1209                 em_txeof(adapter);
1210         }
1211
1212         /* Link status change */
1213         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1214                 callout_stop(&adapter->timer);
1215                 adapter->hw.get_link_status = 1;
1216                 em_check_for_link(&adapter->hw);
1217                 em_update_link_status(adapter);
1218                 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1219         }
1220
1221         if (reg_icr & E1000_ICR_RXO)
1222                 adapter->rx_overruns++;
1223
1224         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
1225                 em_start(ifp);
1226         logif(intr_end);
1227 }
1228
1229 /*********************************************************************
1230  *
1231  *  Media Ioctl callback
1232  *
1233  *  This routine is called whenever the user queries the status of
1234  *  the interface using ifconfig.
1235  *
1236  **********************************************************************/
1237 static void
1238 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1239 {
1240         struct adapter *adapter = ifp->if_softc;
1241         u_char fiber_type = IFM_1000_SX;
1242
1243         INIT_DEBUGOUT("em_media_status: begin");
1244
1245         ASSERT_SERIALIZED(ifp->if_serializer);
1246
1247         em_check_for_link(&adapter->hw);
1248         em_update_link_status(adapter);
1249
1250         ifmr->ifm_status = IFM_AVALID;
1251         ifmr->ifm_active = IFM_ETHER;
1252
1253         if (!adapter->link_active)
1254                 return;
1255
1256         ifmr->ifm_status |= IFM_ACTIVE;
1257
1258         if (adapter->hw.media_type == em_media_type_fiber ||
1259             adapter->hw.media_type == em_media_type_internal_serdes) {
1260                 if (adapter->hw.mac_type == em_82545)
1261                         fiber_type = IFM_1000_LX;
1262                 ifmr->ifm_active |= fiber_type | IFM_FDX;
1263         } else {
1264                 switch (adapter->link_speed) {
1265                 case 10:
1266                         ifmr->ifm_active |= IFM_10_T;
1267                         break;
1268                 case 100:
1269                         ifmr->ifm_active |= IFM_100_TX;
1270                         break;
1271                 case 1000:
1272                         ifmr->ifm_active |= IFM_1000_T;
1273                         break;
1274                 }
1275                 if (adapter->link_duplex == FULL_DUPLEX)
1276                         ifmr->ifm_active |= IFM_FDX;
1277                 else
1278                         ifmr->ifm_active |= IFM_HDX;
1279         }
1280 }
1281
1282 /*********************************************************************
1283  *
1284  *  Media Ioctl callback
1285  *
1286  *  This routine is called when the user changes speed/duplex using
1287  *  media/mediaopt option with ifconfig.
1288  *
1289  **********************************************************************/
1290 static int
1291 em_media_change(struct ifnet *ifp)
1292 {
1293         struct adapter *adapter = ifp->if_softc;
1294         struct ifmedia *ifm = &adapter->media;
1295
1296         INIT_DEBUGOUT("em_media_change: begin");
1297
1298         ASSERT_SERIALIZED(ifp->if_serializer);
1299
1300         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1301                 return (EINVAL);
1302
1303         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1304         case IFM_AUTO:
1305                 adapter->hw.autoneg = DO_AUTO_NEG;
1306                 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1307                 break;
1308         case IFM_1000_LX:
1309         case IFM_1000_SX:
1310         case IFM_1000_T:
1311                 adapter->hw.autoneg = DO_AUTO_NEG;
1312                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1313                 break;
1314         case IFM_100_TX:
1315                 adapter->hw.autoneg = FALSE;
1316                 adapter->hw.autoneg_advertised = 0;
1317                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1318                         adapter->hw.forced_speed_duplex = em_100_full;
1319                 else
1320                         adapter->hw.forced_speed_duplex = em_100_half;
1321                 break;
1322         case IFM_10_T:
1323                 adapter->hw.autoneg = FALSE;
1324                 adapter->hw.autoneg_advertised = 0;
1325                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1326                         adapter->hw.forced_speed_duplex = em_10_full;
1327                 else
1328                         adapter->hw.forced_speed_duplex = em_10_half;
1329                 break;
1330         default:
1331                 if_printf(ifp, "Unsupported media type\n");
1332         }
1333         /*
1334          * As the speed/duplex settings may have changed we need to
1335          * reset the PHY.
1336          */
1337         adapter->hw.phy_reset_disable = FALSE;
1338
1339         ifp->if_flags &= ~IFF_RUNNING;
1340         em_init(adapter);
1341
1342         return(0);
1343 }
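
/*
 * Editor's sketch (not driver code): how a fixed-media request decomposes
 * into the subtype and duplex flag that em_media_change() inspects above.
 * The helper name and its 0/1 return value are hypothetical; the IFM_*
 * macros are the standard net/if_media.h ones already used in this file.
 */
static int
em_example_media_is_forced_full_duplex(int ifm_media)
{
	/* Only 10baseT/100baseTX are forced; everything else autonegotiates. */
	if (IFM_SUBTYPE(ifm_media) != IFM_10_T &&
	    IFM_SUBTYPE(ifm_media) != IFM_100_TX)
		return (0);

	/* The duplex request travels in the option (GMASK) bits. */
	return ((ifm_media & IFM_GMASK) == IFM_FDX);
}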
1344
1345 static void
1346 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1347          int error)
1348 {
1349         struct em_q *q = arg;
1350
1351         if (error)
1352                 return;
1353         KASSERT(nsegs <= EM_MAX_SCATTER,
1354                 ("Too many DMA segments returned when mapping tx packet"));
1355         q->nsegs = nsegs;
1356         bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1357 }
1358
1359 /*********************************************************************
1360  *
1361  *  This routine maps the mbufs to tx descriptors.
1362  *
1363  *  return 0 on success, positive on failure
1364  **********************************************************************/
1365 static int
1366 em_encap(struct adapter *adapter, struct mbuf *m_head)
1367 {
1368         uint32_t txd_upper = 0, txd_lower = 0, txd_used = 0, txd_saved = 0;
1369         int i, j, error, last = 0;
1370
1371         struct ifvlan *ifv = NULL;
1372         struct em_q q;
1373         struct em_buffer *tx_buffer = NULL, *tx_buffer_first;
1374         bus_dmamap_t map;
1375         struct em_tx_desc *current_tx_desc = NULL;
1376         struct ifnet *ifp = &adapter->interface_data.ac_if;
1377
1378         /*
1379          * Force a cleanup if number of TX descriptors
1380          * available hits the threshold
1381          */
1382         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1383                 em_txeof(adapter);
1384                 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1385                         adapter->no_tx_desc_avail1++;
1386                         return (ENOBUFS);
1387                 }
1388         }
1389
1390         /*
1391          * Capture the first descriptor index, this descriptor will have
1392          * the index of the EOP which is the only one that now gets a
1393          * DONE bit writeback.
1394          */
1395         tx_buffer_first = &adapter->tx_buffer_area[adapter->next_avail_tx_desc];
1396
1397         /*
1398          * Map the packet for DMA.
1399          */
1400         map = tx_buffer_first->map;
1401         error = bus_dmamap_load_mbuf(adapter->txtag, map, m_head, em_tx_cb,
1402                                      &q, BUS_DMA_NOWAIT);
1403         if (error != 0) {
1404                 adapter->no_tx_dma_setup++;
1405                 return (error);
1406         }
1407         KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1408
1409         if (q.nsegs > (adapter->num_tx_desc_avail - 2)) {
1410                 adapter->no_tx_desc_avail2++;
1411                 error = ENOBUFS;
1412                 goto fail;
1413         }
1414
1415         if (ifp->if_hwassist > 0) {
1416                 em_transmit_checksum_setup(adapter,  m_head,
1417                                            &txd_upper, &txd_lower);
1418         }
1419
1420         /* Find out if we are in vlan mode */
1421         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1422             m_head->m_pkthdr.rcvif != NULL &&
1423             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1424                 ifv = m_head->m_pkthdr.rcvif->if_softc;
1425
1426         i = adapter->next_avail_tx_desc;
1427         if (adapter->pcix_82544)
1428                 txd_saved = i;
1429
1430         /* Set up our transmit descriptors */
1431         for (j = 0; j < q.nsegs; j++) {
1432                 /* If adapter is 82544 and on PCIX bus */
1433                 if (adapter->pcix_82544) {
1434                         DESC_ARRAY desc_array;
1435                         uint32_t array_elements, counter;
1436
1437                         /* 
1438                          * Check the Address and Length combination and
1439                          * split the data accordingly
1440                          */
1441                         array_elements = em_fill_descriptors(q.segs[j].ds_addr,
1442                                                 q.segs[j].ds_len, &desc_array);
1443                         for (counter = 0; counter < array_elements; counter++) {
1444                                 if (txd_used == adapter->num_tx_desc_avail) {
1445                                         adapter->next_avail_tx_desc = txd_saved;
1446                                         adapter->no_tx_desc_avail2++;
1447                                         error = ENOBUFS;
1448                                         goto fail;
1449                                 }
1450                                 tx_buffer = &adapter->tx_buffer_area[i];
1451                                 current_tx_desc = &adapter->tx_desc_base[i];
1452                                 current_tx_desc->buffer_addr = htole64(
1453                                         desc_array.descriptor[counter].address);
1454                                 current_tx_desc->lower.data = htole32(
1455                                         adapter->txd_cmd | txd_lower |
1456                                         (uint16_t)desc_array.descriptor[counter].length);
1457                                 current_tx_desc->upper.data = htole32(txd_upper);
1458
1459                                 last = i;
1460                                 if (++i == adapter->num_tx_desc)
1461                                         i = 0;
1462
1463                                 tx_buffer->m_head = NULL;
1464                                 tx_buffer->next_eop = -1;
1465                                 txd_used++;
1466                         }
1467                 } else {
1468                         tx_buffer = &adapter->tx_buffer_area[i];
1469                         current_tx_desc = &adapter->tx_desc_base[i];
1470
1471                         current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1472                         current_tx_desc->lower.data = htole32(
1473                                 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1474                         current_tx_desc->upper.data = htole32(txd_upper);
1475
1476                         last = i;
1477                         if (++i == adapter->num_tx_desc)
1478                                 i = 0;
1479
1480                         tx_buffer->m_head = NULL;
1481                         tx_buffer->next_eop = -1;
1482                 }
1483         }
1484
1485         adapter->next_avail_tx_desc = i;
1486         if (adapter->pcix_82544)
1487                 adapter->num_tx_desc_avail -= txd_used;
1488         else
1489                 adapter->num_tx_desc_avail -= q.nsegs;
1490
1491         if (ifv != NULL) {
1492                 /* Set the vlan id */
1493                 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1494
1495                 /* Tell hardware to add tag */
1496                 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1497         }
1498
1499         tx_buffer->m_head = m_head;
1500         tx_buffer_first->map = tx_buffer->map;
1501         tx_buffer->map = map;
1502         bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
1503
1504         /*
1505          * Last Descriptor of Packet needs End Of Packet (EOP)
1506          * and Report Status (RS)
1507          */
1508         current_tx_desc->lower.data |=
1509                 htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
1510
1511         /*
1512          * Keep track in the first buffer which descriptor will be
1513          * written back.
1514          */
1515         tx_buffer_first->next_eop = last;
1516
1517         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1518                         BUS_DMASYNC_PREWRITE);
1519
1520         /* 
1521          * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
1522          * that this frame is available to transmit.
1523          */
1524         if (adapter->hw.mac_type == em_82547 &&
1525             adapter->link_duplex == HALF_DUPLEX) {
1526                 em_82547_move_tail_serialized(adapter);
1527         } else {
1528                 E1000_WRITE_REG(&adapter->hw, TDT, i);
1529                 if (adapter->hw.mac_type == em_82547) {
1530                         em_82547_update_fifo_head(adapter,
1531                                                   m_head->m_pkthdr.len);
1532                 }
1533         }
1534
1535         return (0);
1536 fail:
1537         bus_dmamap_unload(adapter->txtag, map);
1538         return error;
1539 }
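
/*
 * Editor's sketch (not driver code) of the ring arithmetic em_encap()
 * performs: the producer index wraps modulo the ring size and the count
 * of free descriptors drops by the number of segments consumed.  The
 * function name is hypothetical.
 */
static int
em_example_ring_advance(int idx, int nsegs, int ring_size, int *avail)
{
	int j;

	for (j = 0; j < nsegs; j++) {
		if (++idx == ring_size)		/* wrap at end of ring */
			idx = 0;
	}
	*avail -= nsegs;			/* descriptors consumed */
	return (idx);				/* new next_avail_tx_desc */
}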
1540
1541 /*********************************************************************
1542  *
1543  * 82547 workaround to avoid controller hang in half-duplex environment.
1544  * 82547 workaround to avoid controller hang in a half-duplex environment.
1545  * The workaround is to avoid queuing a large packet that would span
1546  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1547  * in this case, and we do that only when the FIFO is quiescent.
1548  **********************************************************************/
1549 static void
1550 em_82547_move_tail(void *arg)
1551 {
1552         struct adapter *adapter = arg;
1553         struct ifnet *ifp = &adapter->interface_data.ac_if;
1554
1555         lwkt_serialize_enter(ifp->if_serializer);
1556         em_82547_move_tail_serialized(adapter);
1557         lwkt_serialize_exit(ifp->if_serializer);
1558 }
1559
1560 static void
1561 em_82547_move_tail_serialized(struct adapter *adapter)
1562 {
1563         uint16_t hw_tdt;
1564         uint16_t sw_tdt;
1565         struct em_tx_desc *tx_desc;
1566         uint16_t length = 0;
1567         boolean_t eop = 0;
1568
1569         hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1570         sw_tdt = adapter->next_avail_tx_desc;
1571
1572         while (hw_tdt != sw_tdt) {
1573                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1574                 length += tx_desc->lower.flags.length;
1575                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1576                 if (++hw_tdt == adapter->num_tx_desc)
1577                         hw_tdt = 0;
1578
1579                 if (eop) {
1580                         if (em_82547_fifo_workaround(adapter, length)) {
1581                                 adapter->tx_fifo_wrk_cnt++;
1582                                 callout_reset(&adapter->tx_fifo_timer, 1,
1583                                         em_82547_move_tail, adapter);
1584                                 break;
1585                         }
1586                         E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1587                         em_82547_update_fifo_head(adapter, length);
1588                         length = 0;
1589                 }
1590         }       
1591 }
1592
1593 static int
1594 em_82547_fifo_workaround(struct adapter *adapter, int len)
1595 {       
1596         int fifo_space, fifo_pkt_len;
1597
1598         fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1599
1600         if (adapter->link_duplex == HALF_DUPLEX) {
1601                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1602
1603                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1604                         if (em_82547_tx_fifo_reset(adapter))
1605                                 return (0);
1606                         else
1607                                 return (1);
1608                 }
1609         }
1610
1611         return (0);
1612 }
1613
1614 static void
1615 em_82547_update_fifo_head(struct adapter *adapter, int len)
1616 {
1617         int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1618
1619         /* tx_fifo_head is always 16 byte aligned */
1620         adapter->tx_fifo_head += fifo_pkt_len;
1621         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1622                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1623 }
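
/*
 * Editor's sketch (not driver code): the FIFO accounting used by the
 * 82547 workaround above.  roundup2() (from sys/param.h) rounds the frame
 * cost up to the next EM_FIFO_HDR boundary and the head pointer wraps
 * modulo the FIFO size.  The function name is hypothetical.
 */
static int
em_example_fifo_advance(int head, int len, int fifo_size)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	head += fifo_pkt_len;
	if (head >= fifo_size)
		head -= fifo_size;
	return (head);
}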
1624
1625 static int
1626 em_82547_tx_fifo_reset(struct adapter *adapter)
1627 {
1628         uint32_t tctl;
1629
1630         if (E1000_READ_REG(&adapter->hw, TDT) == E1000_READ_REG(&adapter->hw, TDH) &&
1631             E1000_READ_REG(&adapter->hw, TDFT) == E1000_READ_REG(&adapter->hw, TDFH) &&
1632             E1000_READ_REG(&adapter->hw, TDFTS) == E1000_READ_REG(&adapter->hw, TDFHS) &&
1633             E1000_READ_REG(&adapter->hw, TDFPC) == 0) {
1634                 /* Disable TX unit */
1635                 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1636                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1637
1638                 /* Reset FIFO pointers */
1639                 E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1640                 E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1641                 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1642                 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1643
1644                 /* Re-enable TX unit */
1645                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1646                 E1000_WRITE_FLUSH(&adapter->hw);
1647
1648                 adapter->tx_fifo_head = 0;
1649                 adapter->tx_fifo_reset_cnt++;
1650
1651                 return (TRUE);
1652         } else {
1653                 return (FALSE);
1654         }
1655 }
1656
1657 static void
1658 em_set_promisc(struct adapter *adapter)
1659 {
1660         uint32_t reg_rctl;
1661         struct ifnet *ifp = &adapter->interface_data.ac_if;
1662
1663         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1664
1665         adapter->em_insert_vlan_header = 0;
1666         if (ifp->if_flags & IFF_PROMISC) {
1667                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1668                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1669
1670                 /*
1671                  * Disable VLAN stripping in promiscuous mode.
1672                  * This allows bridging of vlan tagged frames
1673                  * and also allows vlan tags to be seen in tcpdump.
1674                  */
1675                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1676                         em_disable_vlans(adapter);
1677                 adapter->em_insert_vlan_header = 1;
1678         } else if (ifp->if_flags & IFF_ALLMULTI) {
1679                 reg_rctl |= E1000_RCTL_MPE;
1680                 reg_rctl &= ~E1000_RCTL_UPE;
1681                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1682         }
1683 }
1684
1685 static void
1686 em_disable_promisc(struct adapter *adapter)
1687 {
1688         struct ifnet *ifp = &adapter->interface_data.ac_if;
1689
1690         uint32_t reg_rctl;
1691
1692         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1693
1694         reg_rctl &= (~E1000_RCTL_UPE);
1695         reg_rctl &= (~E1000_RCTL_MPE);
1696         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1697
1698         if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
1699                 em_enable_vlans(adapter);
1700         adapter->em_insert_vlan_header = 0;
1701 }
1702
1703 /*********************************************************************
1704  *  Multicast Update
1705  *
1706  *  This routine is called whenever the multicast address list is updated.
1707  *
1708  **********************************************************************/
1709
1710 static void
1711 em_set_multi(struct adapter *adapter)
1712 {
1713         uint32_t reg_rctl = 0;
1714         uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1715         struct ifmultiaddr *ifma;
1716         int mcnt = 0;
1717         struct ifnet *ifp = &adapter->interface_data.ac_if;
1718
1719         IOCTL_DEBUGOUT("em_set_multi: begin");
1720
1721         if (adapter->hw.mac_type == em_82542_rev2_0) {
1722                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1723                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1724                         em_pci_clear_mwi(&adapter->hw);
1725                 reg_rctl |= E1000_RCTL_RST;
1726                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1727                 msec_delay(5);
1728         }
1729
1730         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1731                 if (ifma->ifma_addr->sa_family != AF_LINK)
1732                         continue;
1733
1734                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1735                         break;
1736
1737                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1738                       &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1739                 mcnt++;
1740         }
1741
1742         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1743                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1744                 reg_rctl |= E1000_RCTL_MPE;
1745                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1746         } else {
1747                 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1748         }
1749
1750         if (adapter->hw.mac_type == em_82542_rev2_0) {
1751                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1752                 reg_rctl &= ~E1000_RCTL_RST;
1753                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1754                 msec_delay(5);
1755                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1756                         em_pci_set_mwi(&adapter->hw);
1757         }
1758 }
1759
1760 /*********************************************************************
1761  *  Timer routine
1762  *
1763  *  This routine checks for link status and updates statistics.
1764  *
1765  **********************************************************************/
1766
1767 static void
1768 em_local_timer(void *arg)
1769 {
1770         struct ifnet *ifp;
1771         struct adapter *adapter = arg;
1772         ifp = &adapter->interface_data.ac_if;
1773
1774         lwkt_serialize_enter(ifp->if_serializer);
1775
1776         em_check_for_link(&adapter->hw);
1777         em_update_link_status(adapter);
1778         em_update_stats_counters(adapter);
1779         if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1780                 em_print_hw_stats(adapter);
1781         em_smartspeed(adapter);
1782
1783         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1784
1785         lwkt_serialize_exit(ifp->if_serializer);
1786 }
1787
1788 static void
1789 em_update_link_status(struct adapter *adapter)
1790 {
1791         struct ifnet *ifp;
1792         ifp = &adapter->interface_data.ac_if;
1793
1794         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1795                 if (adapter->link_active == 0) {
1796                         em_get_speed_and_duplex(&adapter->hw, 
1797                                                 &adapter->link_speed, 
1798                                                 &adapter->link_duplex);
1799                         /* Check if we may set SPEED_MODE bit on PCI-E */
1800                         if (adapter->link_speed == SPEED_1000 &&
1801                             (adapter->hw.mac_type == em_82571 ||
1802                              adapter->hw.mac_type == em_82572)) {
1803                                 int tarc0;
1804
1805                                 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
1806                                 tarc0 |= SPEED_MODE_BIT;
1807                                 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
1808                         }
1809                         if (bootverbose) {
1810                                 if_printf(&adapter->interface_data.ac_if,
1811                                           "Link is up %d Mbps %s\n",
1812                                           adapter->link_speed,
1813                                           adapter->link_duplex == FULL_DUPLEX ?
1814                                                 "Full Duplex" : "Half Duplex");
1815                         }
1816                         adapter->link_active = 1;
1817                         adapter->smartspeed = 0;
1818                         ifp->if_baudrate = adapter->link_speed * 1000000;
1819                         ifp->if_link_state = LINK_STATE_UP;
1820                         if_link_state_change(ifp);
1821                 }
1822         } else {
1823                 if (adapter->link_active == 1) {
1824                         ifp->if_baudrate = 0;
1825                         adapter->link_speed = 0;
1826                         adapter->link_duplex = 0;
1827                         if (bootverbose) {
1828                                 if_printf(&adapter->interface_data.ac_if,
1829                                           "Link is Down\n");
1830                         }
1831                         adapter->link_active = 0;
1832                         ifp->if_link_state = LINK_STATE_DOWN;
1833                         if_link_state_change(ifp);
1834                 }
1835         }
1836 }
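
/*
 * Editor's sketch (not driver code): em_update_link_status() is edge
 * triggered -- it notifies the stack only when the link transitions --
 * and if_baudrate is derived from the speed reported in Mbps.  The helper
 * name is hypothetical.
 */
static void
em_example_report_link(struct ifnet *ifp, int link_up, int speed_mbps)
{
	ifp->if_baudrate = link_up ? speed_mbps * 1000000 : 0;
	ifp->if_link_state = link_up ? LINK_STATE_UP : LINK_STATE_DOWN;
	if_link_state_change(ifp);	/* tell the stack about the edge */
}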
1837
1838 /*********************************************************************
1839  *
1840  *  This routine disables all traffic on the adapter by issuing a
1841  *  global reset on the MAC and deallocates TX/RX buffers.
1842  *
1843  **********************************************************************/
1844
1845 static void
1846 em_stop(void *arg)
1847 {
1848         struct ifnet   *ifp;
1849         struct adapter * adapter = arg;
1850         ifp = &adapter->interface_data.ac_if;
1851
1852         ASSERT_SERIALIZED(ifp->if_serializer);
1853
1854         INIT_DEBUGOUT("em_stop: begin");
1855         em_disable_intr(adapter);
1856         em_reset_hw(&adapter->hw);
1857         callout_stop(&adapter->timer);
1858         callout_stop(&adapter->tx_fifo_timer);
1859         em_free_transmit_structures(adapter);
1860         em_free_receive_structures(adapter);
1861
1862         /* Tell the stack that the interface is no longer active */
1863         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1864         ifp->if_timer = 0;
1865 }
1866
1867 /*********************************************************************
1868  *
1869  *  Determine hardware revision.
1870  *
1871  **********************************************************************/
1872 static void
1873 em_identify_hardware(struct adapter *adapter)
1874 {
1875         device_t dev = adapter->dev;
1876
1877         /* Make sure our PCI config space has the necessary stuff set */
1878         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1879         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1880               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1881                 device_printf(dev, "Memory Access and/or Bus Master bits "
1882                               "were not set!\n");
1883                 adapter->hw.pci_cmd_word |= PCIM_CMD_BUSMASTEREN |
1884                                             PCIM_CMD_MEMEN;
1885                 pci_write_config(dev, PCIR_COMMAND,
1886                                  adapter->hw.pci_cmd_word, 2);
1887         }
1888
1889         /* Save off the information about this board */
1890         adapter->hw.vendor_id = pci_get_vendor(dev);
1891         adapter->hw.device_id = pci_get_device(dev);
1892         adapter->hw.revision_id = pci_get_revid(dev);
1893         adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1894         adapter->hw.subsystem_id = pci_get_subdevice(dev);
1895
1896         /* Identify the MAC */
1897         if (em_set_mac_type(&adapter->hw))
1898                 device_printf(dev, "Unknown MAC Type\n");
1899
1900         if (adapter->hw.mac_type == em_82541 ||
1901             adapter->hw.mac_type == em_82541_rev_2 ||
1902             adapter->hw.mac_type == em_82547 ||
1903             adapter->hw.mac_type == em_82547_rev_2)
1904                 adapter->hw.phy_init_script = TRUE;
1905 }
1906
1907 static int
1908 em_allocate_pci_resources(device_t dev)
1909 {
1910         struct adapter *adapter = device_get_softc(dev);
1911         int rid;
1912
1913         rid = PCIR_BAR(0);
1914         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1915                                                      &rid, RF_ACTIVE);
1916         if (adapter->res_memory == NULL) {
1917                 device_printf(dev, "Unable to allocate bus resource: memory\n");
1918                 return ENXIO;
1919         }
1920         adapter->osdep.mem_bus_space_tag =
1921                 rman_get_bustag(adapter->res_memory);
1922         adapter->osdep.mem_bus_space_handle =
1923             rman_get_bushandle(adapter->res_memory);
1924         adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
1925
1926         if (adapter->hw.mac_type > em_82543) {
1927                 /* Figure out where our IO BAR is */
1928                 for (rid = PCIR_BAR(0); rid < PCIR_CIS;) {
1929                         uint32_t val;
1930
1931                         val = pci_read_config(dev, rid, 4);
1932                         if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
1933                                 adapter->io_rid = rid;
1934                                 break;
1935                         }
1936                         rid += 4;
1937                         /* check for 64bit BAR */
1938                         if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
1939                                 rid += 4;
1940                 }
1941                 if (rid >= PCIR_CIS) {
1942                         device_printf(dev, "Unable to locate IO BAR\n");
1943                         return (ENXIO);
1944                 }
1945
1946                 adapter->res_ioport = bus_alloc_resource_any(dev,
1947                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
1948                 if (!(adapter->res_ioport)) {
1949                         device_printf(dev, "Unable to allocate bus resource: "
1950                                       "ioport\n");
1951                         return ENXIO;
1952                 }
1953                 adapter->hw.io_base = 0;
1954                 adapter->osdep.io_bus_space_tag =
1955                         rman_get_bustag(adapter->res_ioport);
1956                 adapter->osdep.io_bus_space_handle =
1957                         rman_get_bushandle(adapter->res_ioport);
1958         }
1959
1960         /* For ICH8 we need to find the flash memory. */
1961         if (adapter->hw.mac_type == em_ich8lan) {
1962                 rid = EM_FLASH;
1963                 adapter->flash_mem = bus_alloc_resource_any(dev,
1964                     SYS_RES_MEMORY, &rid, RF_ACTIVE);
1965                 if (adapter->flash_mem == NULL) {
1966                         device_printf(dev, "Unable to allocate bus resource: "
1967                                       "flash memory\n");
1968                         return ENXIO;
1969                 }
1970                 adapter->osdep.flash_bus_space_tag =
1971                     rman_get_bustag(adapter->flash_mem);
1972                 adapter->osdep.flash_bus_space_handle =
1973                     rman_get_bushandle(adapter->flash_mem);
1974         }
1975
1976         rid = 0x0;
1977         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1978             &rid, RF_SHAREABLE | RF_ACTIVE);
1979         if (adapter->res_interrupt == NULL) {
1980                 device_printf(dev, "Unable to allocate bus resource: "
1981                               "interrupt\n");
1982                 return ENXIO;
1983         }
1984
1985         adapter->hw.back = &adapter->osdep;
1986
1987         return 0;
1988 }
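
/*
 * Editor's sketch (not driver code) of the BAR walk above.  Each BAR is a
 * 32-bit config dword; an I/O BAR is recognized by its type bits, and a
 * 64-bit memory BAR occupies two consecutive dwords, so the walk skips an
 * extra 4 bytes for those.  The function name is hypothetical.
 */
static int
em_example_find_io_bar(device_t dev)
{
	int rid;

	for (rid = PCIR_BAR(0); rid < PCIR_CIS; ) {
		uint32_t val = pci_read_config(dev, rid, 4);

		if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO)
			return (rid);		/* found the I/O BAR */
		rid += 4;
		if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
			rid += 4;		/* skip upper half of 64-bit BAR */
	}
	return (-1);				/* no I/O BAR present */
}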
1989
1990 static void
1991 em_free_pci_resources(device_t dev)
1992 {
1993         struct adapter *adapter = device_get_softc(dev);
1994
1995         if (adapter->res_interrupt != NULL) {
1996                 bus_release_resource(dev, SYS_RES_IRQ, 0, 
1997                                      adapter->res_interrupt);
1998         }
1999         if (adapter->res_memory != NULL) {
2000                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), 
2001                                      adapter->res_memory);
2002         }
2003
2004         if (adapter->res_ioport != NULL) {
2005                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 
2006                                      adapter->res_ioport);
2007         }
2008
2009         if (adapter->flash_mem != NULL) {
2010                 bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH,
2011                                      adapter->flash_mem);
2012         }
2013 }
2014
2015 /*********************************************************************
2016  *
2017  *  Initialize the hardware to a configuration as specified by the
2018  *  adapter structure. The controller is reset, the EEPROM is
2019  *  verified, the MAC address is set, then the shared initialization
2020  *  routines are called.
2021  *
2022  **********************************************************************/
2023 static int
2024 em_hardware_init(struct adapter *adapter)
2025 {
2026         uint16_t        rx_buffer_size;
2027
2028         INIT_DEBUGOUT("em_hardware_init: begin");
2029         /* Issue a global reset */
2030         em_reset_hw(&adapter->hw);
2031
2032         /* When hardware is reset, fifo_head is also reset */
2033         adapter->tx_fifo_head = 0;
2034
2035         /* Make sure we have a good EEPROM before we read from it */
2036         if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2037                 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
2038                         device_printf(adapter->dev,
2039                                       "The EEPROM Checksum Is Not Valid\n");
2040                         return (EIO);
2041                 }
2042         }
2043
2044         if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
2045                 device_printf(adapter->dev,
2046                               "EEPROM read error while reading part number\n");
2047                 return (EIO);
2048         }
2049
2050         /* Set up smart power down as default off on newer adapters. */
2051         if (!em_smart_pwr_down &&
2052             (adapter->hw.mac_type == em_82571 ||
2053              adapter->hw.mac_type == em_82572)) {
2054                 uint16_t phy_tmp = 0;
2055
2056                 /* Speed up time to link by disabling smart power down. */
2057                 em_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
2058                                 &phy_tmp);
2059                 phy_tmp &= ~IGP02E1000_PM_SPD;
2060                 em_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
2061                                  phy_tmp);
2062         }
2063
2064         /*
2065          * These parameters control the automatic generation (Tx) and
2066          * response (Rx) to Ethernet PAUSE frames.
2067          * - High water mark should allow for at least two frames to be
2068          *   received after sending an XOFF.
2069          * - Low water mark works best when it is very near the high water mark.
2070          *   This allows the receiver to restart by sending XON when it has
2071          *   drained a bit.  Here we use an arbitrary value of 1500 which will
2072          *   restart after one full frame is pulled from the buffer.  There
2073          *   could be several smaller frames in the buffer and if so they will
2074          *   not trigger the XON until their total number reduces the buffer
2075          *   by 1500.
2076          * - The pause time is fairly large at 0x1000 x 512ns (about 2 ms).
2077          */
2078         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10);
2079
2080         adapter->hw.fc_high_water =
2081             rx_buffer_size - roundup2(adapter->hw.max_frame_size, 1024); 
2082         adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
2083         if (adapter->hw.mac_type == em_80003es2lan)
2084                 adapter->hw.fc_pause_time = 0xFFFF;
2085         else
2086                 adapter->hw.fc_pause_time = 0x1000;
2087         adapter->hw.fc_send_xon = TRUE;
2088         adapter->hw.fc = E1000_FC_FULL;
2089
2090         if (em_init_hw(&adapter->hw) < 0) {
2091                 device_printf(adapter->dev, "Hardware Initialization Failed\n");
2092                 return (EIO);
2093         }
2094
2095         em_check_for_link(&adapter->hw);
2096
2097         return (0);
2098 }
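
/*
 * Editor's sketch (not driver code) of the flow-control arithmetic in
 * em_hardware_init() above: the PBA register reports the receive packet
 * buffer size in KB in its low 16 bits, the high water mark leaves room
 * for one maximum-sized frame (rounded to 1 KB), and the low water mark
 * sits 1500 bytes below it.  The function name is hypothetical.
 */
static void
em_example_fc_watermarks(uint32_t pba_reg, uint32_t max_frame_size,
			 uint32_t *high, uint32_t *low)
{
	uint32_t rx_buffer_size = (pba_reg & 0xffff) << 10;	/* KB -> bytes */

	*high = rx_buffer_size - roundup2(max_frame_size, 1024);
	*low = *high - 1500;
}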
2099
2100 /*********************************************************************
2101  *
2102  *  Setup networking device structure and register an interface.
2103  *
2104  **********************************************************************/
2105 static void
2106 em_setup_interface(device_t dev, struct adapter *adapter)
2107 {
2108         struct ifnet *ifp;
2109         u_char fiber_type = IFM_1000_SX;        /* default type */
2110         INIT_DEBUGOUT("em_setup_interface: begin");
2111
2112         ifp = &adapter->interface_data.ac_if;
2113         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
2114         ifp->if_mtu = ETHERMTU;
2115         ifp->if_baudrate = 1000000000;
2116         ifp->if_init =  em_init;
2117         ifp->if_softc = adapter;
2118         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2119         ifp->if_ioctl = em_ioctl;
2120         ifp->if_start = em_start;
2121 #ifdef DEVICE_POLLING
2122         ifp->if_poll = em_poll;
2123 #endif
2124         ifp->if_watchdog = em_watchdog;
2125         ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
2126         ifq_set_ready(&ifp->if_snd);
2127
2128         if (adapter->hw.mac_type >= em_82543)
2129                 ifp->if_capabilities |= IFCAP_HWCSUM;
2130
2131         ifp->if_capenable = ifp->if_capabilities;
2132
2133         ether_ifattach(ifp, adapter->hw.mac_addr, NULL);
2134
2135         /*
2136          * Tell the upper layer(s) we support long frames.
2137          */
2138         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2139         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2140 #if 0
2141         ifp->if_capenable |= IFCAP_VLAN_MTU;
2142 #endif
2143
2144         /*
2145          * Specify the media types supported by this adapter and register
2146          * callbacks to update media and link information
2147          */
2148         ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
2149                      em_media_status);
2150         if (adapter->hw.media_type == em_media_type_fiber ||
2151             adapter->hw.media_type == em_media_type_internal_serdes) {
2152                 if (adapter->hw.mac_type == em_82545)
2153                         fiber_type = IFM_1000_LX;
2154                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 
2155                             0, NULL);
2156                 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2157         } else {
2158                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
2159                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2160                             0, NULL);
2161                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
2162                             0, NULL);
2163                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2164                             0, NULL);
2165                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
2166                             0, NULL);
2167                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
2168         }
2169         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2170         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2171 }
2172
2173 /*********************************************************************
2174  *
2175  *  Workaround for SmartSpeed on 82541 and 82547 controllers
2176  *
2177  **********************************************************************/
2178 static void
2179 em_smartspeed(struct adapter *adapter)
2180 {
2181         uint16_t phy_tmp;
2182
2183         if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
2184             !adapter->hw.autoneg ||
2185             !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2186                 return;
2187
2188         if (adapter->smartspeed == 0) {
2189                 /*
2190                  * If Master/Slave config fault is asserted twice,
2191                  * we assume back-to-back.
2192                  */
2193                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2194                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2195                         return;
2196                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2197                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2198                         em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2199                         if (phy_tmp & CR_1000T_MS_ENABLE) {
2200                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
2201                                 em_write_phy_reg(&adapter->hw,
2202                                                  PHY_1000T_CTRL, phy_tmp);
2203                                 adapter->smartspeed++;
2204                                 if (adapter->hw.autoneg &&
2205                                     !em_phy_setup_autoneg(&adapter->hw) &&
2206                                     !em_read_phy_reg(&adapter->hw, PHY_CTRL,
2207                                                      &phy_tmp)) {
2208                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2209                                                     MII_CR_RESTART_AUTO_NEG);
2210                                         em_write_phy_reg(&adapter->hw,
2211                                                          PHY_CTRL, phy_tmp);
2212                                 }
2213                         }
2214                 }
2215                 return;
2216         } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2217                 /* If still no link, perhaps using 2/3 pair cable */
2218                 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
2219                 phy_tmp |= CR_1000T_MS_ENABLE;
2220                 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2221                 if (adapter->hw.autoneg &&
2222                     !em_phy_setup_autoneg(&adapter->hw) &&
2223                     !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
2224                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
2225                                     MII_CR_RESTART_AUTO_NEG);
2226                         em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
2227                 }
2228         }
2229         /* Restart process after EM_SMARTSPEED_MAX iterations */
2230         if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2231                 adapter->smartspeed = 0;
2232 }
2233
2234 /*
2235  * Manage DMA'able memory.
2236  */
2237 static void
2238 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2239 {
2240         if (error)
2241                 return;
2242         *(bus_addr_t *)arg = segs->ds_addr;
2243 }
2244
2245 static int
2246 em_dma_malloc(struct adapter *adapter, bus_size_t size,
2247               struct em_dma_alloc *dma)
2248 {
2249         device_t dev = adapter->dev;
2250         int error;
2251
2252         error = bus_dma_tag_create(NULL,                /* parent */
2253                                    EM_DBA_ALIGN, 0,     /* alignment, bounds */
2254                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2255                                    BUS_SPACE_MAXADDR,   /* highaddr */
2256                                    NULL, NULL,          /* filter, filterarg */
2257                                    size,                /* maxsize */
2258                                    1,                   /* nsegments */
2259                                    size,                /* maxsegsize */
2260                                    0,                   /* flags */
2261                                    &dma->dma_tag);
2262         if (error) {
2263                 device_printf(dev, "%s: bus_dma_tag_create failed; error %d\n",
2264                               __func__, error);
2265                 return error;
2266         }
2267
2268         error = bus_dmamem_alloc(dma->dma_tag, (void**)&dma->dma_vaddr,
2269                                  BUS_DMA_WAITOK, &dma->dma_map);
2270         if (error) {
2271                 device_printf(dev, "%s: bus_dmamem_alloc failed; "
2272                               "size %ju, error %d\n",
2273                               __func__, (uintmax_t)size, error);
2274                 goto fail;
2275         }
2276
2277         error = bus_dmamap_load(dma->dma_tag, dma->dma_map,
2278                                 dma->dma_vaddr, size,
2279                                 em_dmamap_cb, &dma->dma_paddr,
2280                                 BUS_DMA_WAITOK);
2281         if (error) {
2282                 device_printf(dev, "%s: bus_dmamap_load failed; error %d\n",
2283                               __func__, error);
2284                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2285                 goto fail;
2286         }
2287
2288         return 0;
2289 fail:
2290         bus_dma_tag_destroy(dma->dma_tag);
2291         dma->dma_tag = NULL;
2292         return error;
2293 }
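
/*
 * Editor's sketch (not driver code) of the busdma pattern used above:
 * because bus_dmamap_load() reports the bus address through a callback,
 * em_dmamap_cb() stores the first segment's address into the bus_addr_t
 * the caller passed in.  The function name is hypothetical.
 */
static int
em_example_load_ring(struct em_dma_alloc *dma, bus_size_t size)
{
	int error;

	/* em_dmamap_cb() writes segs[0].ds_addr into dma->dma_paddr. */
	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
				size, em_dmamap_cb, &dma->dma_paddr,
				BUS_DMA_WAITOK);
	return (error);
}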
2294
2295 static void
2296 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2297 {
2298         if (dma->dma_tag != NULL) {
2299                 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2300                 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2301                 bus_dma_tag_destroy(dma->dma_tag);
2302                 dma->dma_tag = NULL;
2303         }
2304 }
2305
2306 /*********************************************************************
2307  *
2308  *  Allocate and initialize transmit structures.
2309  *
2310  **********************************************************************/
2311 static int
2312 em_setup_transmit_structures(struct adapter *adapter)
2313 {
2314         struct em_buffer *tx_buffer;
2315         bus_size_t size;
2316         int error, i;
2317
2318         /*
2319          * Setup DMA descriptor areas.
2320          */
2321         size = roundup2(adapter->hw.max_frame_size, MCLBYTES);
2322         if (bus_dma_tag_create(NULL,                    /* parent */
2323                                1, 0,                    /* alignment, bounds */
2324                                BUS_SPACE_MAXADDR,       /* lowaddr */ 
2325                                BUS_SPACE_MAXADDR,       /* highaddr */
2326                                NULL, NULL,              /* filter, filterarg */
2327                                size,                    /* maxsize */
2328                                EM_MAX_SCATTER,          /* nsegments */
2329                                size,                    /* maxsegsize */
2330                                0,                       /* flags */ 
2331                                &adapter->txtag)) {
2332                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
2333                 return(ENOMEM);
2334         }
2335
2336         adapter->tx_buffer_area =
2337                 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc,
2338                         M_DEVBUF, M_WAITOK | M_ZERO);
2339
2340         bzero(adapter->tx_desc_base,
2341               sizeof(struct em_tx_desc) * adapter->num_tx_desc);
2342         tx_buffer = adapter->tx_buffer_area;
2343         for (i = 0; i < adapter->num_tx_desc; i++) {
2344                 error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map);
2345                 if (error) {
2346                         device_printf(adapter->dev,
2347                                       "Unable to create TX DMA map\n");
2348                         goto fail;
2349                 }
2350                 tx_buffer++;
2351         }
2352
2353         adapter->next_avail_tx_desc = 0;
2354         adapter->next_tx_to_clean = 0;
2355
2356         /* Set number of descriptors available */
2357         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2358
2359         /* Set checksum context */
2360         adapter->active_checksum_context = OFFLOAD_NONE;
2361
2362         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2363                         BUS_DMASYNC_PREWRITE);
2364
2365         return (0);
2366 fail:
2367         em_free_transmit_structures(adapter);
2368         return (error);
2369 }
2370
2371 /*********************************************************************
2372  *
2373  *  Enable transmit unit.
2374  *
2375  **********************************************************************/
2376 static void
2377 em_initialize_transmit_unit(struct adapter *adapter)
2378 {
2379         uint32_t reg_tctl;
2380         uint32_t reg_tipg = 0;
2381         uint64_t bus_addr;
2382
2383         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2384
2385         /* Setup the Base and Length of the Tx Descriptor Ring */
2386         bus_addr = adapter->txdma.dma_paddr;
2387         E1000_WRITE_REG(&adapter->hw, TDLEN,
2388                         adapter->num_tx_desc * sizeof(struct em_tx_desc));
2389         E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
2390         E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
2391
2392         /* Setup the HW Tx Head and Tail descriptor pointers */
2393         E1000_WRITE_REG(&adapter->hw, TDT, 0);
2394         E1000_WRITE_REG(&adapter->hw, TDH, 0);
2395
2396         HW_DEBUGOUT2("Base = %x, Length = %x\n",
2397                      E1000_READ_REG(&adapter->hw, TDBAL),
2398                      E1000_READ_REG(&adapter->hw, TDLEN));
2399
2400         /* Set the default values for the Tx Inter Packet Gap timer */
2401         switch (adapter->hw.mac_type) {
2402         case em_82542_rev2_0:
2403         case em_82542_rev2_1:
2404                 reg_tipg = DEFAULT_82542_TIPG_IPGT;
2405                 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2406                 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2407                 break;
2408         case em_80003es2lan:
2409                 reg_tipg = DEFAULT_82543_TIPG_IPGR1;
2410                 reg_tipg |=
2411                     DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2412                 break;
2413         default:
2414                 if (adapter->hw.media_type == em_media_type_fiber ||
2415                     adapter->hw.media_type == em_media_type_internal_serdes)
2416                         reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2417                 else
2418                         reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2419                 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2420                 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2421         }
2422
2423         E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2424         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2425         if (adapter->hw.mac_type >= em_82540) {
2426                 E1000_WRITE_REG(&adapter->hw, TADV,
2427                                 adapter->tx_abs_int_delay.value);
2428         }
2429
2430         /* Program the Transmit Control Register */
2431         reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2432                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2433         if (adapter->hw.mac_type >= em_82571)
2434                 reg_tctl |= E1000_TCTL_MULR;
2435         if (adapter->link_duplex == 1)
2436                 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2437         else
2438                 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2439
2440         /* This write will effectively turn on the transmit unit. */
2441         E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2442
2443         /* Setup Transmit Descriptor Base Settings */
2444         adapter->txd_cmd = E1000_TXD_CMD_IFCS;
2445
2446         if (adapter->tx_int_delay.value > 0)
2447                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2448 }
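
/*
 * Editor's sketch (not driver code): how the TIPG register is assembled
 * above -- IPGT in the low bits, IPGR1 and IPGR2 shifted into their
 * fields.  The function name is hypothetical.
 */
static uint32_t
em_example_pack_tipg(uint32_t ipgt, uint32_t ipgr1, uint32_t ipgr2)
{
	return (ipgt |
		(ipgr1 << E1000_TIPG_IPGR1_SHIFT) |
		(ipgr2 << E1000_TIPG_IPGR2_SHIFT));
}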
2449
2450 /*********************************************************************
2451  *
2452  *  Free all transmit related data structures.
2453  *
2454  **********************************************************************/
2455 static void
2456 em_free_transmit_structures(struct adapter *adapter)
2457 {
2458         struct em_buffer *tx_buffer;
2459         int i;
2460
2461         INIT_DEBUGOUT("free_transmit_structures: begin");
2462
2463         if (adapter->tx_buffer_area != NULL) {
2464                 tx_buffer = adapter->tx_buffer_area;
2465                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2466                         if (tx_buffer->m_head != NULL) {
2467                                 bus_dmamap_unload(adapter->txtag,
2468                                                   tx_buffer->map);
2469                                 m_freem(tx_buffer->m_head);
2470                         }
2471
2472                         if (tx_buffer->map != NULL) {
2473                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2474                                 tx_buffer->map = NULL;
2475                         }
2476                         tx_buffer->m_head = NULL;
2477                 }
2478         }
2479         if (adapter->tx_buffer_area != NULL) {
2480                 kfree(adapter->tx_buffer_area, M_DEVBUF);
2481                 adapter->tx_buffer_area = NULL;
2482         }
2483         if (adapter->txtag != NULL) {
2484                 bus_dma_tag_destroy(adapter->txtag);
2485                 adapter->txtag = NULL;
2486         }
2487 }
2488
2489 /*********************************************************************
2490  *
2491  *  The offload context needs to be set when we transfer the first
2492  *  packet of a particular protocol (TCP/UDP). We change the
2493  *  context only if the protocol type changes.
2494  *
2495  **********************************************************************/
2496 static void
2497 em_transmit_checksum_setup(struct adapter *adapter,
2498                            struct mbuf *mp,
2499                            uint32_t *txd_upper,
2500                            uint32_t *txd_lower) 
2501 {
2502         struct em_context_desc *TXD;
2503         struct em_buffer *tx_buffer;
2504         int curr_txd;
2505
2506         if (mp->m_pkthdr.csum_flags) {
2507                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2508                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2509                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2510                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2511                                 return;
2512                         else
2513                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2514                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2515                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2516                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2517                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2518                                 return;
2519                         else
2520                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2521                 } else {
2522                         *txd_upper = 0;
2523                         *txd_lower = 0;
2524                         return;
2525                 }
2526         } else {
2527                 *txd_upper = 0;
2528                 *txd_lower = 0;
2529                 return;
2530         }
2531
2532         /*
2533          * If we reach this point, the checksum offload context
2534          * needs to be reset.
2535          */
2536         curr_txd = adapter->next_avail_tx_desc;
2537         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2538         TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2539
2540         TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2541         TXD->lower_setup.ip_fields.ipcso =
2542             ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2543         TXD->lower_setup.ip_fields.ipcse =
2544             htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2545
2546         TXD->upper_setup.tcp_fields.tucss =
2547             ETHER_HDR_LEN + sizeof(struct ip);
2548         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2549
2550         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2551                 TXD->upper_setup.tcp_fields.tucso =
2552                         ETHER_HDR_LEN + sizeof(struct ip) +
2553                         offsetof(struct tcphdr, th_sum);
2554         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2555                 TXD->upper_setup.tcp_fields.tucso =
2556                         ETHER_HDR_LEN + sizeof(struct ip) +
2557                         offsetof(struct udphdr, uh_sum);
2558         }
2559
2560         TXD->tcp_seg_setup.data = htole32(0);
2561         TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2562
2563         tx_buffer->m_head = NULL;
2564         tx_buffer->next_eop = -1;
2565
2566         if (++curr_txd == adapter->num_tx_desc)
2567                 curr_txd = 0;
2568
2569         adapter->num_tx_desc_avail--;
2570         adapter->next_avail_tx_desc = curr_txd;
2571 }
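
/*
 * Editor's sketch (not driver code): the context descriptor above locates
 * each checksum field by its byte offset from the start of the frame,
 * which is why offsetof() against the protocol header is added to
 * ETHER_HDR_LEN plus the IP header length.  The function name is
 * hypothetical and assumes an untagged IPv4/TCP frame.
 */
static int
em_example_tcp_csum_offset(void)
{
	return (ETHER_HDR_LEN + sizeof(struct ip) +
		offsetof(struct tcphdr, th_sum));
}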
2572
2573 /**********************************************************************
2574  *
2575  *  Examine each tx_buffer in the used queue. If the hardware is done
2576  *  processing the packet then free associated resources. The
2577  *  tx_buffer is put back on the free queue.
2578  *
2579  **********************************************************************/
2580
2581 static void
2582 em_txeof(struct adapter *adapter)
2583 {
2584         int first, last, done, num_avail;
2585         struct em_buffer *tx_buffer;
2586         struct em_tx_desc *tx_desc, *eop_desc;
2587         struct ifnet *ifp = &adapter->interface_data.ac_if;
2588
2589         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2590                 return;
2591
2592         num_avail = adapter->num_tx_desc_avail; 
2593         first = adapter->next_tx_to_clean;
2594         tx_desc = &adapter->tx_desc_base[first];
2595         tx_buffer = &adapter->tx_buffer_area[first];
2596         last = tx_buffer->next_eop;
2597         KKASSERT(last >= 0 && last < adapter->num_tx_desc);
2598         eop_desc = &adapter->tx_desc_base[last];
2599
2600         /*
2601          * Now calculate the terminating index for the cleanup loop below
2602          */
2603         if (++last == adapter->num_tx_desc)
2604                 last = 0;
2605         done = last;
2606
2607         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2608                         BUS_DMASYNC_POSTREAD);
2609
2610         while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2611                 while (first != done) {
2612                         tx_desc->upper.data = 0;
2613                         tx_desc->lower.data = 0;
2614                         num_avail++;
2615
2616                         logif(pkt_txclean);
2617
2618                         if (tx_buffer->m_head) {
2619                                 ifp->if_opackets++;
2620                                 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2621                                                 BUS_DMASYNC_POSTWRITE);
2622                                 bus_dmamap_unload(adapter->txtag,
2623                                                   tx_buffer->map);
2624
2625                                 m_freem(tx_buffer->m_head);
2626                                 tx_buffer->m_head = NULL;
2627                         }
2628                         tx_buffer->next_eop = -1;
2629
2630                         if (++first == adapter->num_tx_desc)
2631                                 first = 0;
2632
2633                         tx_buffer = &adapter->tx_buffer_area[first];
2634                         tx_desc = &adapter->tx_desc_base[first];
2635                 }
2636                 /* See if we can continue to the next packet */
2637                 last = tx_buffer->next_eop;
2638                 if (last != -1) {
2639                         KKASSERT(last >= 0 && last < adapter->num_tx_desc);
2640                         eop_desc = &adapter->tx_desc_base[last];
2641                         if (++last == adapter->num_tx_desc)
2642                                 last = 0;
2643                         done = last;
2644                 } else {
2645                         break;
2646                 }
2647         }
2648
2649         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2650                         BUS_DMASYNC_PREWRITE);
2651
2652         adapter->next_tx_to_clean = first;
2653
2654         /*
2655          * If we have enough room, clear IFF_OACTIVE to tell the stack
2656          * that it is OK to send packets.
2657          * If there are no pending descriptors, clear the timeout. Otherwise,
2658          * if some descriptors have been freed, restart the timeout.
2659          */
2660         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2661                 ifp->if_flags &= ~IFF_OACTIVE;
2662                 if (num_avail == adapter->num_tx_desc)
2663                         ifp->if_timer = 0;
2664                 else if (num_avail == adapter->num_tx_desc_avail)
2665                         ifp->if_timer = EM_TX_TIMEOUT;
2666         }
2667         adapter->num_tx_desc_avail = num_avail;
2668 }
2669
2670 /*********************************************************************
2671  *
2672  *  Get a buffer from system mbuf buffer pool.
2673  *
2674  **********************************************************************/
2675 static int
2676 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2677 {
2678         struct mbuf *mp = nmp;
2679         struct em_buffer *rx_buffer;
2680         struct ifnet *ifp;
2681         bus_addr_t paddr;
2682         int error;
2683
2684         ifp = &adapter->interface_data.ac_if;
2685
2686         if (mp == NULL) {
2687                 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2688                 if (mp == NULL) {
2689                         adapter->mbuf_cluster_failed++;
2690                         return (ENOBUFS);
2691                 }
2692                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2693         } else {
2694                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2695                 mp->m_data = mp->m_ext.ext_buf;
2696                 mp->m_next = NULL;
2697         }
2698
2699         if (ifp->if_mtu <= ETHERMTU)
2700                 m_adj(mp, ETHER_ALIGN);
2701
2702         rx_buffer = &adapter->rx_buffer_area[i];
2703
2704         /*
2705          * Using memory from the mbuf cluster pool, invoke the
2706          * bus_dma machinery to arrange the memory mapping.
2707          */
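        /*
         * em_dmamap_cb (defined earlier in this file) is assumed to be
         * the usual single-segment load callback, roughly:
         *
         *	static void
         *	em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
         *		     int error)
         *	{
         *		if (error == 0)
         *			*(bus_addr_t *)arg = segs->ds_addr;
         *	}
         *
         * so paddr ends up holding the cluster's bus address, which is
         * then written into the receive descriptor below.
         */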
2708         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2709                                 mtod(mp, void *), mp->m_len,
2710                                 em_dmamap_cb, &paddr, 0);
2711         if (error) {
2712                 m_free(mp);
2713                 return (error);
2714         }
2715         rx_buffer->m_head = mp;
2716         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2717         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2718
2719         return (0);
2720 }
2721
2722 /*********************************************************************
2723  *
2724  *  Allocate memory for rx_buffer structures. Since we use one
2725  *  rx_buffer per received packet, the maximum number of rx_buffers
2726  *  that we'll need is equal to the number of receive descriptors
2727  *  that we've allocated.
2728  *
2729  **********************************************************************/
2730 static int
2731 em_allocate_receive_structures(struct adapter *adapter)
2732 {
2733         int i, error, size;
2734         struct em_buffer *rx_buffer;
2735
2736         size = adapter->num_rx_desc * sizeof(struct em_buffer);
2737         adapter->rx_buffer_area = kmalloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2738
2739         error = bus_dma_tag_create(NULL,                /* parent */
2740                                    1, 0,                /* alignment, bounds */
2741                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2742                                    BUS_SPACE_MAXADDR,   /* highaddr */
2743                                    NULL, NULL,          /* filter, filterarg */
2744                                    MCLBYTES,            /* maxsize */
2745                                    1,                   /* nsegments */
2746                                    MCLBYTES,            /* maxsegsize */
2747                                    0,                   /* flags */
2748                                    &adapter->rxtag);
2749         if (error) {
2750                 device_printf(adapter->dev, "%s: bus_dma_tag_create failed; "
2751                               "error %u\n", __func__, error);
2752                 goto fail;
2753         }
2754  
2755         rx_buffer = adapter->rx_buffer_area;
2756         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2757                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2758                                           &rx_buffer->map);
2759                 if (error) {
2760                         device_printf(adapter->dev,
2761                                       "%s: bus_dmamap_create failed; "
2762                                       "error %u\n", __func__, error);
2763                         goto fail;
2764                 }
2765         }
2766
2767         for (i = 0; i < adapter->num_rx_desc; i++) {
2768                 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2769                 if (error)
2770                         goto fail;
2771         }
2772
2773         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2774                         BUS_DMASYNC_PREWRITE);
2775
2776         return (0);
2777 fail:
2778         em_free_receive_structures(adapter);
2779         return (error);
2780 }
2781
2782 /*********************************************************************
2783  *
2784  *  Allocate and initialize receive structures.
2785  *
2786  **********************************************************************/
2787 static int
2788 em_setup_receive_structures(struct adapter *adapter)
2789 {
2790         int error;
2791
2792         bzero(adapter->rx_desc_base,
2793               sizeof(struct em_rx_desc) * adapter->num_rx_desc);
2794
2795         error = em_allocate_receive_structures(adapter);
2796         if (error)
2797                 return (error);
2798
2799         /* Setup our descriptor pointers */
2800         adapter->next_rx_desc_to_check = 0;
2801
2802         return (0);
2803 }
2804
2805 /*********************************************************************
2806  *
2807  *  Enable receive unit.
2808  *
2809  **********************************************************************/
2810 static void
2811 em_initialize_receive_unit(struct adapter *adapter)
2812 {
2813         uint32_t reg_rctl;
2814         uint32_t reg_rxcsum;
2815         struct ifnet *ifp;
2816         uint64_t bus_addr;
2817  
2818         INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2819
2820         ifp = &adapter->interface_data.ac_if;
2821
2822         /*
2823          * Make sure receives are disabled while setting
2824          * up the descriptor ring
2825          */
2826         E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2827
2828         /* Set the Receive Delay Timer Register */
2829         E1000_WRITE_REG(&adapter->hw, RDTR, 
2830                         adapter->rx_int_delay.value | E1000_RDT_FPDB);
2831
2832         if (adapter->hw.mac_type >= em_82540) {
2833                 E1000_WRITE_REG(&adapter->hw, RADV,
2834                                 adapter->rx_abs_int_delay.value);
2835
2836                 /* Set the interrupt throttling rate in 256ns increments */  
2837                 if (em_int_throttle_ceil) {
2838                         E1000_WRITE_REG(&adapter->hw, ITR,
2839                                 1000000000 / 256 / em_int_throttle_ceil);
2840                 } else {
2841                         E1000_WRITE_REG(&adapter->hw, ITR, 0);
2842                 }
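                /*
                 * Worked example, assuming (as the comment above says)
                 * that ITR counts the minimum inter-interrupt gap in
                 * 256ns units: with em_int_throttle_ceil = 10000,
                 *
                 *	ITR = 1000000000 / 256 / 10000 = 390
                 *
                 * i.e. at least 390 * 256ns (about 99.8us) between
                 * interrupts, capping the rate at roughly 10000/sec.
                 */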
2843         }
2844
2845         /* Setup the Base and Length of the Rx Descriptor Ring */
2846         bus_addr = adapter->rxdma.dma_paddr;
2847         E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2848                         sizeof(struct em_rx_desc));
2849         E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2850         E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2851
2852         /* Setup the Receive Control Register */
2853         reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2854                    E1000_RCTL_RDMTS_HALF |
2855                    (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2856
2857         if (adapter->hw.tbi_compatibility_on == TRUE)
2858                 reg_rctl |= E1000_RCTL_SBP;
2859
2860         switch (adapter->rx_buffer_len) {
2861         default:
2862         case EM_RXBUFFER_2048:
2863                 reg_rctl |= E1000_RCTL_SZ_2048;
2864                 break;
2865         case EM_RXBUFFER_4096:
2866                 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX |
2867                             E1000_RCTL_LPE;
2868                 break;            
2869         case EM_RXBUFFER_8192:
2870                 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX |
2871                             E1000_RCTL_LPE;
2872                 break;
2873         case EM_RXBUFFER_16384:
2874                 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX |
2875                             E1000_RCTL_LPE;
2876                 break;
2877         }
2878
2879         if (ifp->if_mtu > ETHERMTU)
2880                 reg_rctl |= E1000_RCTL_LPE;
2881
2882         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2883         if ((adapter->hw.mac_type >= em_82543) &&
2884             (ifp->if_capenable & IFCAP_RXCSUM)) {
2885                 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2886                 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2887                 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2888         }
2889
2890 #ifdef EM_X60_WORKAROUND
2891         if (adapter->hw.mac_type == em_82573)
2892                 E1000_WRITE_REG(&adapter->hw, RDTR, 32);
2893 #endif
2894
2895         /* Enable Receives */
2896         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2897
2898         /* Setup the HW Rx Head and Tail Descriptor Pointers */
2899         E1000_WRITE_REG(&adapter->hw, RDH, 0);
2900         E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2901 }
2902
2903 /*********************************************************************
2904  *
2905  *  Free receive related data structures.
2906  *
2907  **********************************************************************/
2908 static void
2909 em_free_receive_structures(struct adapter *adapter)
2910 {
2911         struct em_buffer *rx_buffer;
2912         int i;
2913
2914         INIT_DEBUGOUT("free_receive_structures: begin");
2915
2916         if (adapter->rx_buffer_area != NULL) {
2917                 rx_buffer = adapter->rx_buffer_area;
2918                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2919                         if (rx_buffer->m_head != NULL) {
2920                                 bus_dmamap_unload(adapter->rxtag,
2921                                                   rx_buffer->map);
2922                                 m_freem(rx_buffer->m_head);
2923                                 rx_buffer->m_head = NULL;
2924                         }
2925                         if (rx_buffer->map != NULL) {
2926                                 bus_dmamap_destroy(adapter->rxtag,
2927                                                    rx_buffer->map);
2928                                 rx_buffer->map = NULL;
2929                         }
2930                 }
2931         }
2932         if (adapter->rx_buffer_area != NULL) {
2933                 kfree(adapter->rx_buffer_area, M_DEVBUF);
2934                 adapter->rx_buffer_area = NULL;
2935         }
2936         if (adapter->rxtag != NULL) {
2937                 bus_dma_tag_destroy(adapter->rxtag);
2938                 adapter->rxtag = NULL;
2939         }
2940 }
2941
2942 /*********************************************************************
2943  *
2944  *  This routine executes in interrupt context. It replenishes
2945  *  the mbufs in the descriptor ring and passes data that has been
2946  *  DMA'd into host memory up to the upper layer.
2947  *
2948  *  We loop at most count times if count > 0, or until done if
2949  *  count < 0.
2950  *
2951  *********************************************************************/
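/*
 * A rough sketch of the fragment handling below (descriptive only): a
 * packet larger than one cluster arrives as several descriptors, and
 * only the last of them has E1000_RXD_STAT_EOP set.  adapter->fmp
 * points at the first mbuf of the chain being assembled and
 * adapter->lmp at the last; on EOP the completed chain is handed to
 * ifp->if_input() (or the VLAN input path) and both pointers are reset
 * to NULL.
 */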
2952 static void
2953 em_rxeof(struct adapter *adapter, int count)
2954 {
2955         struct ifnet *ifp;
2956         struct mbuf *mp;
2957         uint8_t accept_frame = 0;
2958         uint8_t eop = 0;
2959         uint16_t len, desc_len, prev_len_adj;
2960         int i;
2961
2962         /* Pointer to the receive descriptor being examined. */
2963         struct em_rx_desc *current_desc;
2964
2965         ifp = &adapter->interface_data.ac_if;
2966         i = adapter->next_rx_desc_to_check;
2967         current_desc = &adapter->rx_desc_base[i];
2968
2969         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2970                         BUS_DMASYNC_POSTREAD);
2971
2972         if (!(current_desc->status & E1000_RXD_STAT_DD))
2973                 return;
2974
2975         while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) {
2976                 logif(pkt_receive);
2977                 mp = adapter->rx_buffer_area[i].m_head;
2978                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2979                                 BUS_DMASYNC_POSTREAD);
2980                 bus_dmamap_unload(adapter->rxtag,
2981                                   adapter->rx_buffer_area[i].map);
2982
2983                 accept_frame = 1;
2984                 prev_len_adj = 0;
2985                 desc_len = le16toh(current_desc->length);
2986                 if (current_desc->status & E1000_RXD_STAT_EOP) {
2987                         count--;
2988                         eop = 1;
2989                         if (desc_len < ETHER_CRC_LEN) {
2990                                 len = 0;
2991                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
2992                         } else {
2993                                 len = desc_len - ETHER_CRC_LEN;
2994                         }
2995                 } else {
2996                         eop = 0;
2997                         len = desc_len;
2998                 }
2999
3000                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
3001                         uint8_t last_byte;
3002                         uint32_t pkt_len = desc_len;
3003
3004                         if (adapter->fmp != NULL)
3005                                 pkt_len += adapter->fmp->m_pkthdr.len; 
3006
3007                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
3008
3009                         if (TBI_ACCEPT(&adapter->hw, current_desc->status, 
3010                                        current_desc->errors, 
3011                                        pkt_len, last_byte)) {
3012                                 em_tbi_adjust_stats(&adapter->hw, 
3013                                                     &adapter->stats, 
3014                                                     pkt_len, 
3015                                                     adapter->hw.mac_addr);
3016                                 if (len > 0)
3017                                         len--;
3018                         } else {
3019                                 accept_frame = 0;
3020                         }
3021                 }
3022
3023                 if (accept_frame) {
3024                         if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
3025                                 adapter->dropped_pkts++;
3026                                 em_get_buf(i, adapter, mp, MB_DONTWAIT);
3027                                 if (adapter->fmp != NULL)
3028                                         m_freem(adapter->fmp);
3029                                 adapter->fmp = NULL;
3030                                 adapter->lmp = NULL;
3031                                 goto skip;
3032                         }
3033
3034                         /* Assign correct length to the current fragment */
3035                         mp->m_len = len;
3036
3037                         if (adapter->fmp == NULL) {
3038                                 mp->m_pkthdr.len = len;
3039                                 adapter->fmp = mp;       /* Store the first mbuf */
3040                                 adapter->lmp = mp;
3041                         } else {
3042                                 /* Chain mbufs together */
3043                                 /* 
3044                                  * Adjust length of previous mbuf in chain if
3045                                  * we received less than 4 bytes in the last
3046                                  * descriptor.
3047                                  */
3048                                 if (prev_len_adj > 0) {
3049                                         adapter->lmp->m_len -= prev_len_adj;
3050                                         adapter->fmp->m_pkthdr.len -= prev_len_adj;
3051                                 }
3052                                 adapter->lmp->m_next = mp;
3053                                 adapter->lmp = adapter->lmp->m_next;
3054                                 adapter->fmp->m_pkthdr.len += len;
3055                         }
3056
3057                         if (eop) {
3058                                 adapter->fmp->m_pkthdr.rcvif = ifp;
3059                                 ifp->if_ipackets++;
3060
3061                                 em_receive_checksum(adapter, current_desc,
3062                                                     adapter->fmp);
3063                                 if (current_desc->status & E1000_RXD_STAT_VP) {
3064                                         VLAN_INPUT_TAG(adapter->fmp,
3065                                                        (current_desc->special & 
3066                                                         E1000_RXD_SPC_VLAN_MASK));
3067                                 } else {
3068                                         ifp->if_input(ifp, adapter->fmp);
3069                                 }
3070                                 adapter->fmp = NULL;
3071                                 adapter->lmp = NULL;
3072                         }
3073                 } else {
3074                         adapter->dropped_pkts++;
3075                         em_get_buf(i, adapter, mp, MB_DONTWAIT);
3076                         if (adapter->fmp != NULL) 
3077                                 m_freem(adapter->fmp);
3078                         adapter->fmp = NULL;
3079                         adapter->lmp = NULL;
3080                 }
3081
3082 skip:
3083                 /* Zero out the receive descriptor's status. */
3084                 current_desc->status = 0;
3085
3086                 /* Advance our pointers to the next descriptor. */
3087                 if (++i == adapter->num_rx_desc) {
3088                         i = 0;
3089                         current_desc = adapter->rx_desc_base;
3090                 } else {
3091                         current_desc++;
3092                 }
3093         }
3094
3095         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
3096                         BUS_DMASYNC_PREWRITE);
3097
3098         adapter->next_rx_desc_to_check = i;
3099
3100         /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
3101         if (--i < 0)
3102                 i = adapter->num_rx_desc - 1;
3103
3104         E1000_WRITE_REG(&adapter->hw, RDT, i);
3105 }
3106
3107 /*********************************************************************
3108  *
3109  *  Verify that the hardware indicated that the checksum is valid.
3110  *  Inform the stack about the status of the checksum so that the
3111  *  stack doesn't spend time verifying it again.
3112  *
3113  *********************************************************************/
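/*
 * A note on how the flags set below are consumed (standard BSD
 * convention rather than anything driver specific): CSUM_IP_CHECKED |
 * CSUM_IP_VALID tells ip_input() not to recompute the IP header
 * checksum, and CSUM_DATA_VALID | CSUM_PSEUDO_HDR together with
 * csum_data = 0xffff lets the TCP/UDP input paths treat the payload
 * checksum as already verified.
 */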
3114 static void
3115 em_receive_checksum(struct adapter *adapter,
3116                     struct em_rx_desc *rx_desc,
3117                     struct mbuf *mp)
3118 {
3119         /* 82543 or newer only */
3120         if ((adapter->hw.mac_type < em_82543) ||
3121             /* Ignore Checksum bit is set */
3122             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
3123                 mp->m_pkthdr.csum_flags = 0;
3124                 return;
3125         }
3126
3127         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
3128                 /* Did it pass? */
3129                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3130                         /* IP Checksum Good */
3131                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
3132                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3133                 } else {
3134                         mp->m_pkthdr.csum_flags = 0;
3135                 }
3136         }
3137
3138         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
3139                 /* Did it pass? */
3140                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3141                         mp->m_pkthdr.csum_flags |=
3142                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
3143                         mp->m_pkthdr.csum_data = htons(0xffff);
3144                 }
3145         }
3146 }
3147
3148
3149 static void 
3150 em_enable_vlans(struct adapter *adapter)
3151 {
3152         uint32_t ctrl;
3153
3154         E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
3155
3156         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3157         ctrl |= E1000_CTRL_VME;
3158         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3159 }
3160
3161 static void
3162 em_disable_vlans(struct adapter *adapter)
3163 {
3164         uint32_t ctrl;
3165
3166         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3167         ctrl &= ~E1000_CTRL_VME;
3168         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3169 }
3170
3171 /*
3172  * note: we must call bus_enable_intr() prior to enabling the hardware
3173  * interrupt and bus_disable_intr() after disabling the hardware interrupt
3174  * in order to avoid handler execution races from scheduled interrupt
3175  * threads.
3176  */
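/*
 * The mirror-image ordering used by the two helpers below, shown side
 * by side (illustrative only):
 *
 *	enable:		lwkt_serialize_handler_enable(ifp->if_serializer);
 *			E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
 *
 *	disable:	E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
 *			lwkt_serialize_handler_disable(ifp->if_serializer);
 *
 * i.e. the software handler is enabled before the hardware can raise
 * an interrupt and disabled only after the hardware has been masked.
 */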
3177 static void
3178 em_enable_intr(struct adapter *adapter)
3179 {
3180         struct ifnet *ifp = &adapter->interface_data.ac_if;
3181         
3182         if ((ifp->if_flags & IFF_POLLING) == 0) {
3183                 lwkt_serialize_handler_enable(ifp->if_serializer);
3184                 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
3185         }
3186 }
3187
3188 static void
3189 em_disable_intr(struct adapter *adapter)
3190 {
3191         /*
3192          * The first version of the 82542 had an errata where, when link was
3193          * forced, it would stay up even if the cable was disconnected.
3194          * Sequence errors were used to detect the disconnect and the driver
3195          * would then unforce the link.  This code is in the ISR.  For this
3196          * to work correctly the Sequence error interrupt had to be enabled
3197          * all the time.
3198          */
3199         if (adapter->hw.mac_type == em_82542_rev2_0) {
3200                 E1000_WRITE_REG(&adapter->hw, IMC,
3201                                 (0xffffffff & ~E1000_IMC_RXSEQ));
3202         } else {
3203                 E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
3204         }
3205
3206         lwkt_serialize_handler_disable(adapter->interface_data.ac_if.if_serializer);
3207 }
3208
3209 static int
3210 em_is_valid_ether_addr(uint8_t *addr)
3211 {
3212         static const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3213
3214         if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3215                 return (FALSE);
3216         else
3217                 return (TRUE);
3218 }
3219
3220 void
3221 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3222 {
3223         pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
3224 }
3225
3226 void
3227 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3228 {
3229         *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
3230 }
3231
3232 void
3233 em_pci_set_mwi(struct em_hw *hw)
3234 {
3235         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
3236                          (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
3237 }
3238
3239 void
3240 em_pci_clear_mwi(struct em_hw *hw)
3241 {
3242         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
3243                          (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
3244 }
3245
3246 uint32_t
3247 em_io_read(struct em_hw *hw, unsigned long port)
3248 {
3249         struct em_osdep *io = hw->back;
3250
3251         return bus_space_read_4(io->io_bus_space_tag,
3252                                 io->io_bus_space_handle, port);
3253 }
3254
3255 void
3256 em_io_write(struct em_hw *hw, unsigned long port, uint32_t value)
3257 {
3258         struct em_osdep *io = hw->back;
3259
3260         bus_space_write_4(io->io_bus_space_tag,
3261                           io->io_bus_space_handle, port, value);
3262 }
3263
3264 /*
3265  * We may eventually implement this, but it's unnecessary
3266  * for now, so we just return unsupported.
3267  */
3268 int32_t
3269 em_read_pcie_cap_reg(struct em_hw *hw, uint32_t reg, uint16_t *value)
3270 {
3271         return (0);
3272 }
3273
3274
3275 /*********************************************************************
3276  * 82544 Coexistence issue workaround.
3277  *    There are 2 issues.
3278  *      1. Transmit Hang issue.
3279  *    To detect this issue, the following equation can be used:
3280  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3281  *          If SUM[3:0] falls within 1 to 4, we will have this issue.
3282  *
3283  *      2. DAC issue.
3284  *    To detect this issue, the following equation can be used:
3285  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3286  *          If SUM[3:0] falls within 9 to c, we will have this issue.
3287  *
3288  *
3289  *    WORKAROUND:
3290  *          Make sure the ending address is not 1,2,3,4 (Hang) or
3291  *          9,a,b,c (DAC).
3292  *
3293  *************************************************************************/
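/*
 * Worked example of the check performed below: for a segment at a bus
 * address ending in 0x6 (ADDR[2:0] = 6) with length 0x3E
 * (SIZE[3:0] = 0xE),
 *
 *	SUM[3:0] = (6 + 0xE) & 0xF = 0x4
 *
 * which falls in the 1..4 "hang" range, so em_fill_descriptors() splits
 * the segment into a (length - 4)-byte descriptor followed by a final
 * 4-byte descriptor, as done at the bottom of the function.
 */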
3294 static uint32_t
3295 em_fill_descriptors(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array)
3296 {
3297         /* The issue is sensitive to both length and address. */
3298         /* Handle the trivially short case first. */
3299         uint32_t safe_terminator;
3300         if (length <= 4) {
3301                 desc_array->descriptor[0].address = address;
3302                 desc_array->descriptor[0].length = length;
3303                 desc_array->elements = 1;
3304                 return (desc_array->elements);
3305         }
3306         safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
3307         /* If it does not fall within 0x1-0x4 or 0x9-0xC, one descriptor is enough. */
3308         if (safe_terminator == 0 ||
3309             (safe_terminator > 4 && safe_terminator < 9) || 
3310             (safe_terminator > 0xC && safe_terminator <= 0xF)) {
3311                 desc_array->descriptor[0].address = address;
3312                 desc_array->descriptor[0].length = length;
3313                 desc_array->elements = 1;
3314                 return (desc_array->elements);
3315         }
3316
3317         desc_array->descriptor[0].address = address;
3318         desc_array->descriptor[0].length = length - 4;
3319         desc_array->descriptor[1].address = address + (length - 4);
3320         desc_array->descriptor[1].length = 4;
3321         desc_array->elements = 2;
3322         return (desc_array->elements);
3323 }
3324
3325 /**********************************************************************
3326  *
3327  *  Update the board statistics counters.
3328  *
3329  **********************************************************************/
3330 static void
3331 em_update_stats_counters(struct adapter *adapter)
3332 {
3333         struct ifnet   *ifp;
3334
3335         if (adapter->hw.media_type == em_media_type_copper ||
3336             (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
3337                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
3338                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
3339         }
3340         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
3341         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
3342         adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
3343         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
3344
3345         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
3346         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
3347         adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
3348         adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
3349         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
3350         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
3351         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
3352         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
3353         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
3354         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
3355         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
3356         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
3357         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
3358         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
3359         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
3360         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
3361         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
3362         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
3363         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
3364         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
3365
3366         /* For the 64-bit byte counters the low dword must be read first. */
3367         /* Both registers clear on the read of the high dword. */
3368
3369         adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
3370         adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
3371         adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
3372         adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
3373
3374         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
3375         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
3376         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
3377         adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
3378         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
3379
3380         adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
3381         adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
3382         adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
3383         adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
3384
3385         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3386         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3387         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3388         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3389         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3390         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3391         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3392         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3393         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3394         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3395
3396         if (adapter->hw.mac_type >= em_82543) {
3397                 adapter->stats.algnerrc += 
3398                     E1000_READ_REG(&adapter->hw, ALGNERRC);
3399                 adapter->stats.rxerrc += 
3400                     E1000_READ_REG(&adapter->hw, RXERRC);
3401                 adapter->stats.tncrs += 
3402                     E1000_READ_REG(&adapter->hw, TNCRS);
3403                 adapter->stats.cexterr += 
3404                     E1000_READ_REG(&adapter->hw, CEXTERR);
3405                 adapter->stats.tsctc += 
3406                     E1000_READ_REG(&adapter->hw, TSCTC);
3407                 adapter->stats.tsctfc += 
3408                     E1000_READ_REG(&adapter->hw, TSCTFC);
3409         }
3410         ifp = &adapter->interface_data.ac_if;
3411
3412         /* Fill out the OS statistics structure */
3413         ifp->if_collisions = adapter->stats.colc;
3414
3415         /* Rx Errors */
3416         ifp->if_ierrors =
3417                 adapter->dropped_pkts +
3418                 adapter->stats.rxerrc +
3419                 adapter->stats.crcerrs +
3420                 adapter->stats.algnerrc +
3421                 adapter->stats.ruc + adapter->stats.roc +
3422                 adapter->stats.mpc + adapter->stats.cexterr +
3423                 adapter->rx_overruns;
3424
3425         /* Tx Errors */
3426         ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
3427                           adapter->watchdog_timeouts;
3428 }
3429
3430
3431 /**********************************************************************
3432  *
3433  *  This routine is called only when em_display_debug_stats is enabled.
3434  *  This routine provides a way to take a look at important statistics
3435  *  maintained by the driver and hardware.
3436  *
3437  **********************************************************************/
3438 static void
3439 em_print_debug_info(struct adapter *adapter)
3440 {
3441         device_t dev = adapter->dev;
3442         uint8_t *hw_addr = adapter->hw.hw_addr;
3443
3444         device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3445         device_printf(dev, "CTRL  = 0x%x RCTL = 0x%x\n",
3446                       E1000_READ_REG(&adapter->hw, CTRL),
3447                       E1000_READ_REG(&adapter->hw, RCTL));
3448         device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk\n",
3449                       ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
3450                       (E1000_READ_REG(&adapter->hw, PBA) & 0xffff));
3451         device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3452                       adapter->hw.fc_high_water, adapter->hw.fc_low_water);
3453         device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3454                       E1000_READ_REG(&adapter->hw, TIDV),
3455                       E1000_READ_REG(&adapter->hw, TADV));
3456         device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3457                       E1000_READ_REG(&adapter->hw, RDTR),
3458                       E1000_READ_REG(&adapter->hw, RADV));
3459         device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
3460                       (long long)adapter->tx_fifo_wrk_cnt,
3461                       (long long)adapter->tx_fifo_reset_cnt);
3462         device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3463                       E1000_READ_REG(&adapter->hw, TDH),
3464                       E1000_READ_REG(&adapter->hw, TDT));
3465         device_printf(dev, "Num Tx descriptors avail = %d\n",
3466                       adapter->num_tx_desc_avail);
3467         device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
3468                       adapter->no_tx_desc_avail1);
3469         device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
3470                       adapter->no_tx_desc_avail2);
3471         device_printf(dev, "Std mbuf failed = %ld\n",
3472                       adapter->mbuf_alloc_failed);
3473         device_printf(dev, "Std mbuf cluster failed = %ld\n",
3474                       adapter->mbuf_cluster_failed);
3475         device_printf(dev, "Driver dropped packets = %ld\n",
3476                       adapter->dropped_pkts);
3477 }
3478
3479 static void
3480 em_print_hw_stats(struct adapter *adapter)
3481 {
3482         device_t dev = adapter->dev;
3483
3484         device_printf(dev, "Excessive collisions = %lld\n",
3485                       (long long)adapter->stats.ecol);
3486         device_printf(dev, "Symbol errors = %lld\n",
3487                       (long long)adapter->stats.symerrs);
3488         device_printf(dev, "Sequence errors = %lld\n",
3489                       (long long)adapter->stats.sec);
3490         device_printf(dev, "Defer count = %lld\n",
3491                       (long long)adapter->stats.dc);
3492
3493         device_printf(dev, "Missed Packets = %lld\n",
3494                       (long long)adapter->stats.mpc);
3495         device_printf(dev, "Receive No Buffers = %lld\n",
3496                       (long long)adapter->stats.rnbc);
3497         /* RLEC is inaccurate on some hardware, so calculate our own. */
3498         device_printf(dev, "Receive Length errors = %lld\n",
3499                       (long long)adapter->stats.roc +
3500                       (long long)adapter->stats.ruc);
3501         device_printf(dev, "Receive errors = %lld\n",
3502                       (long long)adapter->stats.rxerrc);
3503         device_printf(dev, "Crc errors = %lld\n",
3504                       (long long)adapter->stats.crcerrs);
3505         device_printf(dev, "Alignment errors = %lld\n",
3506                       (long long)adapter->stats.algnerrc);
3507         device_printf(dev, "Carrier extension errors = %lld\n",
3508                       (long long)adapter->stats.cexterr);
3509         device_printf(dev, "RX overruns = %lu\n", adapter->rx_overruns);
3510         device_printf(dev, "Watchdog timeouts = %lu\n",
3511                       adapter->watchdog_timeouts);
3512
3513         device_printf(dev, "XON Rcvd = %lld\n",
3514                       (long long)adapter->stats.xonrxc);
3515         device_printf(dev, "XON Xmtd = %lld\n",
3516                       (long long)adapter->stats.xontxc);
3517         device_printf(dev, "XOFF Rcvd = %lld\n",
3518                       (long long)adapter->stats.xoffrxc);
3519         device_printf(dev, "XOFF Xmtd = %lld\n",
3520                       (long long)adapter->stats.xofftxc);
3521
3522         device_printf(dev, "Good Packets Rcvd = %lld\n",
3523                       (long long)adapter->stats.gprc);
3524         device_printf(dev, "Good Packets Xmtd = %lld\n",
3525                       (long long)adapter->stats.gptc);
3526 }
3527
3528 static int
3529 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3530 {
3531         int error;
3532         int result;
3533         struct adapter *adapter;
3534
3535         result = -1;
3536         error = sysctl_handle_int(oidp, &result, 0, req);
3537
3538         if (error || !req->newptr)
3539                 return (error);
3540
3541         if (result == 1) {
3542                 adapter = (struct adapter *)arg1;
3543                 em_print_debug_info(adapter);
3544         }
3545
3546         return (error);
3547 }
3548
3549 static int
3550 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3551 {
3552         int error;
3553         int result;
3554         struct adapter *adapter;
3555
3556         result = -1;
3557         error = sysctl_handle_int(oidp, &result, 0, req);
3558
3559         if (error || !req->newptr)
3560                 return (error);
3561
3562         if (result == 1) {
3563                 adapter = (struct adapter *)arg1;
3564                 em_print_hw_stats(adapter);
3565         }
3566
3567         return (error);
3568 }
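/*
 * Both sysctl handlers above follow the same write-to-trigger
 * convention: a read simply returns -1, while writing 1 to the node
 * dumps the corresponding report to the console via device_printf().
 * The sysctl nodes themselves are created elsewhere in this file.
 */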
3569
3570 static int
3571 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3572 {
3573         struct em_int_delay_info *info;
3574         struct adapter *adapter;
3575         uint32_t regval;
3576         int error;
3577         int usecs;
3578         int ticks;
3579
3580         info = (struct em_int_delay_info *)arg1;
3581         adapter = info->adapter;
3582         usecs = info->value;
3583         error = sysctl_handle_int(oidp, &usecs, 0, req);
3584         if (error != 0 || req->newptr == NULL)
3585                 return (error);
3586         if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3587                 return (EINVAL);
3588         info->value = usecs;
3589         ticks = E1000_USECS_TO_TICKS(usecs);
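        /*
         * E1000_USECS_TO_TICKS() is assumed to convert microseconds to
         * the delay timers' native granularity (1.024us per tick on
         * this class of hardware), so e.g. usecs = 100 yields roughly
         * 98 ticks, which is what lands in the low 16 bits of the
         * register below.
         */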
3590
3591         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3592         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3593         regval = (regval & ~0xffff) | (ticks & 0xffff);
3594         /* Handle a few special cases. */
3595         switch (info->offset) {
3596         case E1000_RDTR:
3597         case E1000_82542_RDTR:
3598                 regval |= E1000_RDT_FPDB;
3599                 break;
3600         case E1000_TIDV:
3601         case E1000_82542_TIDV:
3602                 if (ticks == 0) {
3603                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3604                         /* Don't write 0 into the TIDV register. */
3605                         regval++;
3606                 } else
3607                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3608                 break;
3609         }
3610         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3611         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3612         return (0);
3613 }
3614
3615 static void
3616 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3617                         const char *description, struct em_int_delay_info *info,
3618                         int offset, int value)
3619 {
3620         info->adapter = adapter;
3621         info->offset = offset;
3622         info->value = value;
3623         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3624                         SYSCTL_CHILDREN(adapter->sysctl_tree),
3625                         OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3626                         info, 0, em_sysctl_int_delay, "I", description);
3627 }
3628
3629 static int
3630 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3631 {
3632         struct adapter *adapter = (void *)arg1;
3633         int error;
3634         int throttle;
3635
3636         throttle = em_int_throttle_ceil;
3637         error = sysctl_handle_int(oidp, &throttle, 0, req);
3638         if (error || req->newptr == NULL)
3639                 return error;
3640         if (throttle < 0 || throttle > 1000000000 / 256)
3641                 return EINVAL;
3642         if (throttle) {
3643                 /*
3644                  * Set the interrupt throttling rate in 256ns increments and
3645                  * recalculate the sysctl value to the exact rate programmed.
3646                  */
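                /*
                 * Worked example (integer arithmetic): a requested
                 * ceiling of 10000 interrupts/sec gives
                 *
                 *	throttle = 3906250 / 10000 = 390	(ITR value)
                 *	em_int_throttle_ceil = 3906250 / 390 = 10016
                 *
                 * where 3906250 = 1000000000 / 256, so the sysctl ends
                 * up reporting the rate actually achievable with the
                 * programmed register value.
                 */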
3647                 throttle = 1000000000 / 256 / throttle;
3648                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3649                 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3650                 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3651                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3652         } else {
3653                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3654                 em_int_throttle_ceil = 0;
3655                 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3656                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3657         }
3658         device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n", 
3659                         em_int_throttle_ceil);
3660         return 0;
3661 }