1 /*
2  *
3  * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4  *
5  * Copyright (c) 2001-2005, Intel Corporation
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  * 
11  *  1. Redistributions of source code must retain the above copyright notice,
12  *     this list of conditions and the following disclaimer.
13  * 
14  *  2. Redistributions in binary form must reproduce the above copyright
15  *     notice, this list of conditions and the following disclaimer in the
16  *     documentation and/or other materials provided with the distribution.
17  * 
18  *  3. Neither the name of the Intel Corporation nor the names of its
19  *     contributors may be used to endorse or promote products derived from
20  *     this software without specific prior written permission.
21  * 
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *
34  *
35  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
36  * 
37  * This code is derived from software contributed to The DragonFly Project
38  * by Matthew Dillon <dillon@backplane.com>
39  * 
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  * 3. Neither the name of The DragonFly Project nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific, prior written permission.
53  * 
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
57  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
58  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
60  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
61  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
62  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
63  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
64  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  * 
67  * $DragonFly: src/sys/dev/netif/em/if_em.c,v 1.45 2005/12/10 18:28:18 dillon Exp $
68  * $FreeBSD$
69  */
70 /*
71  * SERIALIZATION API RULES:
72  *
73  * - If the driver uses the same serializer for the interrupt as for the
74  *   ifnet, most of the serialization will be done automatically for the
75  *   driver.  
76  *
77  * - ifmedia entry points will be serialized by the ifmedia code using the
78  *   ifnet serializer.
79  *
80  * - if_* entry points except for if_input will be serialized by the IF
81  *   and protocol layers.
82  *
83  * - The device driver must be sure to serialize access from timeout code
84  *   installed by the device driver.
85  *
86  * - The device driver typically holds the serializer at the time it wishes
87  *   to call if_input.  If so, it should pass the serializer to if_input and
88  *   note that the serializer might be dropped temporarily by if_input 
89  *   (e.g. in case it has to bridge the packet to another interface).
90  *
91  *   NOTE!  Since callers into the device driver hold the ifnet serializer,
92  *   the device driver may be holding a serializer at the time it calls
93  *   if_input even if it is not serializer-aware.
94  */
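/*
 * Editorial illustration (disabled, not part of the driver): a timeout or
 * other asynchronous callback installed by the driver is not covered by the
 * automatic ifnet/interrupt serialization and must enter the ifnet
 * serializer itself before touching driver state.  The callback name below
 * is hypothetical.
 */
#if 0
static void
example_driver_timeout(void *arg)
{
        struct adapter *adapter = arg;
        struct ifnet *ifp = &adapter->interface_data.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);
        /* ... safe to inspect/modify adapter and ifnet state here ... */
        if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
                em_start(ifp);
        lwkt_serialize_exit(ifp->if_serializer);
}
#endif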
95
96 #include "opt_polling.h"
97
98 #include <dev/netif/em/if_em.h>
99 #include <net/ifq_var.h>
100
101 /*********************************************************************
102  *  Set this to one to display debug statistics                                                   
103  *********************************************************************/
104 int             em_display_debug_stats = 0;
105
106 /*********************************************************************
107  *  Driver version
108  *********************************************************************/
109
110 char em_driver_version[] = "3.2.15";
111
112
113 /*********************************************************************
114  *  PCI Device ID Table
115  *
116  *  Used by probe to select the devices to load the driver on
117  *  Last field stores an index into em_strings
118  *  Last entry must be all 0s
119  *
120  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
121  *********************************************************************/
122
123 static em_vendor_info_t em_vendor_info_array[] =
124 {
125         /* Intel(R) PRO/1000 Network Connection */
126         { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
127         { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
128         { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
129         { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
130         { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},
131
132         { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
133         { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
134         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
135         { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
136         { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
137         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
138
139         { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},
140
141         { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
142         { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
143
144         { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
145         { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
146         { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
147         { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
148
149         { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
150         { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
151         { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
152         { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
153         { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
154
155         { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
156         { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
157         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
158         { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
159         { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
160         { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
161         { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
162 #ifdef KINGSPORT_PROJECT
163         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
164 #endif  /* KINGSPORT_PROJECT */
165
166         { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
167         { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
168
169         { 0x8086, E1000_DEV_ID_82571EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
170         { 0x8086, E1000_DEV_ID_82571EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
171         { 0x8086, E1000_DEV_ID_82571EB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
172
173         { 0x8086, E1000_DEV_ID_82572EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
174         { 0x8086, E1000_DEV_ID_82572EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
175         { 0x8086, E1000_DEV_ID_82572EI_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
176
177         { 0x8086, E1000_DEV_ID_82573E,          PCI_ANY_ID, PCI_ANY_ID, 0},
178         { 0x8086, E1000_DEV_ID_82573E_IAMT,     PCI_ANY_ID, PCI_ANY_ID, 0},
179         { 0x8086, E1000_DEV_ID_82573L,          PCI_ANY_ID, PCI_ANY_ID, 0},
180
181         { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
182         { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
183         /* required last entry */
184         { 0, 0, 0, 0, 0}
185 };
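/*
 * Editorial note: supporting an additional adapter is normally just a matter
 * of appending one more row of the form
 *
 *	{ 0x8086, E1000_DEV_ID_NEW_PART, PCI_ANY_ID, PCI_ANY_ID, 0 },
 *
 * (E1000_DEV_ID_NEW_PART is a placeholder) ahead of the terminating all-zero
 * entry; em_probe() matches on vendor/device ID and treats PCI_ANY_ID as a
 * wildcard for the subsystem IDs.
 */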
186
187 /*********************************************************************
188  *  Table of branding strings for all supported NICs.
189  *********************************************************************/
190
191 static const char *em_strings[] = {
192         "Intel(R) PRO/1000 Network Connection"
193 };
194
195 /*********************************************************************
196  *  Function prototypes            
197  *********************************************************************/
198 static int      em_probe(device_t);
199 static int      em_attach(device_t);
200 static int      em_detach(device_t);
201 static int      em_shutdown(device_t);
202 static void     em_intr(void *);
203 static void     em_start(struct ifnet *);
204 static int      em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
205 static void     em_watchdog(struct ifnet *);
206 static void     em_init(void *);
207 static void     em_stop(void *);
208 static void     em_media_status(struct ifnet *, struct ifmediareq *);
209 static int      em_media_change(struct ifnet *);
210 static void     em_identify_hardware(struct adapter *);
211 static void     em_local_timer(void *);
212 static int      em_hardware_init(struct adapter *);
213 static void     em_setup_interface(device_t, struct adapter *);
214 static int      em_setup_transmit_structures(struct adapter *);
215 static void     em_initialize_transmit_unit(struct adapter *);
216 static int      em_setup_receive_structures(struct adapter *);
217 static void     em_initialize_receive_unit(struct adapter *);
218 static void     em_enable_intr(struct adapter *);
219 static void     em_disable_intr(struct adapter *);
220 static void     em_free_transmit_structures(struct adapter *);
221 static void     em_free_receive_structures(struct adapter *);
222 static void     em_update_stats_counters(struct adapter *);
223 static void     em_clean_transmit_interrupts(struct adapter *);
224 static int      em_allocate_receive_structures(struct adapter *);
225 static int      em_allocate_transmit_structures(struct adapter *);
226 static void     em_process_receive_interrupts(struct adapter *, int);
227 static void     em_receive_checksum(struct adapter *, struct em_rx_desc *,
228                                     struct mbuf *);
229 static void     em_transmit_checksum_setup(struct adapter *, struct mbuf *,
230                                            uint32_t *, uint32_t *);
231 static void     em_set_promisc(struct adapter *);
232 static void     em_disable_promisc(struct adapter *);
233 static void     em_set_multi(struct adapter *);
234 static void     em_print_hw_stats(struct adapter *);
235 static void     em_print_link_status(struct adapter *);
236 static int      em_get_buf(int i, struct adapter *, struct mbuf *, int how);
237 static void     em_enable_vlans(struct adapter *);
238 static int      em_encap(struct adapter *, struct mbuf *);
239 static void     em_smartspeed(struct adapter *);
240 static int      em_82547_fifo_workaround(struct adapter *, int);
241 static void     em_82547_update_fifo_head(struct adapter *, int);
242 static int      em_82547_tx_fifo_reset(struct adapter *);
243 static void     em_82547_move_tail(void *arg);
244 static void     em_82547_move_tail_serialized(void *arg);
245 static int      em_dma_malloc(struct adapter *, bus_size_t,
246                               struct em_dma_alloc *, int);
247 static void     em_dma_free(struct adapter *, struct em_dma_alloc *);
248 static void     em_print_debug_info(struct adapter *);
249 static int      em_is_valid_ether_addr(uint8_t *);
250 static int      em_sysctl_stats(SYSCTL_HANDLER_ARGS);
251 static int      em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
252 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length, 
253                                    PDESC_ARRAY desc_array);
254 static int      em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
255 static int      em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
256 static void     em_add_int_delay_sysctl(struct adapter *, const char *,
257                                         const char *,
258                                         struct em_int_delay_info *, int, int);
259
260 /*********************************************************************
261  *  FreeBSD Device Interface Entry Points                    
262  *********************************************************************/
263
264 static device_method_t em_methods[] = {
265         /* Device interface */
266         DEVMETHOD(device_probe, em_probe),
267         DEVMETHOD(device_attach, em_attach),
268         DEVMETHOD(device_detach, em_detach),
269         DEVMETHOD(device_shutdown, em_shutdown),
270         {0, 0}
271 };
272
273 static driver_t em_driver = {
274         "em", em_methods, sizeof(struct adapter),
275 };
276
277 static devclass_t em_devclass;
278
279 DECLARE_DUMMY_MODULE(if_em);
280 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
281
282 /*********************************************************************
283  *  Tunable default values.
284  *********************************************************************/
285
286 #define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
287 #define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)
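/*
 * Editorial note: the interrupt delay registers count in units of 1.024 usec,
 * which the rounding constants above account for.  A worked example (values
 * chosen for illustration only):
 *
 *	E1000_USECS_TO_TICKS(100) = (1000 * 100 + 512) / 1024 = 98 ticks
 *	E1000_TICKS_TO_USECS(98)  = (1024 * 98 + 500) / 1000  = 100 usecs
 */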
288
289 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
290 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
291 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
292 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
293 static int em_int_throttle_ceil = 10000;
294
295 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
296 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
297 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
298 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
299 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
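/*
 * Editorial note: these knobs can be overridden at boot time from
 * loader.conf before the driver initializes, e.g. (values are examples only):
 *
 *	hw.em.rx_int_delay="32"
 *	hw.em.int_throttle_ceil="6000"
 */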
300
301 /*
302  * Kernel trace for characterization of operations
303  */
304 #if !defined(KTR_IF_EM)
305 #define KTR_IF_EM       KTR_ALL
306 #endif
307 KTR_INFO_MASTER(if_em);
308 KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin", 0);
309 KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end", 0);
310 KTR_INFO(KTR_IF_EM, if_em, poll_beg, 2, "poll begin", 0);
311 KTR_INFO(KTR_IF_EM, if_em, poll_end, 3, "poll end", 0);
312 KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet", 0);
313 KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet", 0);
314 KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean", 0);
315 #define logif(name)     KTR_LOG(if_em_ ## name)
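/*
 * Editorial note: logif(name) expands to KTR_LOG(if_em_name), so e.g.
 * logif(pkt_receive) records the "rx packet" event declared above.  Since
 * KTR_IF_EM is only defined here when not already set, the trace mask can be
 * narrowed or widened from the kernel configuration.
 */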
316
317 /*********************************************************************
318  *  Device identification routine
319  *
320  *  em_probe determines whether the driver should be loaded on an
321  *  adapter, based on the PCI vendor/device ID of that adapter.
322  *
323  *  return 0 on success, positive on failure
324  *********************************************************************/
325
326 static int
327 em_probe(device_t dev)
328 {
329         em_vendor_info_t *ent;
330
331         uint16_t pci_vendor_id = 0;
332         uint16_t pci_device_id = 0;
333         uint16_t pci_subvendor_id = 0;
334         uint16_t pci_subdevice_id = 0;
335         char adapter_name[60];
336
337         INIT_DEBUGOUT("em_probe: begin");
338
339         pci_vendor_id = pci_get_vendor(dev);
340         if (pci_vendor_id != EM_VENDOR_ID)
341                 return(ENXIO);
342
343         pci_device_id = pci_get_device(dev);
344         pci_subvendor_id = pci_get_subvendor(dev);
345         pci_subdevice_id = pci_get_subdevice(dev);
346
347         ent = em_vendor_info_array;
348         while (ent->vendor_id != 0) {
349                 if ((pci_vendor_id == ent->vendor_id) &&
350                     (pci_device_id == ent->device_id) &&
351
352                     ((pci_subvendor_id == ent->subvendor_id) ||
353                      (ent->subvendor_id == PCI_ANY_ID)) &&
354
355                     ((pci_subdevice_id == ent->subdevice_id) ||
356                      (ent->subdevice_id == PCI_ANY_ID))) {
357                         snprintf(adapter_name, sizeof(adapter_name),
358                                  "%s, Version - %s",  em_strings[ent->index], 
359                                  em_driver_version);
360                         device_set_desc_copy(dev, adapter_name);
361                         return(0);
362                 }
363                 ent++;
364         }
365
366         return(ENXIO);
367 }
368
369 /*********************************************************************
370  *  Device initialization routine
371  *
372  *  The attach entry point is called when the driver is being loaded.
373  *  This routine identifies the type of hardware, allocates all resources 
374  *  and initializes the hardware.     
375  *  
376  *  return 0 on success, positive on failure
377  *********************************************************************/
378
379 static int
380 em_attach(device_t dev)
381 {
382         struct adapter *adapter;
383         int tsize, rsize;
384         int i, val, rid;
385         int error = 0;
386
387         INIT_DEBUGOUT("em_attach: begin");
388
389         adapter = device_get_softc(dev);
390
391         callout_init(&adapter->timer);
392         callout_init(&adapter->tx_fifo_timer);
393
394         adapter->dev = dev;
395         adapter->osdep.dev = dev;
396
397         /* SYSCTL stuff */
398         sysctl_ctx_init(&adapter->sysctl_ctx);
399         adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
400                                                SYSCTL_STATIC_CHILDREN(_hw),
401                                                OID_AUTO, 
402                                                device_get_nameunit(dev),
403                                                CTLFLAG_RD,
404                                                0, "");
405
406         if (adapter->sysctl_tree == NULL) {
407                 error = EIO;
408                 goto fail;
409         }
410
411         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
412                         SYSCTL_CHILDREN(adapter->sysctl_tree),
413                         OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, 
414                         (void *)adapter, 0,
415                         em_sysctl_debug_info, "I", "Debug Information");
416
417         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
418                         SYSCTL_CHILDREN(adapter->sysctl_tree),
419                         OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, 
420                         (void *)adapter, 0,
421                         em_sysctl_stats, "I", "Statistics");
422
423         /* Determine hardware revision */
424         em_identify_hardware(adapter);
425
426         /* Set up some sysctls for the tunable interrupt delays */
427         em_add_int_delay_sysctl(adapter, "rx_int_delay",
428                                 "receive interrupt delay in usecs",
429                                 &adapter->rx_int_delay,
430                                 E1000_REG_OFFSET(&adapter->hw, RDTR),
431                                 em_rx_int_delay_dflt);
432         em_add_int_delay_sysctl(adapter, "tx_int_delay",
433                                 "transmit interrupt delay in usecs",
434                                 &adapter->tx_int_delay,
435                                 E1000_REG_OFFSET(&adapter->hw, TIDV),
436                                 em_tx_int_delay_dflt);
437         if (adapter->hw.mac_type >= em_82540) {
438                 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
439                                         "receive interrupt delay limit in usecs",
440                                         &adapter->rx_abs_int_delay,
441                                         E1000_REG_OFFSET(&adapter->hw, RADV),
442                                         em_rx_abs_int_delay_dflt);
443                 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
444                                         "transmit interrupt delay limit in usecs",
445                                         &adapter->tx_abs_int_delay,
446                                         E1000_REG_OFFSET(&adapter->hw, TADV),
447                                         em_tx_abs_int_delay_dflt);
448                 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
449                         SYSCTL_CHILDREN(adapter->sysctl_tree),
450                         OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
451                         adapter, 0, em_sysctl_int_throttle, "I", NULL);
452         }
453
454         /* Parameters (to be read from user) */   
455         adapter->num_tx_desc = EM_MAX_TXD;
456         adapter->num_rx_desc = EM_MAX_RXD;
457         adapter->hw.autoneg = DO_AUTO_NEG;
458         adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
459         adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
460         adapter->hw.tbi_compatibility_en = TRUE;
461         adapter->rx_buffer_len = EM_RXBUFFER_2048;
462
463         adapter->hw.phy_init_script = 1;
464         adapter->hw.phy_reset_disable = FALSE;
465
466 #ifndef EM_MASTER_SLAVE
467         adapter->hw.master_slave = em_ms_hw_default;
468 #else
469         adapter->hw.master_slave = EM_MASTER_SLAVE;
470 #endif
471
472         /* 
473          * Set the max frame size assuming standard ethernet 
474          * sized frames 
475          */   
476         adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
477
478         adapter->hw.min_frame_size = 
479             MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
480
481         /* 
482          * This controls when hardware reports transmit completion 
483          * status. 
484          */
485         adapter->hw.report_tx_early = 1;
486
487         rid = EM_MMBA;
488         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
489                                                      &rid, RF_ACTIVE);
490         if (!(adapter->res_memory)) {
491                 device_printf(dev, "Unable to allocate bus resource: memory\n");
492                 error = ENXIO;
493                 goto fail;
494         }
495         adapter->osdep.mem_bus_space_tag = 
496             rman_get_bustag(adapter->res_memory);
497         adapter->osdep.mem_bus_space_handle = 
498             rman_get_bushandle(adapter->res_memory);
499         adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
500
501         if (adapter->hw.mac_type > em_82543) {
502                 /* Figure out where our I/O BAR is */
503                 rid = EM_MMBA;
504                 for (i = 0; i < 5; i++) {
505                         val = pci_read_config(dev, rid, 4);
506                         if (val & 0x00000001) {
507                                 adapter->io_rid = rid;
508                                 break;
509                         }
510                         rid += 4;
511                 }
512
513                 adapter->res_ioport = bus_alloc_resource_any(dev,
514                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
515                 if (!(adapter->res_ioport)) {
516                         device_printf(dev, "Unable to allocate bus resource: ioport\n");
517                         error = ENXIO;
518                         goto fail;
519                 }
520
521                 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
522                 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
523         }
524
525         rid = 0x0;
526         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
527             &rid, RF_SHAREABLE | RF_ACTIVE);
528         if (!(adapter->res_interrupt)) {
529                 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
530                 error = ENXIO;
531                 goto fail;
532         }
533
534         adapter->hw.back = &adapter->osdep;
535
536         em_init_eeprom_params(&adapter->hw);
537
538         tsize = EM_ROUNDUP(adapter->num_tx_desc *
539                            sizeof(struct em_tx_desc), 4096);
540
541         /* Allocate Transmit Descriptor ring */
542         if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
543                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
544                 error = ENOMEM;
545                 goto fail;
546         }
547         adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
548
549         rsize = EM_ROUNDUP(adapter->num_rx_desc *
550                            sizeof(struct em_rx_desc), 4096);
551
552         /* Allocate Receive Descriptor ring */
553         if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
554                 device_printf(dev, "Unable to allocate rx_desc memory\n");
555                 error = ENOMEM;
556                 goto fail;
557         }
558         adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
559
560         /* Initialize the hardware */
561         if (em_hardware_init(adapter)) {
562                 device_printf(dev, "Unable to initialize the hardware\n");
563                 error = EIO;
564                 goto fail;
565         }
566
567         /* Copy the permanent MAC address out of the EEPROM */
568         if (em_read_mac_addr(&adapter->hw) < 0) {
569                 device_printf(dev, "EEPROM read error while reading mac address\n");
570                 error = EIO;
571                 goto fail;
572         }
573
574         if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
575                 device_printf(dev, "Invalid mac address\n");
576                 error = EIO;
577                 goto fail;
578         }
579
580         /* Setup OS specific network interface */
581         em_setup_interface(dev, adapter);
582
583         /* Initialize statistics */
584         em_clear_hw_cntrs(&adapter->hw);
585         em_update_stats_counters(adapter);
586         adapter->hw.get_link_status = 1;
587         em_check_for_link(&adapter->hw);
588
589         /* Print the link status */
590         if (adapter->link_active == 1) {
591                 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, 
592                                         &adapter->link_duplex);
593                 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
594                     adapter->link_speed,
595                     adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
596         } else
597                 device_printf(dev, "Speed: N/A, Duplex: N/A\n");
598
599         /* Identify 82544 on PCIX */
600         em_get_bus_info(&adapter->hw);  
601         if (adapter->hw.bus_type == em_bus_type_pcix &&
602             adapter->hw.mac_type == em_82544)
603                 adapter->pcix_82544 = TRUE;
604         else
605                 adapter->pcix_82544 = FALSE;
606
607         error = bus_setup_intr(dev, adapter->res_interrupt, INTR_NETSAFE,
608                            (void (*)(void *)) em_intr, adapter,
609                            &adapter->int_handler_tag,
610                            adapter->interface_data.ac_if.if_serializer);
611         if (error) {
612                 device_printf(dev, "Error registering interrupt handler!\n");
613                 ether_ifdetach(&adapter->interface_data.ac_if);
614                 goto fail;
615         }
616
617         INIT_DEBUGOUT("em_attach: end");
618         return(0);
619
620 fail:
621         em_detach(dev);
622         return(error);
623 }
624
625 /*********************************************************************
626  *  Device removal routine
627  *
628  *  The detach entry point is called when the driver is being removed.
629  *  This routine stops the adapter and deallocates all the resources
630  *  that were allocated for driver operation.
631  *  
632  *  return 0 on success, positive on failure
633  *********************************************************************/
634
635 static int
636 em_detach(device_t dev)
637 {
638         struct adapter *adapter = device_get_softc(dev);
639
640         INIT_DEBUGOUT("em_detach: begin");
641
642         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
643         adapter->in_detach = 1;
644
645         if (device_is_attached(dev)) {
646                 em_stop(adapter);
647                 em_phy_hw_reset(&adapter->hw);
648                 ether_ifdetach(&adapter->interface_data.ac_if);
649         }
650         bus_generic_detach(dev);
651
652         if (adapter->int_handler_tag != NULL) {
653                 bus_teardown_intr(dev, adapter->res_interrupt, 
654                                   adapter->int_handler_tag);
655         }
656         if (adapter->res_interrupt != NULL) {
657                 bus_release_resource(dev, SYS_RES_IRQ, 0, 
658                                      adapter->res_interrupt);
659         }
660         if (adapter->res_memory != NULL) {
661                 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA, 
662                                      adapter->res_memory);
663         }
664
665         if (adapter->res_ioport != NULL) {
666                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 
667                                      adapter->res_ioport);
668         }
669
670         /* Free Transmit Descriptor ring */
671         if (adapter->tx_desc_base != NULL) {
672                 em_dma_free(adapter, &adapter->txdma);
673                 adapter->tx_desc_base = NULL;
674         }
675
676         /* Free Receive Descriptor ring */
677         if (adapter->rx_desc_base != NULL) {
678                 em_dma_free(adapter, &adapter->rxdma);
679                 adapter->rx_desc_base = NULL;
680         }
681
682         adapter->sysctl_tree = NULL;
683         sysctl_ctx_free(&adapter->sysctl_ctx);
684
685         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
686         return(0);
687 }
688
689 /*********************************************************************
690  *
691  *  Shutdown entry point
692  *
693  **********************************************************************/ 
694
695 static int
696 em_shutdown(device_t dev)
697 {
698         struct adapter *adapter = device_get_softc(dev);
699         em_stop(adapter);
700         return(0);
701 }
702
703 /*********************************************************************
704  *  Transmit entry point
705  *
706  *  em_start is called by the stack to initiate a transmit.
707  *  The driver will remain in this routine as long as there are
708  *  packets to transmit and transmit resources are available.
709  *  If resources are not available, the stack is notified and
710  *  the packet is requeued.
711  **********************************************************************/
712
713 static void
714 em_start(struct ifnet *ifp)
715 {
716         struct mbuf *m_head;
717         struct adapter *adapter = ifp->if_softc;
718
719         ASSERT_SERIALIZED(adapter->interface_data.ac_if.if_serializer);
720
721         if (!adapter->link_active)
722                 return;
723         while (!ifq_is_empty(&ifp->if_snd)) {
724                 m_head = ifq_poll(&ifp->if_snd);
725
726                 if (m_head == NULL)
727                         break;
728
729                 logif(pkt_txqueue);
730                 if (em_encap(adapter, m_head)) { 
731                         ifp->if_flags |= IFF_OACTIVE;
732                         break;
733                 }
734                 ifq_dequeue(&ifp->if_snd, m_head);
735
736                 /* Send a copy of the frame to the BPF listener */
737                 BPF_MTAP(ifp, m_head);
738         
739                 /* Set timeout in case hardware has problems transmitting */
740                 ifp->if_timer = EM_TX_TIMEOUT;        
741         }
742 }
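/*
 * Editorial note on the loop above: ifq_poll() only peeks at the head of the
 * send queue; the mbuf is ifq_dequeue()d after em_encap() has accepted it.
 * If em_encap() fails (typically out of descriptors), the packet stays on
 * the queue and IFF_OACTIVE is set; the interrupt and poll paths call
 * em_start() again once descriptors have been reclaimed.
 */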
743
744 /*********************************************************************
745  *  Ioctl entry point
746  *
747  *  em_ioctl is called when the user wants to configure the
748  *  interface.
749  *
750  *  return 0 on success, positive on failure
751  **********************************************************************/
752
753 static int
754 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
755 {
756         int max_frame_size, mask, error = 0;
757         struct ifreq *ifr = (struct ifreq *) data;
758         struct adapter *adapter = ifp->if_softc;
759
760         ASSERT_SERIALIZED(adapter->interface_data.ac_if.if_serializer);
761
762         if (adapter->in_detach)
763                 goto out;
764
765         switch (command) {
766         case SIOCSIFADDR:
767         case SIOCGIFADDR:
768                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
769                 ether_ioctl(ifp, command, data);
770                 break;
771         case SIOCSIFMTU:
772                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
773                 switch (adapter->hw.mac_type) {
774                 case em_82571:
775                 case em_82572:
776                         max_frame_size = 10500;
777                         break;
778                 case em_82573:
779                         /* 82573 does not support jumbo frames */
780                         max_frame_size = ETHER_MAX_LEN;
781                         break;
782                 default:
783                         max_frame_size = MAX_JUMBO_FRAME_SIZE;
784                         break;
785                 }
786                 if (ifr->ifr_mtu >
787                         max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
788                         error = EINVAL;
789                 } else {
790                         ifp->if_mtu = ifr->ifr_mtu;
791                         adapter->hw.max_frame_size = 
792                         ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
793                         em_init(adapter);
794                 }
795                 break;
796         case SIOCSIFFLAGS:
797                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
798                 if (ifp->if_flags & IFF_UP) {
799                         if (!(ifp->if_flags & IFF_RUNNING))
800                                 em_init(adapter);
801                         em_disable_promisc(adapter);
802                         em_set_promisc(adapter);
803                 } else {
804                         if (ifp->if_flags & IFF_RUNNING)
805                                 em_stop(adapter);
806                 }
807                 break;
808         case SIOCADDMULTI:
809         case SIOCDELMULTI:
810                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
811                 if (ifp->if_flags & IFF_RUNNING) {
812                         em_disable_intr(adapter);
813                         em_set_multi(adapter);
814                         if (adapter->hw.mac_type == em_82542_rev2_0)
815                                 em_initialize_receive_unit(adapter);
816                         em_enable_intr(adapter);
817                 }
818                 break;
819         case SIOCSIFMEDIA:
820         case SIOCGIFMEDIA:
821                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
822                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
823                 break;
824         case SIOCSIFCAP:
825                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
826                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
827                 if (mask & IFCAP_HWCSUM) {
828                         if (IFCAP_HWCSUM & ifp->if_capenable)
829                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
830                         else
831                                 ifp->if_capenable |= IFCAP_HWCSUM;
832                         if (ifp->if_flags & IFF_RUNNING)
833                                 em_init(adapter);
834                 }
835                 break;
836         default:
837                 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
838                 error = EINVAL;
839         }
840
841 out:
842         return(error);
843 }
844
845 /*********************************************************************
846  *  Watchdog entry point
847  *
848  *  This routine is called whenever hardware quits transmitting.
849  *
850  **********************************************************************/
851
852 static void
853 em_watchdog(struct ifnet *ifp)
854 {
855         struct adapter * adapter;
856         adapter = ifp->if_softc;
857
858         /* If we are in this routine because of pause frames, then
859          * don't reset the hardware.
860          */
861         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
862                 ifp->if_timer = EM_TX_TIMEOUT;
863                 return;
864         }
865
866         if (em_check_for_link(&adapter->hw))
867                 if_printf(ifp, "watchdog timeout -- resetting\n");
868
869         ifp->if_flags &= ~IFF_RUNNING;
870
871         em_init(adapter);
872
873         ifp->if_oerrors++;
874 }
875
876 /*********************************************************************
877  *  Init entry point
878  *
879  *  This routine is used in two ways.  It is used by the stack as the
880  *  init entry point in the network interface structure.  It is also
881  *  used by the driver as a hw/sw initialization routine to get to a
882  *  consistent state.
883  *
884  *  This routine returns nothing; errors are reported via if_printf().
885  **********************************************************************/
886
887 static void
888 em_init(void *arg)
889 {
890         struct adapter *adapter = arg;
891         uint32_t pba;
892         struct ifnet *ifp = &adapter->interface_data.ac_if;
893
894         INIT_DEBUGOUT("em_init: begin");
895
896         em_stop(adapter);
897
898         /*
899          * Packet Buffer Allocation (PBA)
900          * Writing PBA sets the receive portion of the buffer;
901          * the remainder is used for the transmit buffer.
902          */
903         switch (adapter->hw.mac_type) {
904         case em_82547: 
905         case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
906                 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
907                         pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
908                 else
909                         pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
910
911                 adapter->tx_fifo_head = 0;
912                 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
913                 adapter->tx_fifo_size =
914                         (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
915                 break;
916         case em_82571: /* 82571: Total Packet Buffer is 48K */
917         case em_82572: /* 82572: Total Packet Buffer is 48K */
918                 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
919                 break;
920         case em_82573: /* 82573: Total Packet Buffer is 32K */
921                 /* Jumbo frames not supported */
922                 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
923                 break;
924         default:
925                 /* Devices before 82547 had a Packet Buffer of 64K.   */
926                 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
927                         pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
928                 else
929                         pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
930         }
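        /*
         * Editorial note: the PBA register is programmed in 1KB units, so
         * e.g. pba = E1000_PBA_30K gives the receiver 30KB of the 82547's
         * 40KB packet buffer and leaves 10KB for transmit;
         * (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT above converts that
         * remainder back into bytes for tx_fifo_size.
         */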
931
932         INIT_DEBUGOUT1("em_init: pba=%dK",pba);
933         E1000_WRITE_REG(&adapter->hw, PBA, pba);
934
935         /* Get the latest MAC address; the user may have set an LAA */
936         bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
937               ETHER_ADDR_LEN);
938
939         /* Initialize the hardware */
940         if (em_hardware_init(adapter)) {
941                 if_printf(ifp, "Unable to initialize the hardware\n");
942                 return;
943         }
944
945         em_enable_vlans(adapter);
946
947         /* Prepare transmit descriptors and buffers */
948         if (em_setup_transmit_structures(adapter)) {
949                 if_printf(ifp, "Could not setup transmit structures\n");
950                 em_stop(adapter); 
951                 return;
952         }
953         em_initialize_transmit_unit(adapter);
954
955         /* Setup Multicast table */
956         em_set_multi(adapter);
957
958         /* Prepare receive descriptors and buffers */
959         if (em_setup_receive_structures(adapter)) {
960                 if_printf(ifp, "Could not setup receive structures\n");
961                 em_stop(adapter);
962                 return;
963         }
964         em_initialize_receive_unit(adapter);
965
966         /* Don't lose promiscuous settings */
967         em_set_promisc(adapter);
968
969         ifp->if_flags |= IFF_RUNNING;
970         ifp->if_flags &= ~IFF_OACTIVE;
971
972         if (adapter->hw.mac_type >= em_82543) {
973                 if (ifp->if_capenable & IFCAP_TXCSUM)
974                         ifp->if_hwassist = EM_CHECKSUM_FEATURES;
975                 else
976                         ifp->if_hwassist = 0;
977         }
978
979         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
980         em_clear_hw_cntrs(&adapter->hw);
981         em_enable_intr(adapter);
982
983         /* Don't reset the phy next time init gets called */
984         adapter->hw.phy_reset_disable = TRUE;
985 }
986
987 #ifdef DEVICE_POLLING
988
989 static void
990 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
991 {
992         struct adapter *adapter = ifp->if_softc;
993         uint32_t reg_icr;
994
995         logif(poll_beg);
996
997         ASSERT_SERIALIZED(ifp->if_serializer);
998
999         switch(cmd) {
1000         case POLL_REGISTER:
1001                 em_disable_intr(adapter);
1002                 break;
1003         case POLL_DEREGISTER:
1004                 em_enable_intr(adapter);
1005                 break;
1006         case POLL_AND_CHECK_STATUS:
1007                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1008                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1009                         callout_stop(&adapter->timer);
1010                         adapter->hw.get_link_status = 1;
1011                         em_check_for_link(&adapter->hw);
1012                         em_print_link_status(adapter);
1013                         callout_reset(&adapter->timer, hz, em_local_timer,
1014                                       adapter);
1015                 }
1016                 /* fall through */
1017         case POLL_ONLY:
1018                 if (ifp->if_flags & IFF_RUNNING) {
1019                         em_process_receive_interrupts(adapter, count);
1020                         em_clean_transmit_interrupts(adapter);
1021                 }
1022                 if (ifp->if_flags & IFF_RUNNING) {
1023                         if (!ifq_is_empty(&ifp->if_snd))
1024                                 em_start(ifp);
1025                 }
1026                 break;
1027         }
1028         logif(poll_end);
1029 }
1030
1031 #endif /* DEVICE_POLLING */
1032
1033 /*********************************************************************
1034  *
1035  *  Interrupt Service routine
1036  *
1037  **********************************************************************/
1038 static void
1039 em_intr(void *arg)
1040 {
1041         uint32_t reg_icr;
1042         struct ifnet *ifp;
1043         struct adapter *adapter = arg;
1044
1045         ifp = &adapter->interface_data.ac_if;  
1046
1047         logif(intr_beg);
1048         ASSERT_SERIALIZED(ifp->if_serializer);
1049
1050         reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1051         if (!reg_icr) {
1052                 logif(intr_end);
1053                 return;
1054         }
1055
1056         /* Link status change */
1057         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1058                 callout_stop(&adapter->timer);
1059                 adapter->hw.get_link_status = 1;
1060                 em_check_for_link(&adapter->hw);
1061                 em_print_link_status(adapter);
1062                 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1063         }
1064
1065         /*
1066          * note: do not attempt to improve efficiency by looping.  This 
1067          * only results in unnecessary piecemeal collection of received
1068          * packets and unnecessary piecemeal cleanups of the transmit ring.
1069          */
1070         if (ifp->if_flags & IFF_RUNNING) {
1071                 em_process_receive_interrupts(adapter, -1);
1072                 em_clean_transmit_interrupts(adapter);
1073         }
1074
1075         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
1076                 em_start(ifp);
1077         logif(intr_end);
1078 }
1079
1080 /*********************************************************************
1081  *
1082  *  Media Ioctl callback
1083  *
1084  *  This routine is called whenever the user queries the status of
1085  *  the interface using ifconfig.
1086  *
1087  **********************************************************************/
1088 static void
1089 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1090 {
1091         struct adapter * adapter = ifp->if_softc;
1092
1093         INIT_DEBUGOUT("em_media_status: begin");
1094
1095         ASSERT_SERIALIZED(ifp->if_serializer);
1096
1097         em_check_for_link(&adapter->hw);
1098         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1099                 if (adapter->link_active == 0) {
1100                         em_get_speed_and_duplex(&adapter->hw, 
1101                                                 &adapter->link_speed, 
1102                                                 &adapter->link_duplex);
1103                         adapter->link_active = 1;
1104                 }
1105         } else {
1106                 if (adapter->link_active == 1) {
1107                         adapter->link_speed = 0;
1108                         adapter->link_duplex = 0;
1109                         adapter->link_active = 0;
1110                 }
1111         }
1112
1113         ifmr->ifm_status = IFM_AVALID;
1114         ifmr->ifm_active = IFM_ETHER;
1115
1116         if (!adapter->link_active)
1117                 return;
1118
1119         ifmr->ifm_status |= IFM_ACTIVE;
1120
1121         if (adapter->hw.media_type == em_media_type_fiber) {
1122                 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1123         } else {
1124                 switch (adapter->link_speed) {
1125                 case 10:
1126                         ifmr->ifm_active |= IFM_10_T;
1127                         break;
1128                 case 100:
1129                         ifmr->ifm_active |= IFM_100_TX;
1130                         break;
1131                 case 1000:
1132                         ifmr->ifm_active |= IFM_1000_T;
1133                         break;
1134                 }
1135                 if (adapter->link_duplex == FULL_DUPLEX)
1136                         ifmr->ifm_active |= IFM_FDX;
1137                 else
1138                         ifmr->ifm_active |= IFM_HDX;
1139         }
1140 }
1141
1142 /*********************************************************************
1143  *
1144  *  Media Ioctl callback
1145  *
1146  *  This routine is called when the user changes speed/duplex using
1147  *  the media/mediaopt options with ifconfig.
1148  *
1149  **********************************************************************/
1150 static int
1151 em_media_change(struct ifnet *ifp)
1152 {
1153         struct adapter * adapter = ifp->if_softc;
1154         struct ifmedia  *ifm = &adapter->media;
1155
1156         INIT_DEBUGOUT("em_media_change: begin");
1157
1158         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1159                 return(EINVAL);
1160
1161         ASSERT_SERIALIZED(ifp->if_serializer);
1162
1163         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1164         case IFM_AUTO:
1165                 adapter->hw.autoneg = DO_AUTO_NEG;
1166                 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1167                 break;
1168         case IFM_1000_SX:
1169         case IFM_1000_T:
1170                 adapter->hw.autoneg = DO_AUTO_NEG;
1171                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1172                 break;
1173         case IFM_100_TX:
1174                 adapter->hw.autoneg = FALSE;
1175                 adapter->hw.autoneg_advertised = 0;
1176                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1177                         adapter->hw.forced_speed_duplex = em_100_full;
1178                 else
1179                         adapter->hw.forced_speed_duplex = em_100_half;
1180                 break;
1181         case IFM_10_T:
1182                 adapter->hw.autoneg = FALSE;
1183                 adapter->hw.autoneg_advertised = 0;
1184                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1185                         adapter->hw.forced_speed_duplex = em_10_full;
1186                 else
1187                         adapter->hw.forced_speed_duplex = em_10_half;
1188                 break;
1189         default:
1190                 if_printf(ifp, "Unsupported media type\n");
1191         }
1192         /*
1193          * As the speed/duplex settings may have changed, we need to
1194          * reset the PHY.
1195          */
1196         adapter->hw.phy_reset_disable = FALSE;
1197
1198         em_init(adapter);
1199
1200         return(0);
1201 }
1202
1203 static void
1204 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1205          int error)
1206 {
1207         struct em_q *q = arg;
1208
1209         if (error)
1210                 return;
1211         KASSERT(nsegs <= EM_MAX_SCATTER,
1212                 ("Too many DMA segments returned when mapping tx packet"));
1213         q->nsegs = nsegs;
1214         bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1215 }
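/*
 * Editorial note: bus_dmamap_load_mbuf() runs the callback above
 * synchronously (the driver passes BUS_DMA_NOWAIT), handing it the
 * scatter/gather list for the mbuf chain.  em_encap() then walks
 * q.segs[0..q.nsegs-1] and emits one transmit descriptor per segment
 * (or several per segment on the 82544/PCIX workaround path).
 */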
1216
1217 /*********************************************************************
1218  *
1219  *  This routine maps the mbufs to tx descriptors.
1220  *
1221  *  return 0 on success, positive on failure
1222  **********************************************************************/
1223 static int
1224 em_encap(struct adapter *adapter, struct mbuf *m_head)
1225 {
1226         uint32_t txd_upper;
1227         uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1228         int i, j, error;
1229         uint64_t address;
1230
1231         /* For 82544 Workaround */
1232         DESC_ARRAY desc_array;
1233         uint32_t array_elements;
1234         uint32_t counter;
1235
1236         struct ifvlan *ifv = NULL;
1237         struct em_q q;
1238         struct em_buffer *tx_buffer = NULL;
1239         struct em_tx_desc *current_tx_desc = NULL;
1240         struct ifnet *ifp = &adapter->interface_data.ac_if;
1241
1242         /*
1243          * Force a cleanup if number of TX descriptors
1244          * available hits the threshold
1245          */
1246         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1247                 em_clean_transmit_interrupts(adapter);
1248                 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1249                         adapter->no_tx_desc_avail1++;
1250                         return(ENOBUFS);
1251                 }
1252         }
1253         /*
1254          * Map the packet for DMA.
1255          */
1256         if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1257                 adapter->no_tx_map_avail++;
1258                 return(ENOMEM);
1259         }
1260         error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1261                                      &q, BUS_DMA_NOWAIT);
1262         if (error != 0) {
1263                 adapter->no_tx_dma_setup++;
1264                 bus_dmamap_destroy(adapter->txtag, q.map);
1265                 return(error);
1266         }
1267         KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1268
1269         if (q.nsegs > adapter->num_tx_desc_avail) {
1270                 adapter->no_tx_desc_avail2++;
1271                 bus_dmamap_unload(adapter->txtag, q.map);
1272                 bus_dmamap_destroy(adapter->txtag, q.map);
1273                 return(ENOBUFS);
1274         }
1275
1276         if (ifp->if_hwassist > 0) {
1277                 em_transmit_checksum_setup(adapter,  m_head,
1278                                            &txd_upper, &txd_lower);
1279         } else {
1280                 txd_upper = txd_lower = 0;
1281         }
1282
1283         /* Find out if we are in vlan mode */
1284         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1285             m_head->m_pkthdr.rcvif != NULL &&
1286             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1287                 ifv = m_head->m_pkthdr.rcvif->if_softc;
1288
1289         i = adapter->next_avail_tx_desc;
1290         if (adapter->pcix_82544) {
1291                 txd_saved = i;
1292                 txd_used = 0;
1293         }
1294         for (j = 0; j < q.nsegs; j++) {
1295                 /* If adapter is 82544 and on PCIX bus */
1296                 if(adapter->pcix_82544) {
1297                         array_elements = 0;
1298                         address = htole64(q.segs[j].ds_addr);
1299                         /* 
1300                          * Check the Address and Length combination and
1301                          * split the data accordingly
1302                          */
1303                         array_elements = em_fill_descriptors(address,
1304                                                              htole32(q.segs[j].ds_len),
1305                                                              &desc_array);
1306                         for (counter = 0; counter < array_elements; counter++) {
1307                                 if (txd_used == adapter->num_tx_desc_avail) {
1308                                         adapter->next_avail_tx_desc = txd_saved;
1309                                         adapter->no_tx_desc_avail2++;
1310                                         bus_dmamap_unload(adapter->txtag, q.map);
1311                                         bus_dmamap_destroy(adapter->txtag, q.map);
1312                                         return(ENOBUFS);
1313                                 }
1314                                 tx_buffer = &adapter->tx_buffer_area[i];
1315                                 current_tx_desc = &adapter->tx_desc_base[i];
1316                                 current_tx_desc->buffer_addr = htole64(
1317                                     desc_array.descriptor[counter].address);
1318                                 current_tx_desc->lower.data = htole32(
1319                                     adapter->txd_cmd | txd_lower |
1320                                     (uint16_t)desc_array.descriptor[counter].length);
1321                                 current_tx_desc->upper.data = htole32((txd_upper));
1322                                 if (++i == adapter->num_tx_desc)
1323                                         i = 0;
1324
1325                                 tx_buffer->m_head = NULL;
1326                                 txd_used++;
1327                         }
1328                 } else {
1329                         tx_buffer = &adapter->tx_buffer_area[i];
1330                         current_tx_desc = &adapter->tx_desc_base[i];
1331
1332                         current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1333                         current_tx_desc->lower.data = htole32(
1334                                 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1335                         current_tx_desc->upper.data = htole32(txd_upper);
1336
1337                         if (++i == adapter->num_tx_desc)
1338                                 i = 0;
1339
1340                         tx_buffer->m_head = NULL;
1341                 }
1342         }
1343
1344         adapter->next_avail_tx_desc = i;
1345         if (adapter->pcix_82544)
1346                 adapter->num_tx_desc_avail -= txd_used;
1347         else
1348                 adapter->num_tx_desc_avail -= q.nsegs;
1349
1350         if (ifv != NULL) {
1351                 /* Set the vlan id */
1352                 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1353
1354                 /* Tell hardware to add tag */
1355                 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1356         }
1357
1358         tx_buffer->m_head = m_head;
1359         tx_buffer->map = q.map;
1360         bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1361
1362         /*
1363          * Last Descriptor of Packet needs End Of Packet (EOP)
1364          */
1365         current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1366
1367         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1368                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1369
1370         /* 
1371          * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1372          * that this frame is available to transmit.
1373          */
1374         if (adapter->hw.mac_type == em_82547 &&
1375             adapter->link_duplex == HALF_DUPLEX) {
1376                 em_82547_move_tail_serialized(adapter);
1377         } else {
1378                 E1000_WRITE_REG(&adapter->hw, TDT, i);
1379                 if (adapter->hw.mac_type == em_82547) {
1380                         em_82547_update_fifo_head(adapter,
1381                                                   m_head->m_pkthdr.len);
1382                 }
1383         }
1384
1385         return(0);
1386 }
1387
1388 /*********************************************************************
1389  *
1390  * 82547 workaround to avoid controller hang in half-duplex environment.
1391  * The workaround is to avoid queuing a large packet that would span
1392  * the internal Tx FIFO ring boundary; instead the FIFO pointers are
1393  * reset, and only once the FIFO has quiesced.
1394  *
1395  **********************************************************************/
1396 static void
1397 em_82547_move_tail(void *arg)
1398 {
1399         struct adapter *adapter = arg;
1400
1401         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
1402         em_82547_move_tail_serialized(arg);
1403         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
1404 }
1405
1406 static void
1407 em_82547_move_tail_serialized(void *arg)
1408 {
1409         struct adapter *adapter = arg;
1410         uint16_t hw_tdt;
1411         uint16_t sw_tdt;
1412         struct em_tx_desc *tx_desc;
1413         uint16_t length = 0;
1414         boolean_t eop = 0;
1415
1416         hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1417         sw_tdt = adapter->next_avail_tx_desc;
1418
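        /*
         * Walk the descriptors between the tail the hardware has seen
         * (hw_tdt) and the tail software has prepared (sw_tdt), but only
         * advance TDT at end-of-packet boundaries so each whole frame can
         * first be checked against the space left in the Tx FIFO.
         */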
1419         while (hw_tdt != sw_tdt) {
1420                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1421                 length += tx_desc->lower.flags.length;
1422                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1423                 if(++hw_tdt == adapter->num_tx_desc)
1424                         hw_tdt = 0;
1425
1426                 if(eop) {
1427                         if (em_82547_fifo_workaround(adapter, length)) {
1428                                 adapter->tx_fifo_wrk_cnt++;
1429                                 callout_reset(&adapter->tx_fifo_timer, 1,
1430                                         em_82547_move_tail, adapter);
1431                                 break;
1432                         }
1433                         E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1434                         em_82547_update_fifo_head(adapter, length);
1435                         length = 0;
1436                 }
1437         }       
1438 }
1439
1440 static int
1441 em_82547_fifo_workaround(struct adapter *adapter, int len)
1442 {       
1443         int fifo_space, fifo_pkt_len;
1444
1445         fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
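        /*
         * Example (assuming EM_FIFO_HDR is 16): a 1514-byte frame consumes
         * EM_ROUNDUP(1514 + 16, 16) = 1536 bytes of the internal Tx FIFO.
         */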
1446
1447         if (adapter->link_duplex == HALF_DUPLEX) {
1448                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1449
1450                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1451                         if (em_82547_tx_fifo_reset(adapter))
1452                                 return(0);
1453                         else
1454                                 return(1);
1455                 }
1456         }
1457
1458         return(0);
1459 }
1460
1461 static void
1462 em_82547_update_fifo_head(struct adapter *adapter, int len)
1463 {
1464         int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1465
1466         /* tx_fifo_head is always 16 byte aligned */
1467         adapter->tx_fifo_head += fifo_pkt_len;
1468         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1469                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1470 }
1471
1472 static int
1473 em_82547_tx_fifo_reset(struct adapter *adapter)
1474 {
1475         uint32_t tctl;
1476
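        /*
         * Only reset when the controller is completely idle: the descriptor
         * ring is drained (TDT == TDH), the FIFO head/tail pointers have
         * met, and the FIFO packet count (TDFPC) is zero.
         */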
1477         if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1478               E1000_READ_REG(&adapter->hw, TDH)) &&
1479              (E1000_READ_REG(&adapter->hw, TDFT) == 
1480               E1000_READ_REG(&adapter->hw, TDFH)) &&
1481              (E1000_READ_REG(&adapter->hw, TDFTS) ==
1482               E1000_READ_REG(&adapter->hw, TDFHS)) &&
1483              (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1484
1485                 /* Disable TX unit */
1486                 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1487                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1488
1489                 /* Reset FIFO pointers */
1490                 E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1491                 E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1492                 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1493                 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1494
1495                 /* Re-enable TX unit */
1496                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1497                 E1000_WRITE_FLUSH(&adapter->hw);
1498
1499                 adapter->tx_fifo_head = 0;
1500                 adapter->tx_fifo_reset_cnt++;
1501
1502                 return(TRUE);
1503         } else {
1504                 return(FALSE);
1505         }
1506 }
1507
1508 static void
1509 em_set_promisc(struct adapter *adapter)
1510 {
1511         uint32_t reg_rctl, ctrl;
1512         struct ifnet *ifp = &adapter->interface_data.ac_if;
1513
1514         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1515         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
1516
1517         if (ifp->if_flags & IFF_PROMISC) {
1518                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1519                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1520
1521                 /*
1522                  * Disable VLAN stripping in promiscuous mode.
1523                  * This allows bridging of VLAN-tagged frames and also
1524                  * makes the VLAN tags visible in tcpdump.
1525                  */
1526                 ctrl &= ~E1000_CTRL_VME; 
1527                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
1528         } else if (ifp->if_flags & IFF_ALLMULTI) {
1529                 reg_rctl |= E1000_RCTL_MPE;
1530                 reg_rctl &= ~E1000_RCTL_UPE;
1531                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1532         }
1533 }
1534
1535 static void
1536 em_disable_promisc(struct adapter *adapter)
1537 {
1538         uint32_t reg_rctl;
1539
1540         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1541
1542         reg_rctl &= (~E1000_RCTL_UPE);
1543         reg_rctl &= (~E1000_RCTL_MPE);
1544         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1545
1546         em_enable_vlans(adapter);
1547 }
1548
1549 /*********************************************************************
1550  *  Multicast Update
1551  *
1552  *  This routine is called whenever the multicast address list is updated.
1553  *
1554  **********************************************************************/
1555
1556 static void
1557 em_set_multi(struct adapter *adapter)
1558 {
1559         uint32_t reg_rctl = 0;
1560         uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1561         struct ifmultiaddr *ifma;
1562         int mcnt = 0;
1563         struct ifnet *ifp = &adapter->interface_data.ac_if;
1564
1565         IOCTL_DEBUGOUT("em_set_multi: begin");
1566
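        /*
         * The 82542 rev 2.0 path below holds the receiver in reset
         * (RCTL_RST) and temporarily disables MWI while the multicast
         * table is rewritten, then releases the receiver again once the
         * new filter has been programmed.
         */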
1567         if (adapter->hw.mac_type == em_82542_rev2_0) {
1568                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1569                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1570                         em_pci_clear_mwi(&adapter->hw);
1571                 reg_rctl |= E1000_RCTL_RST;
1572                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1573                 msec_delay(5);
1574         }
1575
1576         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1577                 if (ifma->ifma_addr->sa_family != AF_LINK)
1578                         continue;
1579
1580                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1581                         break;
1582
1583                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1584                       &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1585                 mcnt++;
1586         }
1587
1588         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1589                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1590                 reg_rctl |= E1000_RCTL_MPE;
1591                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1592         } else {
1593                 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1594         }
1595
1596         if (adapter->hw.mac_type == em_82542_rev2_0) {
1597                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1598                 reg_rctl &= ~E1000_RCTL_RST;
1599                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1600                 msec_delay(5);
1601                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1602                         em_pci_set_mwi(&adapter->hw);
1603         }
1604 }
1605
1606 /*********************************************************************
1607  *  Timer routine
1608  *
1609  *  This routine checks for link status and updates statistics.
1610  *
1611  **********************************************************************/
1612
1613 static void
1614 em_local_timer(void *arg)
1615 {
1616         struct ifnet *ifp;
1617         struct adapter *adapter = arg;
1618         ifp = &adapter->interface_data.ac_if;
1619
1620         lwkt_serialize_enter(ifp->if_serializer);
1621
1622         em_check_for_link(&adapter->hw);
1623         em_print_link_status(adapter);
1624         em_update_stats_counters(adapter);   
1625         if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1626                 em_print_hw_stats(adapter);
1627         em_smartspeed(adapter);
1628
1629         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1630
1631         lwkt_serialize_exit(ifp->if_serializer);
1632 }
1633
1634 static void
1635 em_print_link_status(struct adapter *adapter)
1636 {
1637         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1638                 if (adapter->link_active == 0) {
1639                         em_get_speed_and_duplex(&adapter->hw, 
1640                                                 &adapter->link_speed, 
1641                                                 &adapter->link_duplex);
1642                         device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1643                                adapter->link_speed,
1644                                ((adapter->link_duplex == FULL_DUPLEX) ?
1645                                 "Full Duplex" : "Half Duplex"));
1646                         adapter->link_active = 1;
1647                         adapter->smartspeed = 0;
1648                 }
1649         } else {
1650                 if (adapter->link_active == 1) {
1651                         adapter->link_speed = 0;
1652                         adapter->link_duplex = 0;
1653                         device_printf(adapter->dev, "Link is Down\n");
1654                         adapter->link_active = 0;
1655                 }
1656         }
1657 }
1658
1659 /*********************************************************************
1660  *
1661  *  This routine disables all traffic on the adapter by issuing a
1662  *  global reset on the MAC and deallocates TX/RX buffers. 
1663  *
1664  **********************************************************************/
1665
1666 static void
1667 em_stop(void *arg)
1668 {
1669         struct ifnet   *ifp;
1670         struct adapter * adapter = arg;
1671         ifp = &adapter->interface_data.ac_if;
1672
1673         INIT_DEBUGOUT("em_stop: begin");
1674         em_disable_intr(adapter);
1675         em_reset_hw(&adapter->hw);
1676         callout_stop(&adapter->timer);
1677         callout_stop(&adapter->tx_fifo_timer);
1678         em_free_transmit_structures(adapter);
1679         em_free_receive_structures(adapter);
1680
1681         /* Tell the stack that the interface is no longer active */
1682         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1683         ifp->if_timer = 0;
1684 }
1685
1686 /*********************************************************************
1687  *
1688  *  Determine hardware revision.
1689  *
1690  **********************************************************************/
1691 static void
1692 em_identify_hardware(struct adapter * adapter)
1693 {
1694         device_t dev = adapter->dev;
1695
1696         /* Make sure bus mastering and memory access are enabled in PCI config space */
1697         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1698         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1699               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1700                 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1701                 adapter->hw.pci_cmd_word |= 
1702                 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1703                 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1704         }
1705
1706         /* Save off the information about this board */
1707         adapter->hw.vendor_id = pci_get_vendor(dev);
1708         adapter->hw.device_id = pci_get_device(dev);
1709         adapter->hw.revision_id = pci_get_revid(dev);
1710         adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1711         adapter->hw.subsystem_id = pci_get_subdevice(dev);
1712
1713         /* Identify the MAC */
1714         if (em_set_mac_type(&adapter->hw))
1715                 device_printf(dev, "Unknown MAC Type\n");
1716
1717         if (adapter->hw.mac_type == em_82541 ||
1718             adapter->hw.mac_type == em_82541_rev_2 ||
1719             adapter->hw.mac_type == em_82547 ||
1720             adapter->hw.mac_type == em_82547_rev_2)
1721                 adapter->hw.phy_init_script = TRUE;
1722 }
1723
1724 /*********************************************************************
1725  *
1726  *  Initialize the hardware to a configuration as specified by the
1727  *  adapter structure. The controller is reset, the EEPROM is
1728  *  verified, the MAC address is set, then the shared initialization
1729  *  routines are called.
1730  *
1731  **********************************************************************/
1732 static int
1733 em_hardware_init(struct adapter *adapter)
1734 {
1735         uint16_t        rx_buffer_size;
1736
1737         INIT_DEBUGOUT("em_hardware_init: begin");
1738         /* Issue a global reset */
1739         em_reset_hw(&adapter->hw);
1740
1741         /* When hardware is reset, fifo_head is also reset */
1742         adapter->tx_fifo_head = 0;
1743
1744         /* Make sure we have a good EEPROM before we read from it */
1745         if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1746                 device_printf(adapter->dev,
1747                               "The EEPROM Checksum Is Not Valid\n");
1748                 return(EIO);
1749         }
1750
1751         if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1752                 device_printf(adapter->dev,
1753                               "EEPROM read error while reading part number\n");
1754                 return(EIO);
1755         }
1756
1757         /*
1758          * These parameters control the automatic generation (Tx) of, and
1759          * the response (Rx) to, Ethernet PAUSE frames.
1760          * - High water mark should allow for at least two frames to be
1761          *   received after sending an XOFF.
1762          * - Low water mark works best when it is very near the high water mark.
1763          *   This allows the receiver to restart by sending XON when it has
1764          *   drained a bit.  Here we use an arbitrary value of 1500, which will
1765          *   restart after one full frame is pulled from the buffer.  There
1766          *   could be several smaller frames in the buffer and if so they will
1767          *   not trigger the XON until their total number reduces the buffer
1768          *   by 1500.
1769          * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1770          */
1771         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10);
1772
1773         adapter->hw.fc_high_water =
1774             rx_buffer_size - EM_ROUNDUP(1 * adapter->hw.max_frame_size, 1024); 
1775         adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
1776         adapter->hw.fc_pause_time = 1000;
1777         adapter->hw.fc_send_xon = TRUE;
1778         adapter->hw.fc = em_fc_full;
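        /*
         * Worked example (values are illustrative): with a 48KB Rx packet
         * buffer (PBA low word = 0x30) and a 1518-byte max frame,
         * rx_buffer_size = 48 * 1024 = 49152, high water = 49152 -
         * EM_ROUNDUP(1518, 1024) = 49152 - 2048 = 47104, and low water =
         * 47104 - 1500 = 45604 bytes.
         */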
1779
1780         if (em_init_hw(&adapter->hw) < 0) {
1781                 device_printf(adapter->dev, "Hardware Initialization Failed\n");
1782                 return(EIO);
1783         }
1784
1785         em_check_for_link(&adapter->hw);
1786         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1787                 adapter->link_active = 1;
1788         else
1789                 adapter->link_active = 0;
1790
1791         if (adapter->link_active) {
1792                 em_get_speed_and_duplex(&adapter->hw, 
1793                                         &adapter->link_speed, 
1794                                         &adapter->link_duplex);
1795         } else {
1796                 adapter->link_speed = 0;
1797                 adapter->link_duplex = 0;
1798         }
1799
1800         return(0);
1801 }
1802
1803 /*********************************************************************
1804  *
1805  *  Setup networking device structure and register an interface.
1806  *
1807  **********************************************************************/
1808 static void
1809 em_setup_interface(device_t dev, struct adapter *adapter)
1810 {
1811         struct ifnet   *ifp;
1812         INIT_DEBUGOUT("em_setup_interface: begin");
1813
1814         ifp = &adapter->interface_data.ac_if;
1815         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1816         ifp->if_mtu = ETHERMTU;
1817         ifp->if_baudrate = 1000000000;
1818         ifp->if_init =  em_init;
1819         ifp->if_softc = adapter;
1820         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1821         ifp->if_ioctl = em_ioctl;
1822         ifp->if_start = em_start;
1823 #ifdef DEVICE_POLLING
1824         ifp->if_poll = em_poll;
1825 #endif
1826         ifp->if_watchdog = em_watchdog;
1827         ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
1828         ifq_set_ready(&ifp->if_snd);
1829
1830         if (adapter->hw.mac_type >= em_82543)
1831                 ifp->if_capabilities |= IFCAP_HWCSUM;
1832
1833         ifp->if_capenable = ifp->if_capabilities;
1834
1835         ether_ifattach(ifp, adapter->hw.mac_addr, NULL);
1836
1837         /*
1838          * Tell the upper layer(s) we support long frames.
1839          */
1840         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1841         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1842
1843         /* 
1844          * Specify the media types supported by this adapter and register
1845          * callbacks to update media and link information
1846          */
1847         ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1848                      em_media_status);
1849         if (adapter->hw.media_type == em_media_type_fiber) {
1850                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 
1851                             0, NULL);
1852                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 
1853                             0, NULL);
1854         } else {
1855                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1856                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 
1857                             0, NULL);
1858                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 
1859                             0, NULL);
1860                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 
1861                             0, NULL);
1862                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 
1863                             0, NULL);
1864                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1865         }
1866         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1867         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1868 }
1869
1870 /*********************************************************************
1871  *
1872  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1873  *
1874  **********************************************************************/        
1875 static void
1876 em_smartspeed(struct adapter *adapter)
1877 {
1878         uint16_t phy_tmp;
1879
1880         if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) || 
1881             !adapter->hw.autoneg ||
1882             !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1883                 return;
1884
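        /*
         * SmartSpeed sequence: while the link is down and the PHY keeps
         * reporting master/slave configuration faults, temporarily drop the
         * manual master/slave setting and restart autonegotiation; after
         * EM_SMARTSPEED_DOWNSHIFT ticks force it back on (the cable may
         * only have two usable pairs), and start over after
         * EM_SMARTSPEED_MAX ticks.
         */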
1885         if (adapter->smartspeed == 0) {
1886                 /*
1887                  * If Master/Slave config fault is asserted twice,
1888                  * we assume back-to-back.
1889                  */
1890                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1891                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1892                         return;
1893                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1894                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1895                         em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1896                                         &phy_tmp);
1897                         if (phy_tmp & CR_1000T_MS_ENABLE) {
1898                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
1899                                 em_write_phy_reg(&adapter->hw,
1900                                                  PHY_1000T_CTRL, phy_tmp);
1901                                 adapter->smartspeed++;
1902                                 if (adapter->hw.autoneg &&
1903                                     !em_phy_setup_autoneg(&adapter->hw) &&
1904                                     !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1905                                                      &phy_tmp)) {
1906                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |  
1907                                                     MII_CR_RESTART_AUTO_NEG);
1908                                         em_write_phy_reg(&adapter->hw,
1909                                                          PHY_CTRL, phy_tmp);
1910                                 }
1911                         }
1912                 }
1913                 return;
1914         } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1915                 /* If still no link, perhaps using 2/3 pair cable */
1916                 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1917                 phy_tmp |= CR_1000T_MS_ENABLE;
1918                 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1919                 if (adapter->hw.autoneg &&
1920                     !em_phy_setup_autoneg(&adapter->hw) &&
1921                     !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1922                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
1923                                     MII_CR_RESTART_AUTO_NEG);
1924                         em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1925                 }
1926         }
1927         /* Restart process after EM_SMARTSPEED_MAX iterations */
1928         if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1929                 adapter->smartspeed = 0;
1930 }
1931
1932 /*
1933  * Manage DMA'able memory.
1934  */
1935 static void
1936 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1937 {
1938         if (error)
1939                 return;
1940         *(bus_addr_t*) arg = segs->ds_addr;
1941 }
1942
1943 static int
1944 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1945               struct em_dma_alloc *dma, int mapflags)
1946 {
1947         int r;
1948         device_t dev = adapter->dev;
1949
1950         r = bus_dma_tag_create(NULL,                    /* parent */
1951                                PAGE_SIZE, 0,            /* alignment, bounds */
1952                                BUS_SPACE_MAXADDR,       /* lowaddr */
1953                                BUS_SPACE_MAXADDR,       /* highaddr */
1954                                NULL, NULL,              /* filter, filterarg */
1955                                size,                    /* maxsize */
1956                                1,                       /* nsegments */
1957                                size,                    /* maxsegsize */
1958                                BUS_DMA_ALLOCNOW,        /* flags */
1959                                &dma->dma_tag);
1960         if (r != 0) {
1961                 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1962                               "error %u\n", r);
1963                 goto fail_0;
1964         }
1965
1966         r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1967                              BUS_DMA_NOWAIT, &dma->dma_map);
1968         if (r != 0) {
1969                 device_printf(dev, "em_dma_malloc: bus_dmamem_alloc failed; "
1970                               "size %ju, error %d\n", (uintmax_t)size, r);
1971                 goto fail_1;
1972         }
1973
1974         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1975                             size,
1976                             em_dmamap_cb,
1977                             &dma->dma_paddr,
1978                             mapflags | BUS_DMA_NOWAIT);
1979         if (r != 0) {
1980                 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1981                               "error %u\n", r);
1982                 goto fail_3;
1983         }
1984
1985         dma->dma_size = size;
1986         return(0);
1987
1988 fail_3:
1989         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1990         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1991 fail_1:
1992         bus_dma_tag_destroy(dma->dma_tag);
1993 fail_0:
1994         dma->dma_map = NULL;
1995         dma->dma_tag = NULL;
1996         return(r);
1997 }
1998
1999 static void
2000 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2001 {
2002         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2003         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2004         bus_dma_tag_destroy(dma->dma_tag);
2005 }
2006
2007 /*********************************************************************
2008  *
2009  *  Allocate memory for tx_buffer structures. The tx_buffer stores all 
2010  *  the information needed to transmit a packet on the wire. 
2011  *
2012  **********************************************************************/
2013 static int
2014 em_allocate_transmit_structures(struct adapter * adapter)
2015 {
2016         adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
2017             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
2018         if (adapter->tx_buffer_area == NULL) {
2019                 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
2020                 return(ENOMEM);
2021         }
2022
2023         return(0);
2024 }
2025
2026 /*********************************************************************
2027  *
2028  *  Allocate and initialize transmit structures. 
2029  *
2030  **********************************************************************/
2031 static int
2032 em_setup_transmit_structures(struct adapter * adapter)
2033 {
2034         /*
2035          * Setup DMA descriptor areas.
2036          */
2037         if (bus_dma_tag_create(NULL,                    /* parent */
2038                                1, 0,                    /* alignment, bounds */
2039                                BUS_SPACE_MAXADDR,       /* lowaddr */ 
2040                                BUS_SPACE_MAXADDR,       /* highaddr */
2041                                NULL, NULL,              /* filter, filterarg */
2042                                MCLBYTES * 8,            /* maxsize */
2043                                EM_MAX_SCATTER,          /* nsegments */
2044                                MCLBYTES * 8,            /* maxsegsize */
2045                                BUS_DMA_ALLOCNOW,        /* flags */ 
2046                                &adapter->txtag)) {
2047                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
2048                 return(ENOMEM);
2049         }
2050
2051         if (em_allocate_transmit_structures(adapter))
2052                 return(ENOMEM);
2053
2054         bzero((void *) adapter->tx_desc_base,
2055               (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2056
2057         adapter->next_avail_tx_desc = 0;
2058         adapter->oldest_used_tx_desc = 0;
2059
2060         /* Set number of descriptors available */
2061         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2062
2063         /* Set checksum context */
2064         adapter->active_checksum_context = OFFLOAD_NONE;
2065
2066         return(0);
2067 }
2068
2069 /*********************************************************************
2070  *
2071  *  Enable transmit unit.
2072  *
2073  **********************************************************************/
2074 static void
2075 em_initialize_transmit_unit(struct adapter * adapter)
2076 {
2077         uint32_t reg_tctl;
2078         uint32_t reg_tipg = 0;
2079         uint64_t bus_addr;
2080
2081         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2082
2083         /* Setup the Base and Length of the Tx Descriptor Ring */
2084         bus_addr = adapter->txdma.dma_paddr;
2085         E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
2086         E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
2087         E1000_WRITE_REG(&adapter->hw, TDLEN, 
2088                         adapter->num_tx_desc * sizeof(struct em_tx_desc));
2089
2090         /* Setup the HW Tx Head and Tail descriptor pointers */
2091         E1000_WRITE_REG(&adapter->hw, TDH, 0);
2092         E1000_WRITE_REG(&adapter->hw, TDT, 0);
2093
2094         HW_DEBUGOUT2("Base = %x, Length = %x\n", 
2095                      E1000_READ_REG(&adapter->hw, TDBAL),
2096                      E1000_READ_REG(&adapter->hw, TDLEN));
2097
2098         /* Set the default values for the Tx Inter Packet Gap timer */
2099         switch (adapter->hw.mac_type) {
2100         case em_82542_rev2_0:
2101         case em_82542_rev2_1:
2102                 reg_tipg = DEFAULT_82542_TIPG_IPGT;
2103                 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2104                 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2105                 break;
2106         default:
2107                 if (adapter->hw.media_type == em_media_type_fiber)
2108                         reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2109                 else
2110                         reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2111                 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2112                 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2113         }
2114
2115         E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2116         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2117         if (adapter->hw.mac_type >= em_82540)
2118                 E1000_WRITE_REG(&adapter->hw, TADV,
2119                                 adapter->tx_abs_int_delay.value);
2120
2121         /* Program the Transmit Control Register */
2122         reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2123                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2124         if (adapter->hw.mac_type >= em_82571)
2125                 reg_tctl |= E1000_TCTL_MULR;
2126         if (adapter->link_duplex == 1)
2127                 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2128         else
2129                 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2130         E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2131
2132         /* Setup Transmit Descriptor Settings for this adapter */   
2133         adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2134
2135         if (adapter->tx_int_delay.value > 0)
2136                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2137 }
2138
2139 /*********************************************************************
2140  *
2141  *  Free all transmit related data structures.
2142  *
2143  **********************************************************************/
2144 static void
2145 em_free_transmit_structures(struct adapter * adapter)
2146 {
2147         struct em_buffer *tx_buffer;
2148         int i;
2149
2150         INIT_DEBUGOUT("free_transmit_structures: begin");
2151
2152         if (adapter->tx_buffer_area != NULL) {
2153                 tx_buffer = adapter->tx_buffer_area;
2154                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2155                         if (tx_buffer->m_head != NULL) {
2156                                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2157                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2158                                 m_freem(tx_buffer->m_head);
2159                         }
2160                         tx_buffer->m_head = NULL;
2161                 }
2162         }
2163         if (adapter->tx_buffer_area != NULL) {
2164                 free(adapter->tx_buffer_area, M_DEVBUF);
2165                 adapter->tx_buffer_area = NULL;
2166         }
2167         if (adapter->txtag != NULL) {
2168                 bus_dma_tag_destroy(adapter->txtag);
2169                 adapter->txtag = NULL;
2170         }
2171 }
2172
2173 /*********************************************************************
2174  *
2175  *  The offload context needs to be set when we transfer the first
2176  *  packet of a particular protocol (TCP/UDP). We change the
2177  *  context only if the protocol type changes.
2178  *
2179  **********************************************************************/
2180 static void
2181 em_transmit_checksum_setup(struct adapter * adapter,
2182                            struct mbuf *mp,
2183                            uint32_t *txd_upper,
2184                            uint32_t *txd_lower) 
2185 {
2186         struct em_context_desc *TXD;
2187         struct em_buffer *tx_buffer;
2188         int curr_txd;
2189
2190         if (mp->m_pkthdr.csum_flags) {
2191                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2192                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2193                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2194                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2195                                 return;
2196                         else
2197                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2198                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2199                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2200                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2201                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2202                                 return;
2203                         else
2204                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2205                 } else {
2206                         *txd_upper = 0;
2207                         *txd_lower = 0;
2208                         return;
2209                 }
2210         } else {
2211                 *txd_upper = 0;
2212                 *txd_lower = 0;
2213                 return;
2214         }
2215
2216         /* If we reach this point, the checksum offload context
2217          * needs to be reset.
2218          */
2219         curr_txd = adapter->next_avail_tx_desc;
2220         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2221         TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2222
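        /*
         * The context descriptor tells the hardware where each checksum
         * region starts (ipcss/tucss), where to insert the computed
         * checksum (ipcso/tucso) and where the region ends (ipcse/tucse,
         * with 0 meaning "to the end of the packet").
         */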
2223         TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2224         TXD->lower_setup.ip_fields.ipcso =
2225             ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2226         TXD->lower_setup.ip_fields.ipcse =
2227             htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2228
2229         TXD->upper_setup.tcp_fields.tucss = 
2230             ETHER_HDR_LEN + sizeof(struct ip);
2231         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2232
2233         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2234                 TXD->upper_setup.tcp_fields.tucso =
2235                     ETHER_HDR_LEN + sizeof(struct ip) +
2236                     offsetof(struct tcphdr, th_sum);
2237         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2238                 TXD->upper_setup.tcp_fields.tucso =
2239                         ETHER_HDR_LEN + sizeof(struct ip) +
2240                         offsetof(struct udphdr, uh_sum);
2241         }
2242
2243         TXD->tcp_seg_setup.data = htole32(0);
2244         TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2245
2246         tx_buffer->m_head = NULL;
2247
2248         if (++curr_txd == adapter->num_tx_desc)
2249                 curr_txd = 0;
2250
2251         adapter->num_tx_desc_avail--;
2252         adapter->next_avail_tx_desc = curr_txd;
2253 }
2254
2255 /**********************************************************************
2256  *
2257  *  Examine each tx_buffer in the used queue. If the hardware is done
2258  *  processing the packet then free associated resources. The
2259  *  tx_buffer is put back on the free queue.
2260  *
2261  **********************************************************************/
2262
2263 static void
2264 em_clean_transmit_interrupts(struct adapter *adapter)
2265 {
2266         int i, num_avail;
2267         struct em_buffer *tx_buffer;
2268         struct em_tx_desc *tx_desc;
2269         struct ifnet *ifp = &adapter->interface_data.ac_if;
2270
2271         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2272                 return;
2273
2274         num_avail = adapter->num_tx_desc_avail; 
2275         i = adapter->oldest_used_tx_desc;
2276
2277         tx_buffer = &adapter->tx_buffer_area[i];
2278         tx_desc = &adapter->tx_desc_base[i];
2279
2280         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2281                         BUS_DMASYNC_POSTREAD);
2282
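        /*
         * The DD (Descriptor Done) status bit is written back by the
         * hardware once it has finished DMA for a descriptor; everything up
         * to the first descriptor without DD set can be reclaimed.
         */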
2283         while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2284                 tx_desc->upper.data = 0;
2285                 num_avail++;                        
2286
2287                 logif(pkt_txclean);
2288
2289                 if (tx_buffer->m_head) {
2290                         ifp->if_opackets++;
2291                         bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2292                                         BUS_DMASYNC_POSTWRITE);
2293                         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2294                         bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2295
2296                         m_freem(tx_buffer->m_head);
2297                         tx_buffer->m_head = NULL;
2298                 }
2299
2300                 if (++i == adapter->num_tx_desc)
2301                         i = 0;
2302
2303                 tx_buffer = &adapter->tx_buffer_area[i];
2304                 tx_desc = &adapter->tx_desc_base[i];
2305         }
2306
2307         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2308                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2309
2310         adapter->oldest_used_tx_desc = i;
2311
2312         /*
2313          * If we have enough room, clear IFF_OACTIVE to tell the stack
2314          * that it is OK to send packets.
2315          * If there are no pending descriptors, clear the timeout. Otherwise,
2316          * if some descriptors have been freed, restart the timeout.
2317          */
2318         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2319                 ifp->if_flags &= ~IFF_OACTIVE;
2320                 if (num_avail == adapter->num_tx_desc)
2321                         ifp->if_timer = 0;
2322                 else if (num_avail != adapter->num_tx_desc_avail)
2323                         ifp->if_timer = EM_TX_TIMEOUT;
2324         }
2325         adapter->num_tx_desc_avail = num_avail;
2326 }
2327
2328 /*********************************************************************
2329  *
2330  *  Get a buffer from system mbuf buffer pool.
2331  *
2332  **********************************************************************/
2333 static int
2334 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2335 {
2336         struct mbuf *mp = nmp;
2337         struct em_buffer *rx_buffer;
2338         struct ifnet *ifp;
2339         bus_addr_t paddr;
2340         int error;
2341
2342         ifp = &adapter->interface_data.ac_if;
2343
2344         if (mp == NULL) {
2345                 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2346                 if (mp == NULL) {
2347                         adapter->mbuf_cluster_failed++;
2348                         return(ENOBUFS);
2349                 }
2350                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2351         } else {
2352                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2353                 mp->m_data = mp->m_ext.ext_buf;
2354                 mp->m_next = NULL;
2355         }
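        /*
         * Trimming ETHER_ALIGN (2) bytes off the front shifts the 14-byte
         * Ethernet header so that the IP header following it lands on a
         * 32-bit boundary.
         */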
2356         if (ifp->if_mtu <= ETHERMTU)
2357                 m_adj(mp, ETHER_ALIGN);
2358
2359         rx_buffer = &adapter->rx_buffer_area[i];
2360
2361         /*
2362          * Using memory from the mbuf cluster pool, invoke the
2363          * bus_dma machinery to arrange the memory mapping.
2364          */
2365         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2366                                 mtod(mp, void *), mp->m_len,
2367                                 em_dmamap_cb, &paddr, 0);
2368         if (error) {
2369                 m_free(mp);
2370                 return(error);
2371         }
2372         rx_buffer->m_head = mp;
2373         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2374         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2375
2376         return(0);
2377 }
2378
2379 /*********************************************************************
2380  *
2381  *  Allocate memory for rx_buffer structures. Since we use one 
2382  *  rx_buffer per received packet, the maximum number of rx_buffers 
2383  *  that we'll need is equal to the number of receive descriptors 
2384  *  that we've allocated.
2385  *
2386  **********************************************************************/
2387 static int
2388 em_allocate_receive_structures(struct adapter *adapter)
2389 {
2390         int i, error, size;
2391         struct em_buffer *rx_buffer;
2392
2393         size = adapter->num_rx_desc * sizeof(struct em_buffer);
2394         adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2395
2396         error = bus_dma_tag_create(NULL,                /* parent */
2397                                    1, 0,                /* alignment, bounds */
2398                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2399                                    BUS_SPACE_MAXADDR,   /* highaddr */
2400                                    NULL, NULL,          /* filter, filterarg */
2401                                    MCLBYTES,            /* maxsize */
2402                                    1,                   /* nsegments */
2403                                    MCLBYTES,            /* maxsegsize */
2404                                    BUS_DMA_ALLOCNOW,    /* flags */
2405                                    &adapter->rxtag);
2406         if (error != 0) {
2407                 device_printf(adapter->dev, "em_allocate_receive_structures: "
2408                               "bus_dma_tag_create failed; error %u\n", error);
2409                 goto fail_0;
2410         }
2411  
2412         rx_buffer = adapter->rx_buffer_area;
2413         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2414                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2415                                           &rx_buffer->map);
2416                 if (error != 0) {
2417                         device_printf(adapter->dev,
2418                                       "em_allocate_receive_structures: "
2419                                       "bus_dmamap_create failed; error %u\n",
2420                                       error);
2421                         goto fail_1;
2422                 }
2423         }
2424
2425         for (i = 0; i < adapter->num_rx_desc; i++) {
2426                 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2427                 if (error != 0) {
2428                         adapter->rx_buffer_area[i].m_head = NULL;
2429                         adapter->rx_desc_base[i].buffer_addr = 0;
2430                         return(error);
2431                 }
2432         }
2433
2434         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2435                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2436
2437         return(0);
2438
2439 fail_1:
2440         bus_dma_tag_destroy(adapter->rxtag);
2441 fail_0:
2442         adapter->rxtag = NULL;
2443         free(adapter->rx_buffer_area, M_DEVBUF);
2444         adapter->rx_buffer_area = NULL;
2445         return(error);
2446 }
2447
2448 /*********************************************************************
2449  *
2450  *  Allocate and initialize receive structures.
2451  *  
2452  **********************************************************************/
2453 static int
2454 em_setup_receive_structures(struct adapter *adapter)
2455 {
2456         bzero((void *) adapter->rx_desc_base,
2457               (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2458
2459         if (em_allocate_receive_structures(adapter))
2460                 return(ENOMEM);
2461
2462         /* Setup our descriptor pointers */
2463         adapter->next_rx_desc_to_check = 0;
2464         return(0);
2465 }
2466
2467 /*********************************************************************
2468  *
2469  *  Enable receive unit.
2470  *  
2471  **********************************************************************/
2472 static void
2473 em_initialize_receive_unit(struct adapter *adapter)
2474 {
2475         uint32_t reg_rctl;
2476         uint32_t reg_rxcsum;
2477         struct ifnet *ifp;
2478         uint64_t bus_addr;
2479  
2480         INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2481
2482         ifp = &adapter->interface_data.ac_if;
2483
2484         /* Make sure receives are disabled while setting up the descriptor ring */
2485         E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2486
2487         /* Set the Receive Delay Timer Register */
2488         E1000_WRITE_REG(&adapter->hw, RDTR, 
2489                         adapter->rx_int_delay.value | E1000_RDT_FPDB);
2490
2491         if(adapter->hw.mac_type >= em_82540) {
2492                 E1000_WRITE_REG(&adapter->hw, RADV,
2493                                 adapter->rx_abs_int_delay.value);
2494
2495                 /* Set the interrupt throttling rate in 256ns increments */  
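                /*
                 * For example, a ceiling of 10000 interrupts/sec gives
                 * ITR = 1000000000 / 256 / 10000 ~= 390, i.e. a minimum
                 * gap of about 100us (390 * 256ns) between interrupts.
                 */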
2496                 if (em_int_throttle_ceil) {
2497                         E1000_WRITE_REG(&adapter->hw, ITR,
2498                                 1000000000 / 256 / em_int_throttle_ceil);
2499                 } else {
2500                         E1000_WRITE_REG(&adapter->hw, ITR, 0);
2501                 }
2502         }
2503
2504         /* Setup the Base and Length of the Rx Descriptor Ring */
2505         bus_addr = adapter->rxdma.dma_paddr;
2506         E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2507         E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2508         E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2509                         sizeof(struct em_rx_desc));
2510
2511         /* Setup the HW Rx Head and Tail Descriptor Pointers */
2512         E1000_WRITE_REG(&adapter->hw, RDH, 0);
2513         E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2514
2515         /* Setup the Receive Control Register */
2516         reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2517                    E1000_RCTL_RDMTS_HALF |
2518                    (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2519
2520         if (adapter->hw.tbi_compatibility_on == TRUE)
2521                 reg_rctl |= E1000_RCTL_SBP;
2522
2523         switch (adapter->rx_buffer_len) {
2524         default:
2525         case EM_RXBUFFER_2048:
2526                 reg_rctl |= E1000_RCTL_SZ_2048;
2527                 break;
2528         case EM_RXBUFFER_4096:
2529                 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2530                 break;            
2531         case EM_RXBUFFER_8192:
2532                 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2533                 break;
2534         case EM_RXBUFFER_16384:
2535                 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2536                 break;
2537         }
2538
2539         if (ifp->if_mtu > ETHERMTU)
2540                 reg_rctl |= E1000_RCTL_LPE;
2541
2542         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2543         if ((adapter->hw.mac_type >= em_82543) && 
2544             (ifp->if_capenable & IFCAP_RXCSUM)) {
2545                 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2546                 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2547                 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2548         }
2549
2550         /* Enable Receives */
2551         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2552 }
2553
2554 /*********************************************************************
2555  *
2556  *  Free receive related data structures.
2557  *
2558  **********************************************************************/
2559 static void
2560 em_free_receive_structures(struct adapter *adapter)
2561 {
2562         struct em_buffer *rx_buffer;
2563         int i;
2564
2565         INIT_DEBUGOUT("free_receive_structures: begin");
2566
2567         if (adapter->rx_buffer_area != NULL) {
2568                 rx_buffer = adapter->rx_buffer_area;
2569                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2570                         if (rx_buffer->map != NULL) {
2571                                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2572                                 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2573                         }
2574                         if (rx_buffer->m_head != NULL)
2575                                 m_freem(rx_buffer->m_head);
2576                         rx_buffer->m_head = NULL;
2577                 }
2578         }
2579         if (adapter->rx_buffer_area != NULL) {
2580                 free(adapter->rx_buffer_area, M_DEVBUF);
2581                 adapter->rx_buffer_area = NULL;
2582         }
2583         if (adapter->rxtag != NULL) {
2584                 bus_dma_tag_destroy(adapter->rxtag);
2585                 adapter->rxtag = NULL;
2586         }
2587 }
2588
2589 /*********************************************************************
2590  *
2591  *  This routine executes in interrupt context.  It replenishes
2592  *  the mbufs in the descriptor ring and hands data that has been
2593  *  DMA'd into host memory up to the network stack.
2594  *
2595  *  We loop at most count times if count is > 0, or until done if
2596  *  count < 0.
2597  *
2598  *********************************************************************/
2599 static void
2600 em_process_receive_interrupts(struct adapter *adapter, int count)
2601 {
2602         struct ifnet *ifp;
2603         struct mbuf *mp;
2604         uint8_t accept_frame = 0;
2605         uint8_t eop = 0;
2606         uint16_t len, desc_len, prev_len_adj;
2607         int i;
2608
2609         /* Pointer to the receive descriptor being examined. */
2610         struct em_rx_desc *current_desc;
2611
2612         ifp = &adapter->interface_data.ac_if;
2613         i = adapter->next_rx_desc_to_check;
2614         current_desc = &adapter->rx_desc_base[i];
2615
2616         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2617                         BUS_DMASYNC_POSTREAD);
2618
2619         if (!((current_desc->status) & E1000_RXD_STAT_DD))
2620                 return;
2621
2622         while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2623                 logif(pkt_receive);
2624                 mp = adapter->rx_buffer_area[i].m_head;
2625                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2626                                 BUS_DMASYNC_POSTREAD);
2627
2628                 accept_frame = 1;
2629                 prev_len_adj = 0;
2630                 desc_len = le16toh(current_desc->length);
2631                 if (current_desc->status & E1000_RXD_STAT_EOP) {
2632                         count--;
2633                         eop = 1;
2634                         if (desc_len < ETHER_CRC_LEN) {
2635                                 len = 0;
2636                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
2637                         } else {
2638                                 len = desc_len - ETHER_CRC_LEN;
2639                         }
2640                 } else {
2641                         eop = 0;
2642                         len = desc_len;
2643                 }
2644
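                /*
                 * The reported length includes the 4-byte Ethernet CRC,
                 * which is trimmed from the final fragment here.  When the
                 * last descriptor holds fewer than 4 bytes, part of the CRC
                 * sits in the previous fragment; prev_len_adj records how
                 * much to trim from that fragment further below.
                 */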
2645                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2646                         uint8_t last_byte;
2647                         uint32_t pkt_len = desc_len;
2648
2649                         if (adapter->fmp != NULL)
2650                                 pkt_len += adapter->fmp->m_pkthdr.len; 
2651
2652                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2653
2654                         if (TBI_ACCEPT(&adapter->hw, current_desc->status, 
2655                                        current_desc->errors, 
2656                                        pkt_len, last_byte)) {
2657                                 em_tbi_adjust_stats(&adapter->hw, 
2658                                                     &adapter->stats, 
2659                                                     pkt_len, 
2660                                                     adapter->hw.mac_addr);
2661                                 if (len > 0)
2662                                         len--;
2663                         } else {
2664                                 accept_frame = 0;
2665                         }
2666                 }
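                /*
                 * The error path above implements the TBI (fiber/SerDes)
                 * link workaround: a frame that is otherwise good can be
                 * flagged in error (typically when it ends in a
                 * carrier-extension symbol).  TBI_ACCEPT() recognizes that
                 * case, the statistics are corrected, and the trailing pad
                 * byte is dropped via len--.
                 */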
2667
2668                 if (accept_frame) {
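                        /*
                         * Give the ring slot a fresh mbuf.  If allocation
                         * fails, recycle the mbuf we just received back
                         * into the slot and drop the frame (and any
                         * fragments already chained) rather than leave a
                         * hole in the receive ring.
                         */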
2669                         if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2670                                 adapter->dropped_pkts++;
2671                                 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2672                                 if (adapter->fmp != NULL) 
2673                                         m_freem(adapter->fmp);
2674                                 adapter->fmp = NULL;
2675                                 adapter->lmp = NULL;
2676                                 break;
2677                         }
2678
2679                         /* Assign correct length to the current fragment */
2680                         mp->m_len = len;
2681
2682                         if (adapter->fmp == NULL) {
2683                                 mp->m_pkthdr.len = len;
2684                                 adapter->fmp = mp;       /* Store the first mbuf */
2685                                 adapter->lmp = mp;
2686                         } else {
2687                                 /* Chain mbufs together */
2688                                 /* 
2689                                  * Adjust length of previous mbuf in chain if we 
2690                                  * received less than 4 bytes in the last descriptor.
2691                                  */
2692                                 if (prev_len_adj > 0) {
2693                                         adapter->lmp->m_len -= prev_len_adj;
2694                                         adapter->fmp->m_pkthdr.len -= prev_len_adj;
2695                                 }
2696                                 adapter->lmp->m_next = mp;
2697                                 adapter->lmp = adapter->lmp->m_next;
2698                                 adapter->fmp->m_pkthdr.len += len;
2699                         }
2700
2701                         if (eop) {
2702                                 adapter->fmp->m_pkthdr.rcvif = ifp;
2703                                 ifp->if_ipackets++;
2704
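                                /*
                                 * Record the hardware checksum verdict and
                                 * hand the completed chain to the stack.
                                 * VLAN-tagged frames go through
                                 * VLAN_INPUT_TAG() with the tag taken from
                                 * the descriptor's "special" field.
                                 */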
2705                                 em_receive_checksum(adapter, current_desc,
2706                                                     adapter->fmp);
2707                                 if (current_desc->status & E1000_RXD_STAT_VP) {
2708                                         VLAN_INPUT_TAG(adapter->fmp,
2709                                                        (current_desc->special & 
2710                                                         E1000_RXD_SPC_VLAN_MASK));
2711                                 } else {
2712                                         ifp->if_input(ifp, adapter->fmp);
2713                                 }
2714                                 adapter->fmp = NULL;
2715                                 adapter->lmp = NULL;
2716                         }
2717                 } else {
2718                         adapter->dropped_pkts++;
2719                         em_get_buf(i, adapter, mp, MB_DONTWAIT);
2720                         if (adapter->fmp != NULL) 
2721                                 m_freem(adapter->fmp);
2722                         adapter->fmp = NULL;
2723                         adapter->lmp = NULL;
2724                 }
2725
2726                 /* Zero out the receive descriptor's status. */
2727                 current_desc->status = 0;
2728
2729                 /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2730                 E1000_WRITE_REG(&adapter->hw, RDT, i);
2731
2732                 /* Advance our pointers to the next descriptor */
2733                 if (++i == adapter->num_rx_desc) {
2734                         i = 0;
2735                         current_desc = adapter->rx_desc_base;
2736                 } else {
2737                         current_desc++;
2738                 }
2739         }
2740
2741         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2742                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2743
2744         adapter->next_rx_desc_to_check = i;
2745 }
2746
2747 /*********************************************************************
2748  *
2749  *  Verify that the hardware indicated that the checksum is valid.
2750  *  Inform the stack about the checksum status so that the stack
2751  *  doesn't have to spend time verifying it in software.
2752  *
2753  *********************************************************************/
2754 static void
2755 em_receive_checksum(struct adapter *adapter,
2756                     struct em_rx_desc *rx_desc,
2757                     struct mbuf *mp)
2758 {
2759         /* 82543 or newer only */
2760         if ((adapter->hw.mac_type < em_82543) ||
2761             /* Ignore Checksum bit is set */
2762             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2763                 mp->m_pkthdr.csum_flags = 0;
2764                 return;
2765         }
2766
2767         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2768                 /* Did it pass? */
2769                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2770                         /* IP Checksum Good */
2771                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2772                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2773                 } else {
2774                         mp->m_pkthdr.csum_flags = 0;
2775                 }
2776         }
2777
2778         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2779                 /* Did it pass? */        
2780                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2781                         mp->m_pkthdr.csum_flags |= 
2782                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2783                         mp->m_pkthdr.csum_data = htons(0xffff);
2784                 }
2785         }
2786 }
2787
2788
2789 static void 
2790 em_enable_vlans(struct adapter *adapter)
2791 {
2792         uint32_t ctrl;
2793
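        /*
         * VET holds the ethertype the MAC matches when looking for 802.1Q
         * tags; setting CTRL.VME enables VLAN handling so tagged frames are
         * recognized and the tag is reported in the receive descriptor.
         */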
2794         E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2795
2796         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2797         ctrl |= E1000_CTRL_VME; 
2798         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2799 }
2800
2801 /*
2802  * note: we must call bus_enable_intr() prior to enabling the hardware
2803  * interrupt and bus_disable_intr() after disabling the hardware interrupt
2804  * in order to avoid handler execution races from scheduled interrupt
2805  * threads.
2806  */
2807 static void
2808 em_enable_intr(struct adapter *adapter)
2809 {
2810         struct ifnet *ifp = &adapter->interface_data.ac_if;
2811         
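        /*
         * While the interface is being polled, the hardware interrupt is
         * left masked and the handler stays disabled; the polling code does
         * the receive and transmit processing instead.
         */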
2812         if ((ifp->if_flags & IFF_POLLING) == 0) {
2813                 lwkt_serialize_handler_enable(ifp->if_serializer);
2814                 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2815         }
2816 }
2817
2818 static void
2819 em_disable_intr(struct adapter *adapter)
2820 {
2821         /*
2822          * The first version of the 82542 had an erratum where a forced
2823          * link would stay up even if the cable was disconnected.
2824          * Sequence errors were used to detect the disconnect and then
2825          * the driver would unforce the link.  That code lives in the ISR.
2826          * For this to work correctly the Sequence error interrupt had to
2827          * be enabled all the time.
2828          */
2829         if (adapter->hw.mac_type == em_82542_rev2_0) {
2830                 E1000_WRITE_REG(&adapter->hw, IMC,
2831                                 (0xffffffff & ~E1000_IMC_RXSEQ));
2832         } else {
2833                 E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
2834         }
2835
2836         lwkt_serialize_handler_disable(adapter->interface_data.ac_if.if_serializer);
2837 }
2838
2839 static int
2840 em_is_valid_ether_addr(uint8_t *addr)
2841 {
2842         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2843
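        /*
         * Reject multicast/broadcast addresses (low bit of the first octet
         * set) and the all-zero address; anything else is acceptable as a
         * station address.
         */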
2844         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2845                 return(FALSE);
2846         else
2847                 return(TRUE);
2848 }
2849
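/*
 * The helpers below are thin OS shims used by the shared, OS-independent
 * Intel hardware code to touch PCI configuration space and the command
 * register's Memory Write and Invalidate bit through the host's PCI layer.
 */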
2850 void 
2851 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2852 {
2853         pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2854 }
2855
2856 void 
2857 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2858 {
2859         *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2860 }
2861
2862 void
2863 em_pci_set_mwi(struct em_hw *hw)
2864 {
2865         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2866                          (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2867 }
2868
2869 void
2870 em_pci_clear_mwi(struct em_hw *hw)
2871 {
2872         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2873                          (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2874 }
2875
2876 uint32_t
2877 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2878 {
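        /*
         * The device's I/O BAR is a two-register window (IOADDR/IODATA):
         * write the target register offset at byte 0, then access the data
         * at byte 4.
         */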
2879         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2880         return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2881 }
2882
2883 void
2884 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2885 {
2886         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2887         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2888 }
2889
2890 /*********************************************************************
2891  * 82544 Coexistence issue workaround.
2892  *    There are two issues.
2893  *      1. Transmit hang issue.
2894  *    To detect this issue, the following equation is used:
2895  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0]
2896  *          If SUM[3:0] is between 1 and 4, the issue is present.
2897  *
2898  *      2. DAC (Dual Address Cycle) issue.
2899  *    To detect this issue, the following equation is used:
2900  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0]
2901  *          If SUM[3:0] is between 9 and 0xC, the issue is present.
2902  *
2903  *
2904  *    WORKAROUND:
2905  *          Make sure the buffer's ending address nibble is not
2906  *          1,2,3,4 (hang) or 9,a,b,c (DAC).
2907  *
2908  *********************************************************************/
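/*
 * Worked example of the check below: a segment whose bus address ends in
 * 0x6 with length 0x3E gives ((0x6 + 0xE) & 0xF) = 0x4, which falls in the
 * 1-4 range, so the segment is split into two descriptors: the first
 * covering length - 4 bytes and the second covering the final 4 bytes.
 */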
2909 static uint32_t
2910 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2911 {
2912         /* The issue is sensitive to both the buffer length and its address. */
2913         /* Buffers of 4 bytes or less are always handed over as a single descriptor. */
2914         uint32_t safe_terminator;
2915         if (length <= 4) {
2916                 desc_array->descriptor[0].address = address;
2917                 desc_array->descriptor[0].length = length;
2918                 desc_array->elements = 1;
2919                 return(desc_array->elements);
2920         }
2921         safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2922         /* If the terminator does not fall in 0x1-0x4 or 0x9-0xC, one descriptor is safe. */
2923         if (safe_terminator == 0 ||
2924             (safe_terminator > 4 && safe_terminator < 9) || 
2925             (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2926                 desc_array->descriptor[0].address = address;
2927                 desc_array->descriptor[0].length = length;
2928                 desc_array->elements = 1;
2929                 return(desc_array->elements);
2930         }
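        /*
         * Problematic terminator: split the segment so the final 4 bytes
         * get their own descriptor and the troublesome ending alignment
         * never occurs.
         */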
2931
2932         desc_array->descriptor[0].address = address;
2933         desc_array->descriptor[0].length = length - 4;
2934         desc_array->descriptor[1].address = address + (length - 4);
2935         desc_array->descriptor[1].length = 4;
2936         desc_array->elements = 2;
2937         return(desc_array->elements);
2938 }
2939
2940 /**********************************************************************
2941  *
2942  *  Update the board statistics counters. 
2943  *
2944  **********************************************************************/
2945 static void
2946 em_update_stats_counters(struct adapter *adapter)
2947 {
2948         struct ifnet   *ifp;
2949
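        /*
         * Symbol and sequence error counters are only read on copper parts
         * or when the link is reported up, presumably so a down fiber or
         * SerDes link does not accumulate meaningless error counts.
         */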
2950         if (adapter->hw.media_type == em_media_type_copper ||
2951             (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2952                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2953                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2954         }
2955         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2956         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2957         adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2958         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2959
2960         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2961         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2962         adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2963         adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2964         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2965         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2966         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2967         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2968         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2969         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2970         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2971         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2972         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2973         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2974         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2975         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2976         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2977         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2978         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2979         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2980
2981         /* For the 64-bit byte counters the low dword must be read first. */
2982         /* Both registers clear on the read of the high dword */
2983
2984         adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); 
2985         adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2986         adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2987         adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2988
2989         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2990         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2991         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2992         adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2993         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2994
2995         adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2996         adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2997         adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2998         adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2999
3000         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
3001         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
3002         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
3003         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
3004         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
3005         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
3006         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
3007         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
3008         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
3009         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
3010
3011         if (adapter->hw.mac_type >= em_82543) {
3012                 adapter->stats.algnerrc += 
3013                     E1000_READ_REG(&adapter->hw, ALGNERRC);
3014                 adapter->stats.rxerrc += 
3015                     E1000_READ_REG(&adapter->hw, RXERRC);
3016                 adapter->stats.tncrs += 
3017                     E1000_READ_REG(&adapter->hw, TNCRS);
3018                 adapter->stats.cexterr += 
3019                     E1000_READ_REG(&adapter->hw, CEXTERR);
3020                 adapter->stats.tsctc += 
3021                     E1000_READ_REG(&adapter->hw, TSCTC);
3022                 adapter->stats.tsctfc += 
3023                     E1000_READ_REG(&adapter->hw, TSCTFC);
3024         }
3025         ifp = &adapter->interface_data.ac_if;
3026
3027         /* Fill out the OS statistics structure */
3028         ifp->if_ibytes = adapter->stats.gorcl;
3029         ifp->if_obytes = adapter->stats.gotcl;
3030         ifp->if_imcasts = adapter->stats.mprc;
3031         ifp->if_collisions = adapter->stats.colc;
3032
3033         /* Rx Errors */
3034         ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
3035             adapter->stats.crcerrs + adapter->stats.algnerrc +
3036             adapter->stats.rlec + adapter->stats.mpc + adapter->stats.cexterr;
3037
3038         /* Tx Errors */
3039         ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
3040 }
3041
3042
3043 /**********************************************************************
3044  *
3045  *  This routine is called only when em_display_debug_stats is enabled.
3046  *  This routine provides a way to take a look at important statistics
3047  *  maintained by the driver and hardware.
3048  *
3049  **********************************************************************/
3050 static void
3051 em_print_debug_info(struct adapter *adapter)
3052 {
3053         device_t dev = adapter->dev;
3054         uint8_t *hw_addr = adapter->hw.hw_addr;
3055
3056         device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3057         device_printf(dev, "CTRL  = 0x%x\n",
3058                       E1000_READ_REG(&adapter->hw, CTRL)); 
3059         device_printf(dev, "RCTL  = 0x%x PS=(0x8402)\n",
3060                       E1000_READ_REG(&adapter->hw, RCTL)); 
3061         device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk\n",
3062                       ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
3063                       (E1000_READ_REG(&adapter->hw, PBA) & 0xffff));
3064         device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3065                       adapter->hw.fc_high_water, adapter->hw.fc_low_water);
3066         device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3067                       E1000_READ_REG(&adapter->hw, TIDV),
3068                       E1000_READ_REG(&adapter->hw, TADV));
3069         device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3070                       E1000_READ_REG(&adapter->hw, RDTR),
3071                       E1000_READ_REG(&adapter->hw, RADV));
3072         device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
3073                       (long long)adapter->tx_fifo_wrk_cnt,
3074                       (long long)adapter->tx_fifo_reset_cnt);
3075         device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3076                       E1000_READ_REG(&adapter->hw, TDH),
3077                       E1000_READ_REG(&adapter->hw, TDT));
3078         device_printf(dev, "Num Tx descriptors avail = %d\n",
3079                       adapter->num_tx_desc_avail);
3080         device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
3081                       adapter->no_tx_desc_avail1);
3082         device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
3083                       adapter->no_tx_desc_avail2);
3084         device_printf(dev, "Std mbuf failed = %ld\n",
3085                       adapter->mbuf_alloc_failed);
3086         device_printf(dev, "Std mbuf cluster failed = %ld\n",
3087                       adapter->mbuf_cluster_failed);
3088         device_printf(dev, "Driver dropped packets = %ld\n",
3089                       adapter->dropped_pkts);
3090 }
3091
3092 static void
3093 em_print_hw_stats(struct adapter *adapter)
3094 {
3095         device_t dev = adapter->dev;
3096
3097         device_printf(dev, "Adapter: %p\n", adapter);
3098
3099         device_printf(dev, "Excessive collisions = %lld\n",
3100                       (long long)adapter->stats.ecol);
3101         device_printf(dev, "Symbol errors = %lld\n",
3102                       (long long)adapter->stats.symerrs);
3103         device_printf(dev, "Sequence errors = %lld\n",
3104                       (long long)adapter->stats.sec);
3105         device_printf(dev, "Defer count = %lld\n",
3106                       (long long)adapter->stats.dc);
3107
3108         device_printf(dev, "Missed Packets = %lld\n",
3109                       (long long)adapter->stats.mpc);
3110         device_printf(dev, "Receive No Buffers = %lld\n",
3111                       (long long)adapter->stats.rnbc);
3112         device_printf(dev, "Receive length errors = %lld\n",
3113                       (long long)adapter->stats.rlec);
3114         device_printf(dev, "Receive errors = %lld\n",
3115                       (long long)adapter->stats.rxerrc);
3116         device_printf(dev, "Crc errors = %lld\n",
3117                       (long long)adapter->stats.crcerrs);
3118         device_printf(dev, "Alignment errors = %lld\n",
3119                       (long long)adapter->stats.algnerrc);
3120         device_printf(dev, "Carrier extension errors = %lld\n",
3121                       (long long)adapter->stats.cexterr);
3122
3123         device_printf(dev, "XON Rcvd = %lld\n",
3124                       (long long)adapter->stats.xonrxc);
3125         device_printf(dev, "XON Xmtd = %lld\n",
3126                       (long long)adapter->stats.xontxc);
3127         device_printf(dev, "XOFF Rcvd = %lld\n",
3128                       (long long)adapter->stats.xoffrxc);
3129         device_printf(dev, "XOFF Xmtd = %lld\n",
3130                       (long long)adapter->stats.xofftxc);
3131
3132         device_printf(dev, "Good Packets Rcvd = %lld\n",
3133                       (long long)adapter->stats.gprc);
3134         device_printf(dev, "Good Packets Xmtd = %lld\n",
3135                       (long long)adapter->stats.gptc);
3136 }
3137
3138 static int
3139 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3140 {
3141         int error;
3142         int result;
3143         struct adapter *adapter;
3144
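        /*
         * Write-only trigger: reading this node just returns -1, while
         * writing 1 to it dumps the adapter's debug state to the console.
         */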
3145         result = -1;
3146         error = sysctl_handle_int(oidp, &result, 0, req);
3147
3148         if (error || !req->newptr)
3149                 return(error);
3150
3151         if (result == 1) {
3152                 adapter = (struct adapter *)arg1;
3153                 em_print_debug_info(adapter);
3154         }
3155
3156         return(error);
3157 }
3158
3159 static int
3160 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3161 {
3162         int error;
3163         int result;
3164         struct adapter *adapter;
3165
3166         result = -1;
3167         error = sysctl_handle_int(oidp, &result, 0, req);
3168
3169         if (error || !req->newptr)
3170                 return(error);
3171
3172         if (result == 1) {
3173                 adapter = (struct adapter *)arg1;
3174                 em_print_hw_stats(adapter);
3175         }
3176
3177         return(error);
3178 }
3179
3180 static int
3181 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3182 {
3183         struct em_int_delay_info *info;
3184         struct adapter *adapter;
3185         uint32_t regval;
3186         int error;
3187         int usecs;
3188         int ticks;
3189
3190         info = (struct em_int_delay_info *)arg1;
3191         adapter = info->adapter;
3192         usecs = info->value;
3193         error = sysctl_handle_int(oidp, &usecs, 0, req);
3194         if (error != 0 || req->newptr == NULL)
3195                 return(error);
3196         if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3197                 return(EINVAL);
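        /*
         * The delay registers count in hardware ticks rather than
         * microseconds; E1000_USECS_TO_TICKS() converts the requested value
         * (the tick granularity is on the order of a microsecond, 1.024us
         * on these MACs).
         */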
3198         info->value = usecs;
3199         ticks = E1000_USECS_TO_TICKS(usecs);
3200
3201         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3202         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3203         regval = (regval & ~0xffff) | (ticks & 0xffff);
3204         /* Handle a few special cases. */
3205         switch (info->offset) {
3206         case E1000_RDTR:
3207         case E1000_82542_RDTR:
3208                 regval |= E1000_RDT_FPDB;
3209                 break;
3210         case E1000_TIDV:
3211         case E1000_82542_TIDV:
3212                 if (ticks == 0) {
3213                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3214                         /* Don't write 0 into the TIDV register. */
3215                         regval++;
3216                 } else
3217                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3218                 break;
3219         }
3220         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3221         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3222         return(0);
3223 }
3224
3225 static void
3226 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3227                         const char *description, struct em_int_delay_info *info,
3228                         int offset, int value)
3229 {
3230         info->adapter = adapter;
3231         info->offset = offset;
3232         info->value = value;
3233         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3234                         SYSCTL_CHILDREN(adapter->sysctl_tree),
3235                         OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3236                         info, 0, em_sysctl_int_delay, "I", description);
3237 }
3238
3239 static int
3240 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3241 {
3242         struct adapter *adapter = (void *)arg1;
3243         int error;
3244         int throttle;
3245
3246         throttle = em_int_throttle_ceil;
3247         error = sysctl_handle_int(oidp, &throttle, 0, req);
3248         if (error || req->newptr == NULL)
3249                 return error;
3250         if (throttle < 0 || throttle > 1000000000 / 256)
3251                 return EINVAL;
3252         if (throttle) {
3253                 /*
3254                  * Set the interrupt throttling rate in 256ns increments, then
3255                  * recompute the sysctl value to reflect the rate actually programmed.
3256                  */
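                /*
                 * For example, asking for 8000 interrupts/sec programs ITR
                 * with 3906250 / 8000 = 488 (in 256ns units) and then
                 * reports back a ceiling of 3906250 / 488 = 8004.
                 */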
3257                 throttle = 1000000000 / 256 / throttle;
3258                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3259                 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3260                 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3261                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3262         } else {
3263                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3264                 em_int_throttle_ceil = 0;
3265                 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3266                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3267         }
3268         device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n", 
3269                         em_int_throttle_ceil);
3270         return 0;
3271 }
3272