1 /**************************************************************************
2
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4
5 Copyright (c) 2001-2003, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11  1. Redistributions of source code must retain the above copyright notice,
12     this list of conditions and the following disclaimer.
13
14  2. Redistributions in binary form must reproduce the above copyright
15     notice, this list of conditions and the following disclaimer in the
16     documentation and/or other materials provided with the distribution.
17
18  3. Neither the name of the Intel Corporation nor the names of its
19     contributors may be used to endorse or promote products derived from
20     this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ***************************************************************************/
35
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.37 2005/10/02 13:19:55 sephe Exp $*/
38
39 #include "if_em.h"
40 #include <net/ifq_var.h>
41
42 /*********************************************************************
43  *  Set this to one to display debug statistics                                                   
44  *********************************************************************/
45 int             em_display_debug_stats = 0;
46
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50
51 char em_driver_version[] = "1.7.25";
52
53
54 /*********************************************************************
55  *  PCI Device ID Table
56  *
57  *  Used by probe to select devices to load on
58  *  Last field stores an index into em_strings
59  *  Last entry must be all 0s
60  *
61  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62  *********************************************************************/
63
64 static em_vendor_info_t em_vendor_info_array[] =
65 {
66         /* Intel(R) PRO/1000 Network Connection */
67         { 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
68         { 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
69         { 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
70         { 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
71         { 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
72         { 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
73         { 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
74         { 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
75         { 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
76         { 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
77         { 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
78         { 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
79         { 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
80         { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
81         { 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
82         { 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
83         { 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
84         { 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
85         { 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
86         { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
87         { 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
88         { 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
89         { 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
90         { 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
91         { 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
92         { 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
93         { 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
94         { 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
95         { 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
96         { 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
97         { 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
98         { 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
99         /* required last entry */
100         { 0, 0, 0, 0, 0}
101 };
102
103 /*********************************************************************
104  *  Table of branding strings for all supported NICs.
105  *********************************************************************/
106
107 static const char *em_strings[] = {
108         "Intel(R) PRO/1000 Network Connection"
109 };
110
111 /*********************************************************************
112  *  Function prototypes            
113  *********************************************************************/
114 static int      em_probe(device_t);
115 static int      em_attach(device_t);
116 static int      em_detach(device_t);
117 static int      em_shutdown(device_t);
118 static void     em_intr(void *);
119 static void     em_start(struct ifnet *);
120 static void     em_start_serialized(struct ifnet *);
121 static int      em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
122 static void     em_watchdog(struct ifnet *);
123 static void     em_init(void *);
124 static void     em_init_serialized(void *);
125 static void     em_stop(void *);
126 static void     em_media_status(struct ifnet *, struct ifmediareq *);
127 static int      em_media_change(struct ifnet *);
128 static void     em_identify_hardware(struct adapter *);
129 static void     em_local_timer(void *);
130 static int      em_hardware_init(struct adapter *);
131 static void     em_setup_interface(device_t, struct adapter *);
132 static int      em_setup_transmit_structures(struct adapter *);
133 static void     em_initialize_transmit_unit(struct adapter *);
134 static int      em_setup_receive_structures(struct adapter *);
135 static void     em_initialize_receive_unit(struct adapter *);
136 static void     em_enable_intr(struct adapter *);
137 static void     em_disable_intr(struct adapter *);
138 static void     em_free_transmit_structures(struct adapter *);
139 static void     em_free_receive_structures(struct adapter *);
140 static void     em_update_stats_counters(struct adapter *);
141 static void     em_clean_transmit_interrupts(struct adapter *);
142 static int      em_allocate_receive_structures(struct adapter *);
143 static int      em_allocate_transmit_structures(struct adapter *);
144 static void     em_process_receive_interrupts(struct adapter *, int);
145 static void     em_receive_checksum(struct adapter *, struct em_rx_desc *,
146                                     struct mbuf *);
147 static void     em_transmit_checksum_setup(struct adapter *, struct mbuf *,
148                                            uint32_t *, uint32_t *);
149 static void     em_set_promisc(struct adapter *);
150 static void     em_disable_promisc(struct adapter *);
151 static void     em_set_multi(struct adapter *);
152 static void     em_print_hw_stats(struct adapter *);
153 static void     em_print_link_status(struct adapter *);
154 static int      em_get_buf(int i, struct adapter *, struct mbuf *, int how);
155 static void     em_enable_vlans(struct adapter *);
156 static int      em_encap(struct adapter *, struct mbuf *);
157 static void     em_smartspeed(struct adapter *);
158 static int      em_82547_fifo_workaround(struct adapter *, int);
159 static void     em_82547_update_fifo_head(struct adapter *, int);
160 static int      em_82547_tx_fifo_reset(struct adapter *);
161 static void     em_82547_move_tail(void *arg);
162 static void     em_82547_move_tail_serialized(void *arg);
163 static int      em_dma_malloc(struct adapter *, bus_size_t,
164                               struct em_dma_alloc *, int);
165 static void     em_dma_free(struct adapter *, struct em_dma_alloc *);
166 static void     em_print_debug_info(struct adapter *);
167 static int      em_is_valid_ether_addr(uint8_t *);
168 static int      em_sysctl_stats(SYSCTL_HANDLER_ARGS);
169 static int      em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
170 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length, 
171                                    PDESC_ARRAY desc_array);
172 static int      em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
173 static int      em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
174 static void     em_add_int_delay_sysctl(struct adapter *, const char *,
175                                         const char *,
176                                         struct em_int_delay_info *, int, int);
177
178 /*********************************************************************
179  *  FreeBSD Device Interface Entry Points                    
180  *********************************************************************/
181
182 static device_method_t em_methods[] = {
183         /* Device interface */
184         DEVMETHOD(device_probe, em_probe),
185         DEVMETHOD(device_attach, em_attach),
186         DEVMETHOD(device_detach, em_detach),
187         DEVMETHOD(device_shutdown, em_shutdown),
188         {0, 0}
189 };
190
191 static driver_t em_driver = {
192         "em", em_methods, sizeof(struct adapter),
193 };
194
195 static devclass_t em_devclass;
196
197 DECLARE_DUMMY_MODULE(if_em);
198 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
199
200 /*********************************************************************
201  *  Tunable default values.
202  *********************************************************************/
203
204 #define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
205 #define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)
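/*
 * Descriptive note: the interrupt delay registers on these chips count in
 * units of 1.024 usec, so the macros above scale by 1024/1000 with rounding.
 * For example, a register value of 64 ticks converts to
 * (1024 * 64 + 500) / 1000 = 66 usec.
 */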
206
207 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
208 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
209 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
210 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
211 static int em_int_throttle_ceil = 10000;
212
213 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
214 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
215 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
216 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
217 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
218
219 /*********************************************************************
220  *  Device identification routine
221  *
222  *  em_probe determines whether the driver should be loaded on the
223  *  adapter, based on the PCI vendor/device ID of the adapter.
224  *
225  *  return 0 on success, positive on failure
226  *********************************************************************/
227
228 static int
229 em_probe(device_t dev)
230 {
231         em_vendor_info_t *ent;
232
233         uint16_t pci_vendor_id = 0;
234         uint16_t pci_device_id = 0;
235         uint16_t pci_subvendor_id = 0;
236         uint16_t pci_subdevice_id = 0;
237         char adapter_name[60];
238
239         INIT_DEBUGOUT("em_probe: begin");
240
241         pci_vendor_id = pci_get_vendor(dev);
242         if (pci_vendor_id != EM_VENDOR_ID)
243                 return(ENXIO);
244
245         pci_device_id = pci_get_device(dev);
246         pci_subvendor_id = pci_get_subvendor(dev);
247         pci_subdevice_id = pci_get_subdevice(dev);
248
249         ent = em_vendor_info_array;
250         while (ent->vendor_id != 0) {
251                 if ((pci_vendor_id == ent->vendor_id) &&
252                     (pci_device_id == ent->device_id) &&
253
254                     ((pci_subvendor_id == ent->subvendor_id) ||
255                      (ent->subvendor_id == PCI_ANY_ID)) &&
256
257                     ((pci_subdevice_id == ent->subdevice_id) ||
258                      (ent->subdevice_id == PCI_ANY_ID))) {
259                         snprintf(adapter_name, sizeof(adapter_name),
260                                  "%s, Version - %s",  em_strings[ent->index], 
261                                  em_driver_version);
262                         device_set_desc_copy(dev, adapter_name);
263                         return(0);
264                 }
265                 ent++;
266         }
267
268         return(ENXIO);
269 }
270
271 /*********************************************************************
272  *  Device initialization routine
273  *
274  *  The attach entry point is called when the driver is being loaded.
275  *  This routine identifies the type of hardware, allocates all resources 
276  *  and initializes the hardware.     
277  *  
278  *  return 0 on success, positive on failure
279  *********************************************************************/
280
281 static int
282 em_attach(device_t dev)
283 {
284         struct adapter *adapter;
285         int tsize, rsize;
286         int i, val, rid;
287         int error = 0;
288
289         INIT_DEBUGOUT("em_attach: begin");
290
291         adapter = device_get_softc(dev);
292
293         lwkt_serialize_init(&adapter->serializer);
294
295         callout_init(&adapter->timer);
296         callout_init(&adapter->tx_fifo_timer);
297
298         adapter->dev = dev;
299         adapter->osdep.dev = dev;
300
301         /* SYSCTL stuff */
302         sysctl_ctx_init(&adapter->sysctl_ctx);
303         adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
304                                                SYSCTL_STATIC_CHILDREN(_hw),
305                                                OID_AUTO, 
306                                                device_get_nameunit(dev),
307                                                CTLFLAG_RD,
308                                                0, "");
309
310         if (adapter->sysctl_tree == NULL) {
311                 error = EIO;
312                 goto fail;
313         }
314
315         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
316                         SYSCTL_CHILDREN(adapter->sysctl_tree),
317                         OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, 
318                         (void *)adapter, 0,
319                         em_sysctl_debug_info, "I", "Debug Information");
320
321         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
322                         SYSCTL_CHILDREN(adapter->sysctl_tree),
323                         OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, 
324                         (void *)adapter, 0,
325                         em_sysctl_stats, "I", "Statistics");
326
327         /* Determine hardware revision */
328         em_identify_hardware(adapter);
329
330         /* Set up some sysctls for the tunable interrupt delays */
331         em_add_int_delay_sysctl(adapter, "rx_int_delay",
332                                 "receive interrupt delay in usecs",
333                                 &adapter->rx_int_delay,
334                                 E1000_REG_OFFSET(&adapter->hw, RDTR),
335                                 em_rx_int_delay_dflt);
336         em_add_int_delay_sysctl(adapter, "tx_int_delay",
337                                 "transmit interrupt delay in usecs",
338                                 &adapter->tx_int_delay,
339                                 E1000_REG_OFFSET(&adapter->hw, TIDV),
340                                 em_tx_int_delay_dflt);
341         if (adapter->hw.mac_type >= em_82540) {
342                 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
343                                         "receive interrupt delay limit in usecs",
344                                         &adapter->rx_abs_int_delay,
345                                         E1000_REG_OFFSET(&adapter->hw, RADV),
346                                         em_rx_abs_int_delay_dflt);
347                 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
348                                         "transmit interrupt delay limit in usecs",
349                                         &adapter->tx_abs_int_delay,
350                                         E1000_REG_OFFSET(&adapter->hw, TADV),
351                                         em_tx_abs_int_delay_dflt);
352                 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
353                         SYSCTL_CHILDREN(adapter->sysctl_tree),
354                         OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
355                         adapter, 0, em_sysctl_int_throttle, "I", NULL);
356         }
357      
358         /* Parameters (to be read from user) */   
359         adapter->num_tx_desc = EM_MAX_TXD;
360         adapter->num_rx_desc = EM_MAX_RXD;
361         adapter->hw.autoneg = DO_AUTO_NEG;
362         adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
363         adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
364         adapter->hw.tbi_compatibility_en = TRUE;
365         adapter->rx_buffer_len = EM_RXBUFFER_2048;
366
367         /*
368          * These parameters control the automatic generation(Tx) and
369          * response(Rx) to Ethernet PAUSE frames.
370          */
371         adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
372         adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
373         adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
374         adapter->hw.fc_send_xon   = TRUE;
375         adapter->hw.fc = em_fc_full;
376
377         adapter->hw.phy_init_script = 1;
378         adapter->hw.phy_reset_disable = FALSE;
379
380 #ifndef EM_MASTER_SLAVE
381         adapter->hw.master_slave = em_ms_hw_default;
382 #else
383         adapter->hw.master_slave = EM_MASTER_SLAVE;
384 #endif
385
386         /* 
387          * Set the max frame size assuming standard ethernet 
388          * sized frames 
389          */   
390         adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
391
392         adapter->hw.min_frame_size = 
393             MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
394
395         /* 
396          * This controls when hardware reports transmit completion 
397          * status. 
398          */
399         adapter->hw.report_tx_early = 1;
400
401         rid = EM_MMBA;
402         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
403                                                      &rid, RF_ACTIVE);
404         if (!(adapter->res_memory)) {
405                 device_printf(dev, "Unable to allocate bus resource: memory\n");
406                 error = ENXIO;
407                 goto fail;
408         }
409         adapter->osdep.mem_bus_space_tag = 
410             rman_get_bustag(adapter->res_memory);
411         adapter->osdep.mem_bus_space_handle = 
412             rman_get_bushandle(adapter->res_memory);
413         adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
414
415         if (adapter->hw.mac_type > em_82543) {
416                 /* Figure out where our IO BAR is */
417                 rid = EM_MMBA;
418                 for (i = 0; i < 5; i++) {
419                         val = pci_read_config(dev, rid, 4);
420                         if (val & 0x00000001) {
421                                 adapter->io_rid = rid;
422                                 break;
423                         }
424                         rid += 4;
425                 }
426
427                 adapter->res_ioport = bus_alloc_resource_any(dev,
428                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
429                 if (!(adapter->res_ioport)) {
430                         device_printf(dev, "Unable to allocate bus resource: ioport\n");
431                         error = ENXIO;
432                         goto fail;
433                 }
434
435                 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
436                 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
437         }
438
439         rid = 0x0;
440         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
441             &rid, RF_SHAREABLE | RF_ACTIVE);
442         if (!(adapter->res_interrupt)) {
443                 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
444                 error = ENXIO;
445                 goto fail;
446         }
447
448         adapter->hw.back = &adapter->osdep;
449
450         /* Initialize eeprom parameters */
451         em_init_eeprom_params(&adapter->hw);
452
453         tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
454
455         /* Allocate Transmit Descriptor ring */
456         if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
457                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
458                 error = ENOMEM;
459                 goto fail;
460         }
461         adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
462
463         rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
464
465         /* Allocate Receive Descriptor ring */
466         if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
467                 device_printf(dev, "Unable to allocate rx_desc memory\n");
468                 error = ENOMEM;
469                 goto fail;
470         }
471         adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
472
473         /* Initialize the hardware */
474         if (em_hardware_init(adapter)) {
475                 device_printf(dev, "Unable to initialize the hardware\n");
476                 error = EIO;
477                 goto fail;
478         }
479
480         /* Copy the permanent MAC address out of the EEPROM */
481         if (em_read_mac_addr(&adapter->hw) < 0) {
482                 device_printf(dev, "EEPROM read error while reading mac address\n");
483                 error = EIO;
484                 goto fail;
485         }
486
487         if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
488                 device_printf(dev, "Invalid mac address\n");
489                 error = EIO;
490                 goto fail;
491         }
492
493         /* Setup OS specific network interface */
494         em_setup_interface(dev, adapter);
495
496         /* Initialize statistics */
497         em_clear_hw_cntrs(&adapter->hw);
498         em_update_stats_counters(adapter);
499         adapter->hw.get_link_status = 1;
500         em_check_for_link(&adapter->hw);
501
502         /* Print the link status */
503         if (adapter->link_active == 1) {
504                 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, 
505                                         &adapter->link_duplex);
506                 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
507                     adapter->link_speed,
508                     adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
509         } else
510                 device_printf(dev, "Speed: N/A, Duplex: N/A\n");
511
512         /* Identify 82544 on PCIX */
513         em_get_bus_info(&adapter->hw);  
514         if (adapter->hw.bus_type == em_bus_type_pcix &&
515             adapter->hw.mac_type == em_82544)
516                 adapter->pcix_82544 = TRUE;
517         else
518                 adapter->pcix_82544 = FALSE;
519
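        /*
         * Note: &adapter->serializer is handed to bus_setup_intr(), so the
         * interrupt is dispatched with the adapter serializer already held;
         * em_intr() therefore does not enter the serializer itself.
         */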
520         error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_MISC,
521                            (void (*)(void *)) em_intr, adapter,
522                            &adapter->int_handler_tag, &adapter->serializer);
523         if (error) {
524                 device_printf(dev, "Error registering interrupt handler!\n");
525                 ether_ifdetach(&adapter->interface_data.ac_if);
526                 goto fail;
527         }
528
529         INIT_DEBUGOUT("em_attach: end");
530         return(0);
531
532 fail:
533         em_detach(dev);
534         return(error);
535 }
536
537 /*********************************************************************
538  *  Device removal routine
539  *
540  *  The detach entry point is called when the driver is being removed.
541  *  This routine stops the adapter and deallocates all the resources
542  *  that were allocated for driver operation.
543  *  
544  *  return 0 on success, positive on failure
545  *********************************************************************/
546
547 static int
548 em_detach(device_t dev)
549 {
550         struct adapter * adapter = device_get_softc(dev);
551
552         INIT_DEBUGOUT("em_detach: begin");
553
554         lwkt_serialize_enter(&adapter->serializer);
555         adapter->in_detach = 1;
556
557         if (device_is_attached(dev)) {
558                 em_stop(adapter);
559                 em_phy_hw_reset(&adapter->hw);
560                 ether_ifdetach(&adapter->interface_data.ac_if);
561         }
562         bus_generic_detach(dev);
563
564         if (adapter->res_interrupt != NULL) {
565                 bus_teardown_intr(dev, adapter->res_interrupt, 
566                                   adapter->int_handler_tag);
567                 bus_release_resource(dev, SYS_RES_IRQ, 0, 
568                                      adapter->res_interrupt);
569         }
570         if (adapter->res_memory != NULL) {
571                 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA, 
572                                      adapter->res_memory);
573         }
574
575         if (adapter->res_ioport != NULL) {
576                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 
577                                      adapter->res_ioport);
578         }
579
580         /* Free Transmit Descriptor ring */
581         if (adapter->tx_desc_base != NULL) {
582                 em_dma_free(adapter, &adapter->txdma);
583                 adapter->tx_desc_base = NULL;
584         }
585
586         /* Free Receive Descriptor ring */
587         if (adapter->rx_desc_base != NULL) {
588                 em_dma_free(adapter, &adapter->rxdma);
589                 adapter->rx_desc_base = NULL;
590         }
591
592         adapter->sysctl_tree = NULL;
593         sysctl_ctx_free(&adapter->sysctl_ctx);
594
595         lwkt_serialize_exit(&adapter->serializer);
596         return(0);
597 }
598
599 /*********************************************************************
600  *
601  *  Shutdown entry point
602  *
603  **********************************************************************/ 
604
605 static int
606 em_shutdown(device_t dev)
607 {
608         struct adapter *adapter = device_get_softc(dev);
609         em_stop(adapter);
610         return(0);
611 }
612
613 /*********************************************************************
614  *  Transmit entry point
615  *
616  *  em_start is called by the stack to initiate a transmit.
617  *  The driver will remain in this routine as long as there are
618  *  packets to transmit and transmit resources are available.
619  *  If resources are not available, the stack is notified and the
620  *  packet stays on the send queue.
621  **********************************************************************/
622
623 static void
624 em_start(struct ifnet *ifp)
625 {
626         struct adapter *adapter = ifp->if_softc;
627
628         lwkt_serialize_enter(&adapter->serializer);
629         em_start_serialized(ifp);
630         lwkt_serialize_exit(&adapter->serializer);
631 }
632
633 static void
634 em_start_serialized(struct ifnet *ifp)
635 {
636         struct mbuf *m_head;
637         struct adapter *adapter = ifp->if_softc;
638
639         if (!adapter->link_active)
640                 return;
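        /*
         * Each frame is only peeked at (ifq_poll) until em_encap() has
         * successfully set up descriptors for it; on failure the frame
         * stays queued and IFF_OACTIVE throttles the stack until
         * descriptors are reclaimed.  Only after a successful em_encap()
         * is the mbuf actually dequeued and handed to BPF.
         */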
641         while (!ifq_is_empty(&ifp->if_snd)) {
642                 m_head = ifq_poll(&ifp->if_snd);
643
644                 if (m_head == NULL)
645                         break;
646
647                 if (em_encap(adapter, m_head)) { 
648                         ifp->if_flags |= IFF_OACTIVE;
649                         break;
650                 }
651                 m_head = ifq_dequeue(&ifp->if_snd);
652
653                 /* Send a copy of the frame to the BPF listener */
654                 BPF_MTAP(ifp, m_head);
655         
656                 /* Set timeout in case hardware has problems transmitting */
657                 ifp->if_timer = EM_TX_TIMEOUT;        
658         }
659 }
660
661 /*********************************************************************
662  *  Ioctl entry point
663  *
664  *  em_ioctl is called when the user wants to configure the
665  *  interface.
666  *
667  *  return 0 on success, positive on failure
668  **********************************************************************/
669
670 static int
671 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
672 {
673         int mask, error = 0;
674         struct ifreq *ifr = (struct ifreq *) data;
675         struct adapter *adapter = ifp->if_softc;
676
677         lwkt_serialize_enter(&adapter->serializer);
678
679         if (adapter->in_detach)
680                 goto out;
681
682         switch (command) {
683         case SIOCSIFADDR:
684         case SIOCGIFADDR:
685                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
686                 lwkt_serialize_exit(&adapter->serializer);
687                 ether_ioctl(ifp, command, data);
688                 lwkt_serialize_enter(&adapter->serializer);
689                 break;
690         case SIOCSIFMTU:
691                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
692                 if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
693                         error = EINVAL;
694                 } else {
695                         ifp->if_mtu = ifr->ifr_mtu;
696                         adapter->hw.max_frame_size = 
697                         ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
698                         em_init_serialized(adapter);
699                 }
700                 break;
701         case SIOCSIFFLAGS:
702                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
703                 if (ifp->if_flags & IFF_UP) {
704                         if (!(ifp->if_flags & IFF_RUNNING))
705                                 em_init_serialized(adapter);
706                         em_disable_promisc(adapter);
707                         em_set_promisc(adapter);
708                 } else {
709                         if (ifp->if_flags & IFF_RUNNING)
710                                 em_stop(adapter);
711                 }
712                 break;
713         case SIOCADDMULTI:
714         case SIOCDELMULTI:
715                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
716                 if (ifp->if_flags & IFF_RUNNING) {
717                         em_disable_intr(adapter);
718                         em_set_multi(adapter);
719                         if (adapter->hw.mac_type == em_82542_rev2_0)
720                                 em_initialize_receive_unit(adapter);
721                         em_enable_intr(adapter);
722                 }
723                 break;
724         case SIOCSIFMEDIA:
725         case SIOCGIFMEDIA:
726                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
727                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
728                 break;
729         case SIOCSIFCAP:
730                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
731                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
732                 if (mask & IFCAP_HWCSUM) {
733                         if (IFCAP_HWCSUM & ifp->if_capenable)
734                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
735                         else
736                                 ifp->if_capenable |= IFCAP_HWCSUM;
737                         if (ifp->if_flags & IFF_RUNNING)
738                                 em_init_serialized(adapter);
739                 }
740                 break;
741         default:
742                 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
743                 error = EINVAL;
744         }
745
746 out:
747         lwkt_serialize_exit(&adapter->serializer);
748         return(error);
749 }
750
751 /*********************************************************************
752  *  Watchdog entry point
753  *
754  *  This routine is called whenever the hardware quits transmitting.
755  *
756  **********************************************************************/
757
758 static void
759 em_watchdog(struct ifnet *ifp)
760 {
761         struct adapter * adapter;
762         adapter = ifp->if_softc;
763
764         /* If we are in this routine because of pause frames, then
765          * don't reset the hardware.
766          */
767         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
768                 ifp->if_timer = EM_TX_TIMEOUT;
769                 return;
770         }
771
772         if (em_check_for_link(&adapter->hw))
773                 if_printf(ifp, "watchdog timeout -- resetting\n");
774
775         ifp->if_flags &= ~IFF_RUNNING;
776
777         em_init(adapter);
778
779         ifp->if_oerrors++;
780 }
781
782 /*********************************************************************
783  *  Init entry point
784  *
785  *  This routine is used in two ways.  It is used by the stack as
786  *  the init entry point in the network interface structure.  It is
787  *  also used by the driver as a hw/sw initialization routine to get
788  *  the hardware and software to a consistent state.
789  *
790  *  This function returns no status; errors are reported via if_printf().
791  **********************************************************************/
792
793 static void
794 em_init(void *arg)
795 {
796         struct adapter *adapter = arg;
797
798         lwkt_serialize_enter(&adapter->serializer);
799         em_init_serialized(arg);
800         lwkt_serialize_exit(&adapter->serializer);
801 }
802
803 static void
804 em_init_serialized(void *arg)
805 {
806         struct adapter *adapter = arg;
807         struct ifnet *ifp = &adapter->interface_data.ac_if;
808
809         INIT_DEBUGOUT("em_init: begin");
810
811         em_stop(adapter);
812
813         /* Get the latest mac address, User can use a LAA */
814         bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
815               ETHER_ADDR_LEN);
816
817         /* Initialize the hardware */
818         if (em_hardware_init(adapter)) {
819                 if_printf(ifp, "Unable to initialize the hardware\n");
820                 return;
821         }
822
823         em_enable_vlans(adapter);
824
825         /* Prepare transmit descriptors and buffers */
826         if (em_setup_transmit_structures(adapter)) {
827                 if_printf(ifp, "Could not setup transmit structures\n");
828                 em_stop(adapter); 
829                 return;
830         }
831         em_initialize_transmit_unit(adapter);
832
833         /* Setup Multicast table */
834         em_set_multi(adapter);
835
836         /* Prepare receive descriptors and buffers */
837         if (em_setup_receive_structures(adapter)) {
838                 if_printf(ifp, "Could not setup receive structures\n");
839                 em_stop(adapter);
840                 return;
841         }
842         em_initialize_receive_unit(adapter);
843         
844         /* Don't lose promiscuous settings */
845         em_set_promisc(adapter);
846
847         ifp->if_flags |= IFF_RUNNING;
848         ifp->if_flags &= ~IFF_OACTIVE;
849
850         if (adapter->hw.mac_type >= em_82543) {
851                 if (ifp->if_capenable & IFCAP_TXCSUM)
852                         ifp->if_hwassist = EM_CHECKSUM_FEATURES;
853                 else
854                         ifp->if_hwassist = 0;
855         }
856
857         callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
858         em_clear_hw_cntrs(&adapter->hw);
859         em_enable_intr(adapter);
860
861         /* Don't reset the phy next time init gets called */
862         adapter->hw.phy_reset_disable = TRUE;
863 }
864
865 #ifdef DEVICE_POLLING
866
867 static void
868 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
869 {
870         struct adapter *adapter = ifp->if_softc;
871         uint32_t reg_icr;
872
873         lwkt_serialize_enter(&adapter->serializer);
874         switch(cmd) {
875         case POLL_REGISTER:
876                 em_disable_intr(adapter);
877                 break;
878         case POLL_DEREGISTER:
879                 em_enable_intr(adapter);
880                 break;
881         case POLL_AND_CHECK_STATUS:
882                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
883                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
884                         callout_stop(&adapter->timer);
885                         adapter->hw.get_link_status = 1;
886                         em_check_for_link(&adapter->hw);
887                         em_print_link_status(adapter);
888                         callout_reset(&adapter->timer, 2*hz, em_local_timer,
889                                       adapter);
890                 }
891                 /* fall through */
892         case POLL_ONLY:
893                 if (ifp->if_flags & IFF_RUNNING) {
894                         em_process_receive_interrupts(adapter, count);
895                         em_clean_transmit_interrupts(adapter);
896                 }
897                 if (ifp->if_flags & IFF_RUNNING) {
898                         if (!ifq_is_empty(&ifp->if_snd))
899                                 em_start_serialized(ifp);
900                 }
901                 break;
902         }
903         lwkt_serialize_exit(&adapter->serializer);
904 }
905
906 #endif /* DEVICE_POLLING */
907
908 /*********************************************************************
909  *
910  *  Interrupt Service routine
911  *
912  **********************************************************************/
913 static void
914 em_intr(void *arg)
915 {
916         uint32_t reg_icr;
917         struct ifnet *ifp;
918         struct adapter *adapter = arg;
919
920         ifp = &adapter->interface_data.ac_if;  
921
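        /*
         * Reading ICR also acknowledges the pending interrupt causes.
         * A value of zero means the interrupt was not ours (e.g. a
         * shared IRQ), so just return.
         */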
922         reg_icr = E1000_READ_REG(&adapter->hw, ICR);
923         if (!reg_icr)
924                 return;
925
926         /* Link status change */
927         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
928                 callout_stop(&adapter->timer);
929                 adapter->hw.get_link_status = 1;
930                 em_check_for_link(&adapter->hw);
931                 em_print_link_status(adapter);
932                 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
933         }
934
935         /*
936          * note: do not attempt to improve efficiency by looping.  This 
937          * only results in unnecessary piecemeal collection of received
938          * packets and unnecessary piecemeal cleanups of the transmit ring.
939          */
940         if (ifp->if_flags & IFF_RUNNING) {
941                 em_process_receive_interrupts(adapter, -1);
942                 em_clean_transmit_interrupts(adapter);
943         }
944
945         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
946                 em_start_serialized(ifp);
947 }
948
949 /*********************************************************************
950  *
951  *  Media Ioctl callback
952  *
953  *  This routine is called whenever the user queries the status of
954  *  the interface using ifconfig.
955  *
956  **********************************************************************/
957 static void
958 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
959 {
960         struct adapter * adapter = ifp->if_softc;
961
962         INIT_DEBUGOUT("em_media_status: begin");
963
964         em_check_for_link(&adapter->hw);
965         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
966                 if (adapter->link_active == 0) {
967                         em_get_speed_and_duplex(&adapter->hw, 
968                                                 &adapter->link_speed, 
969                                                 &adapter->link_duplex);
970                         adapter->link_active = 1;
971                 }
972         } else {
973                 if (adapter->link_active == 1) {
974                         adapter->link_speed = 0;
975                         adapter->link_duplex = 0;
976                         adapter->link_active = 0;
977                 }
978         }
979
980         ifmr->ifm_status = IFM_AVALID;
981         ifmr->ifm_active = IFM_ETHER;
982
983         if (!adapter->link_active)
984                 return;
985
986         ifmr->ifm_status |= IFM_ACTIVE;
987
988         if (adapter->hw.media_type == em_media_type_fiber) {
989                 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
990         } else {
991                 switch (adapter->link_speed) {
992                 case 10:
993                         ifmr->ifm_active |= IFM_10_T;
994                         break;
995                 case 100:
996                         ifmr->ifm_active |= IFM_100_TX;
997                         break;
998                 case 1000:
999                         ifmr->ifm_active |= IFM_1000_T;
1000                         break;
1001                 }
1002                 if (adapter->link_duplex == FULL_DUPLEX)
1003                         ifmr->ifm_active |= IFM_FDX;
1004                 else
1005                         ifmr->ifm_active |= IFM_HDX;
1006         }
1007 }
1008
1009 /*********************************************************************
1010  *
1011  *  Media Ioctl callback
1012  *
1013  *  This routine is called when the user changes speed/duplex using
1014  *  the media/mediaopt options with ifconfig.
1015  *
1016  **********************************************************************/
1017 static int
1018 em_media_change(struct ifnet *ifp)
1019 {
1020         struct adapter * adapter = ifp->if_softc;
1021         struct ifmedia  *ifm = &adapter->media;
1022
1023         INIT_DEBUGOUT("em_media_change: begin");
1024
1025         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1026                 return(EINVAL);
1027
1028         lwkt_serialize_enter(&adapter->serializer);
1029
1030         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1031         case IFM_AUTO:
1032                 adapter->hw.autoneg = DO_AUTO_NEG;
1033                 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1034                 break;
1035         case IFM_1000_SX:
1036         case IFM_1000_T:
1037                 adapter->hw.autoneg = DO_AUTO_NEG;
1038                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1039                 break;
1040         case IFM_100_TX:
1041                 adapter->hw.autoneg = FALSE;
1042                 adapter->hw.autoneg_advertised = 0;
1043                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1044                         adapter->hw.forced_speed_duplex = em_100_full;
1045                 else
1046                         adapter->hw.forced_speed_duplex = em_100_half;
1047                 break;
1048         case IFM_10_T:
1049                 adapter->hw.autoneg = FALSE;
1050                 adapter->hw.autoneg_advertised = 0;
1051                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1052                         adapter->hw.forced_speed_duplex = em_10_full;
1053                 else
1054                         adapter->hw.forced_speed_duplex = em_10_half;
1055                 break;
1056         default:
1057                 if_printf(ifp, "Unsupported media type\n");
1058         }
1059         /*
1060          * As the speed/duplex settings may have changed we need to
1061          * reset the PHY.
1062          */
1063         adapter->hw.phy_reset_disable = FALSE;
1064
1065         em_init_serialized(adapter);
1066
1067         lwkt_serialize_exit(&adapter->serializer);
1068         return(0);
1069 }
1070
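/*
 * Callback for bus_dmamap_load_mbuf(): on success, record the DMA segment
 * list and segment count in the caller-supplied struct em_q so em_encap()
 * can build transmit descriptors from them.
 */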
1071 static void
1072 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1073          int error)
1074 {
1075         struct em_q *q = arg;
1076
1077         if (error)
1078                 return;
1079         KASSERT(nsegs <= EM_MAX_SCATTER,
1080                 ("Too many DMA segments returned when mapping tx packet"));
1081         q->nsegs = nsegs;
1082         bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1083 }
1084
1085 #define EM_FIFO_HDR              0x10
1086 #define EM_82547_PKT_THRESH      0x3e0
1087 #define EM_82547_TX_FIFO_SIZE    0x2800
1088 #define EM_82547_TX_FIFO_BEGIN   0xf00
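/*
 * Constants for the 82547 Tx FIFO workaround below: the on-chip Tx FIFO is
 * EM_82547_TX_FIFO_SIZE bytes, each queued packet is accounted with an
 * EM_FIFO_HDR (16 byte) header and rounded up to 16 byte granularity (see
 * em_82547_fifo_workaround() and em_82547_update_fifo_head()), and
 * EM_82547_TX_FIFO_BEGIN is the value the FIFO pointers are reset to in
 * em_82547_tx_fifo_reset().
 */
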
1089 /*********************************************************************
1090  *
1091  *  This routine maps the mbufs to tx descriptors.
1092  *
1093  *  return 0 on success, positive on failure
1094  **********************************************************************/
1095 static int
1096 em_encap(struct adapter *adapter, struct mbuf *m_head)
1097 {
1098         uint32_t txd_upper;
1099         uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1100         int i, j, error;
1101         uint64_t address;
1102
1103         /* For 82544 Workaround */
1104         DESC_ARRAY desc_array;
1105         uint32_t array_elements;
1106         uint32_t counter;
1107
1108         struct ifvlan *ifv = NULL;
1109         struct em_q q;
1110         struct em_buffer *tx_buffer = NULL;
1111         struct em_tx_desc *current_tx_desc = NULL;
1112         struct ifnet *ifp = &adapter->interface_data.ac_if;
1113
1114         /*
1115          * Force a cleanup if number of TX descriptors
1116          * available hits the threshold
1117          */
1118         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1119                 em_clean_transmit_interrupts(adapter);
1120                 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1121                         adapter->no_tx_desc_avail1++;
1122                         return(ENOBUFS);
1123                 }
1124         }
1125         /*
1126          * Map the packet for DMA.
1127          */
1128         if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1129                 adapter->no_tx_map_avail++;
1130                 return(ENOMEM);
1131         }
1132         error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1133                                      &q, BUS_DMA_NOWAIT);
1134         if (error != 0) {
1135                 adapter->no_tx_dma_setup++;
1136                 bus_dmamap_destroy(adapter->txtag, q.map);
1137                 return(error);
1138         }
1139         KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1140
1141         if (q.nsegs > adapter->num_tx_desc_avail) {
1142                 adapter->no_tx_desc_avail2++;
1143                 bus_dmamap_unload(adapter->txtag, q.map);
1144                 bus_dmamap_destroy(adapter->txtag, q.map);
1145                 return(ENOBUFS);
1146         }
1147
1148         if (ifp->if_hwassist > 0) {
1149                 em_transmit_checksum_setup(adapter,  m_head,
1150                                            &txd_upper, &txd_lower);
1151         }
1152         else 
1153                 txd_upper = txd_lower = 0;
1154
1155         /* Find out if we are in vlan mode */
1156         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1157             m_head->m_pkthdr.rcvif != NULL &&
1158             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1159                 ifv = m_head->m_pkthdr.rcvif->if_softc;
1160
1161         i = adapter->next_avail_tx_desc;
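        /*
         * For the 82544 on PCI-X a single DMA segment may be split across
         * several descriptors by em_fill_descriptors(), so txd_saved
         * remembers the starting ring slot and txd_used counts descriptors
         * consumed, allowing a rollback if the ring fills up mid-packet.
         */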
1162         if (adapter->pcix_82544) {
1163                 txd_saved = i;
1164                 txd_used = 0;
1165         }
1166         for (j = 0; j < q.nsegs; j++) {
1167                 /* If adapter is 82544 and on PCIX bus */
1168                 if(adapter->pcix_82544) {
1169                         array_elements = 0;
1170                         address = htole64(q.segs[j].ds_addr);
1171                         /* 
1172                          * Check the Address and Length combination and
1173                          * split the data accordingly
1174                          */
1175                         array_elements = em_fill_descriptors(address,
1176                                                              htole32(q.segs[j].ds_len),
1177                                                              &desc_array);
1178                         for (counter = 0; counter < array_elements; counter++) {
1179                                 if (txd_used == adapter->num_tx_desc_avail) {
1180                                         adapter->next_avail_tx_desc = txd_saved;
1181                                         adapter->no_tx_desc_avail2++;
1182                                         bus_dmamap_unload(adapter->txtag, q.map);
1183                                         bus_dmamap_destroy(adapter->txtag, q.map);
1184                                         return(ENOBUFS);
1185                                 }
1186                                 tx_buffer = &adapter->tx_buffer_area[i];
1187                                 current_tx_desc = &adapter->tx_desc_base[i];
1188                                 current_tx_desc->buffer_addr = htole64(
1189                                 desc_array.descriptor[counter].address);
1190                                 current_tx_desc->lower.data = htole32(
1191                                 (adapter->txd_cmd | txd_lower | 
1192                                 (uint16_t)desc_array.descriptor[counter].length));
1193                                 current_tx_desc->upper.data = htole32((txd_upper));
1194                                 if (++i == adapter->num_tx_desc)
1195                                         i = 0;
1196
1197                                 tx_buffer->m_head = NULL;
1198                                 txd_used++;
1199                         }
1200                 } else {
1201                         tx_buffer = &adapter->tx_buffer_area[i];
1202                         current_tx_desc = &adapter->tx_desc_base[i];
1203
1204                         current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1205                         current_tx_desc->lower.data = htole32(
1206                                 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1207                         current_tx_desc->upper.data = htole32(txd_upper);
1208
1209                         if (++i == adapter->num_tx_desc)
1210                                 i = 0;
1211
1212                         tx_buffer->m_head = NULL;
1213                 }
1214         }
1215
1216         adapter->next_avail_tx_desc = i;
1217         if (adapter->pcix_82544)
1218                 adapter->num_tx_desc_avail -= txd_used;
1219         else
1220                 adapter->num_tx_desc_avail -= q.nsegs;
1221
1222         if (ifv != NULL) {
1223                 /* Set the vlan id */
1224                 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1225
1226                 /* Tell hardware to add tag */
1227                 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1228         }
1229
1230         tx_buffer->m_head = m_head;
1231         tx_buffer->map = q.map;
1232         bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1233
1234         /*
1235          * Last Descriptor of Packet needs End Of Packet (EOP)
1236          */
1237         current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1238
1239         /* 
1240          * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1241          * that this frame is available to transmit.
1242          */
1243         if (adapter->hw.mac_type == em_82547 &&
1244             adapter->link_duplex == HALF_DUPLEX) {
1245                 em_82547_move_tail_serialized(adapter);
1246         } else {
1247                 E1000_WRITE_REG(&adapter->hw, TDT, i);
1248                 if (adapter->hw.mac_type == em_82547) {
1249                         em_82547_update_fifo_head(adapter,
1250                                                   m_head->m_pkthdr.len);
1251                 }
1252         }
1253
1254         return(0);
1255 }
1256
1257 /*********************************************************************
1258  *
1259  * 82547 workaround to avoid a controller hang in a half-duplex environment.
1260  * The workaround is to avoid queuing a large packet that would span
1261  * the internal Tx FIFO ring boundary.  In that case the FIFO pointers
1262  * need to be reset, which is only done once the FIFO is quiescent.
1263  *
1264  **********************************************************************/
1265 static void
1266 em_82547_move_tail(void *arg)
1267 {
1268         struct adapter *adapter = arg;
1269
1270         lwkt_serialize_enter(&adapter->serializer);
1271         em_82547_move_tail_serialized(arg);
1272         lwkt_serialize_exit(&adapter->serializer);
1273 }
1274
1275 static void
1276 em_82547_move_tail_serialized(void *arg)
1277 {
1278         struct adapter *adapter = arg;
1279         uint16_t hw_tdt;
1280         uint16_t sw_tdt;
1281         struct em_tx_desc *tx_desc;
1282         uint16_t length = 0;
1283         boolean_t eop = 0;
1284
1285         hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1286         sw_tdt = adapter->next_avail_tx_desc;
1287
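        /*
         * Walk the descriptors from the hardware tail (TDT) towards the
         * software tail, accumulating packet lengths, and advance TDT only
         * across complete packets (EOP seen).  If a packet would trigger
         * the FIFO workaround, stop and retry later from the tx_fifo_timer
         * callout.
         */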
1288         while (hw_tdt != sw_tdt) {
1289                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1290                 length += tx_desc->lower.flags.length;
1291                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1292                 if(++hw_tdt == adapter->num_tx_desc)
1293                         hw_tdt = 0;
1294
1295                 if(eop) {
1296                         if (em_82547_fifo_workaround(adapter, length)) {
1297                                 adapter->tx_fifo_wrk++;
1298                                 callout_reset(&adapter->tx_fifo_timer, 1,
1299                                         em_82547_move_tail, adapter);
1300                                 break;
1301                         }
1302                         E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1303                         em_82547_update_fifo_head(adapter, length);
1304                         length = 0;
1305                 }
1306         }       
1307 }
1308
1309 static int
1310 em_82547_fifo_workaround(struct adapter *adapter, int len)
1311 {       
1312         int fifo_space, fifo_pkt_len;
1313
1314         fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1315
1316         if (adapter->link_duplex == HALF_DUPLEX) {
1317                 fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1318
1319                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1320                         if (em_82547_tx_fifo_reset(adapter))
1321                                 return(0);
1322                         else
1323                                 return(1);
1324                 }
1325         }
1326
1327         return(0);
1328 }
1329
1330 static void
1331 em_82547_update_fifo_head(struct adapter *adapter, int len)
1332 {
1333         int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1334
1335         /* tx_fifo_head is always 16 byte aligned */
1336         adapter->tx_fifo_head += fifo_pkt_len;
1337         if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1338                 adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
1339 }
1340
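/*
 * Reset the 82547 Tx FIFO pointers, but only when the FIFO is quiescent:
 * the descriptor ring is empty (TDT == TDH), the FIFO head/tail/saved
 * pointers all agree, and the FIFO packet count (TDFPC) is zero.
 * Returns TRUE if the reset was performed, FALSE otherwise.
 */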
1341 static int
1342 em_82547_tx_fifo_reset(struct adapter *adapter)
1343 {       
1344         uint32_t tctl;
1345
1346         if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1347               E1000_READ_REG(&adapter->hw, TDH)) &&
1348              (E1000_READ_REG(&adapter->hw, TDFT) == 
1349               E1000_READ_REG(&adapter->hw, TDFH)) &&
1350              (E1000_READ_REG(&adapter->hw, TDFTS) ==
1351               E1000_READ_REG(&adapter->hw, TDFHS)) &&
1352              (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1353
1354                 /* Disable TX unit */
1355                 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1356                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1357
1358                 /* Reset FIFO pointers */
1359                 E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1360                 E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1361                 E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1362                 E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1363
1364                 /* Re-enable TX unit */
1365                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1366                 E1000_WRITE_FLUSH(&adapter->hw);
1367
1368                 adapter->tx_fifo_head = 0;
1369                 adapter->tx_fifo_reset++;
1370
1371                 return(TRUE);
1372         }
1373         else {
1374                 return(FALSE);
1375         }
1376 }
1377
1378 static void
1379 em_set_promisc(struct adapter *adapter)
1380 {
1381         uint32_t reg_rctl;
1382         struct ifnet *ifp = &adapter->interface_data.ac_if;
1383
1384         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1385
1386         if (ifp->if_flags & IFF_PROMISC) {
1387                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1388                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1389         } else if (ifp->if_flags & IFF_ALLMULTI) {
1390                 reg_rctl |= E1000_RCTL_MPE;
1391                 reg_rctl &= ~E1000_RCTL_UPE;
1392                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1393         }
1394 }
1395
1396 static void
1397 em_disable_promisc(struct adapter *adapter)
1398 {
1399         uint32_t reg_rctl;
1400
1401         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1402
1403         reg_rctl &=  (~E1000_RCTL_UPE);
1404         reg_rctl &=  (~E1000_RCTL_MPE);
1405         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1406 }
1407
1408 /*********************************************************************
1409  *  Multicast Update
1410  *
1411  *  This routine is called whenever the multicast address list is updated.
1412  *
1413  **********************************************************************/
1414
1415 static void
1416 em_set_multi(struct adapter *adapter)
1417 {
1418         uint32_t reg_rctl = 0;
1419         uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1420         struct ifmultiaddr *ifma;
1421         int mcnt = 0;
1422         struct ifnet *ifp = &adapter->interface_data.ac_if;
1423
1424         IOCTL_DEBUGOUT("em_set_multi: begin");
1425
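        /*
         * For the 82542 rev 2.0, hold the receiver in reset (and turn MWI
         * off) while the multicast table array is rewritten; the matching
         * block at the end of this function takes it back out of reset.
         */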
1426         if (adapter->hw.mac_type == em_82542_rev2_0) {
1427                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1428                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1429                         em_pci_clear_mwi(&adapter->hw);
1430                 reg_rctl |= E1000_RCTL_RST;
1431                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1432                 msec_delay(5);
1433         }
1434
1435         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1436                 if (ifma->ifma_addr->sa_family != AF_LINK)
1437                         continue;
1438
1439                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1440                         break;
1441
1442                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1443                       &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1444                 mcnt++;
1445         }
1446
1447         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1448                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1449                 reg_rctl |= E1000_RCTL_MPE;
1450                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1451         } else
1452                 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1453
1454         if (adapter->hw.mac_type == em_82542_rev2_0) {
1455                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1456                 reg_rctl &= ~E1000_RCTL_RST;
1457                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1458                 msec_delay(5);
1459                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1460                         em_pci_set_mwi(&adapter->hw);
1461         }
1462 }
1463
1464 /*********************************************************************
1465  *  Timer routine
1466  *
1467  *  This routine checks the link status and updates the statistics counters.
1468  *
1469  **********************************************************************/
1470
1471 static void
1472 em_local_timer(void *arg)
1473 {
1474         struct ifnet *ifp;
1475         struct adapter *adapter = arg;
1476         ifp = &adapter->interface_data.ac_if;
1477
1478         lwkt_serialize_enter(&adapter->serializer);
1479
1480         em_check_for_link(&adapter->hw);
1481         em_print_link_status(adapter);
1482         em_update_stats_counters(adapter);   
1483         if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1484                 em_print_hw_stats(adapter);
1485         em_smartspeed(adapter);
1486
1487         callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
1488
1489         lwkt_serialize_exit(&adapter->serializer);
1490 }
1491
1492 static void
1493 em_print_link_status(struct adapter *adapter)
1494 {
1495         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1496                 if (adapter->link_active == 0) {
1497                         em_get_speed_and_duplex(&adapter->hw, 
1498                                                 &adapter->link_speed, 
1499                                                 &adapter->link_duplex);
1500                         device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1501                                adapter->link_speed,
1502                                ((adapter->link_duplex == FULL_DUPLEX) ?
1503                                 "Full Duplex" : "Half Duplex"));
1504                         adapter->link_active = 1;
1505                         adapter->smartspeed = 0;
1506                 }
1507         } else {
1508                 if (adapter->link_active == 1) {
1509                         adapter->link_speed = 0;
1510                         adapter->link_duplex = 0;
1511                         device_printf(adapter->dev, "Link is Down\n");
1512                         adapter->link_active = 0;
1513                 }
1514         }
1515 }
1516
1517 /*********************************************************************
1518  *
1519  *  This routine disables all traffic on the adapter by issuing a
1520  *  global reset on the MAC and deallocates TX/RX buffers. 
1521  *
1522  **********************************************************************/
1523
1524 static void
1525 em_stop(void *arg)
1526 {
1527         struct ifnet   *ifp;
1528         struct adapter * adapter = arg;
1529         ifp = &adapter->interface_data.ac_if;
1530
1531         INIT_DEBUGOUT("em_stop: begin");
1532         em_disable_intr(adapter);
1533         em_reset_hw(&adapter->hw);
1534         callout_stop(&adapter->timer);
1535         callout_stop(&adapter->tx_fifo_timer);
1536         em_free_transmit_structures(adapter);
1537         em_free_receive_structures(adapter);
1538
1539         /* Tell the stack that the interface is no longer active */
1540         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1541         ifp->if_timer = 0;
1542 }
1543
1544 /*********************************************************************
1545  *
1546  *  Determine hardware revision.
1547  *
1548  **********************************************************************/
1549 static void
1550 em_identify_hardware(struct adapter * adapter)
1551 {
1552         device_t dev = adapter->dev;
1553
1554         /* Make sure bus mastering and memory access are enabled in PCI config space */
1555         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1556         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1557               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1558                 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1559                 adapter->hw.pci_cmd_word |= 
1560                 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1561                 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1562         }
1563
1564         /* Save off the information about this board */
1565         adapter->hw.vendor_id = pci_get_vendor(dev);
1566         adapter->hw.device_id = pci_get_device(dev);
1567         adapter->hw.revision_id = pci_get_revid(dev);
1568         adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1569         adapter->hw.subsystem_id = pci_get_subdevice(dev);
1570
1571         /* Identify the MAC */
1572         if (em_set_mac_type(&adapter->hw))
1573                 device_printf(dev, "Unknown MAC Type\n");
1574
1575         if (adapter->hw.mac_type == em_82541 ||
1576             adapter->hw.mac_type == em_82541_rev_2 ||
1577             adapter->hw.mac_type == em_82547 ||
1578             adapter->hw.mac_type == em_82547_rev_2)
1579                 adapter->hw.phy_init_script = TRUE;
1580 }
1581
1582 /*********************************************************************
1583  *
1584  *  Initialize the hardware to a configuration as specified by the
1585  *  adapter structure. The controller is reset, the EEPROM is
1586  *  verified, the MAC address is set, then the shared initialization
1587  *  routines are called.
1588  *
1589  **********************************************************************/
1590 static int
1591 em_hardware_init(struct adapter *adapter)
1592 {
1593         INIT_DEBUGOUT("em_hardware_init: begin");
1594         /* Issue a global reset */
1595         em_reset_hw(&adapter->hw);
1596
1597         /* When hardware is reset, fifo_head is also reset */
1598         adapter->tx_fifo_head = 0;
1599
1600         /* Make sure we have a good EEPROM before we read from it */
1601         if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1602                 device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1603                 return(EIO);
1604         }
1605
1606         if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1607                 device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1608                 return(EIO);
1609         }
1610
1611         if (em_init_hw(&adapter->hw) < 0) {
1612                 device_printf(adapter->dev, "Hardware Initialization Failed");
1613                 return(EIO);
1614         }
1615
1616         em_check_for_link(&adapter->hw);
1617         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1618                 adapter->link_active = 1;
1619         else
1620                 adapter->link_active = 0;
1621
1622         if (adapter->link_active) {
1623                 em_get_speed_and_duplex(&adapter->hw, 
1624                                         &adapter->link_speed, 
1625                                         &adapter->link_duplex);
1626         } else {
1627                 adapter->link_speed = 0;
1628                 adapter->link_duplex = 0;
1629         }
1630
1631         return(0);
1632 }
1633
1634 /*********************************************************************
1635  *
1636  *  Setup networking device structure and register an interface.
1637  *
1638  **********************************************************************/
1639 static void
1640 em_setup_interface(device_t dev, struct adapter *adapter)
1641 {
1642         struct ifnet   *ifp;
1643         INIT_DEBUGOUT("em_setup_interface: begin");
1644
1645         ifp = &adapter->interface_data.ac_if;
1646         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1647         ifp->if_mtu = ETHERMTU;
1648         ifp->if_baudrate = 1000000000;
1649         ifp->if_init =  em_init;
1650         ifp->if_softc = adapter;
1651         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1652         ifp->if_ioctl = em_ioctl;
1653         ifp->if_start = em_start;
1654 #ifdef DEVICE_POLLING
1655         ifp->if_poll = em_poll;
1656 #endif
1657         ifp->if_watchdog = em_watchdog;
1658         ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
1659         ifq_set_ready(&ifp->if_snd);
1660
1661         if (adapter->hw.mac_type >= em_82543)
1662                 ifp->if_capabilities |= IFCAP_HWCSUM;
1663
1664         ifp->if_capenable = ifp->if_capabilities;
1665
1666         ether_ifattach(ifp, adapter->hw.mac_addr);
1667
1668         /*
1669          * Tell the upper layer(s) we support long frames.
1670          */
1671         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1672         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1673
1674         /* 
1675          * Specify the media types supported by this adapter and register
1676          * callbacks to update media and link information
1677          */
1678         ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1679                      em_media_status);
1680         if (adapter->hw.media_type == em_media_type_fiber) {
1681                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 
1682                             0, NULL);
1683                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 
1684                             0, NULL);
1685         } else {
1686                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1687                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 
1688                             0, NULL);
1689                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 
1690                             0, NULL);
1691                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 
1692                             0, NULL);
1693                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 
1694                             0, NULL);
1695                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1696         }
1697         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1698         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1699 }
1700
1701 /*********************************************************************
1702  *
1703  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1704  *
1705  **********************************************************************/        
1706 static void
1707 em_smartspeed(struct adapter *adapter)
1708 {
1709         uint16_t phy_tmp;
1710
1711         if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) || 
1712             !adapter->hw.autoneg ||
1713             !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1714                 return;
1715
1716         if (adapter->smartspeed == 0) {
1717                 /*
1718                  * If the Master/Slave config fault is asserted twice in
1719                  * a row, assume a back-to-back connection.
1720                  */
1721                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1722                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1723                         return;
1724                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1725                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1726                         em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1727                                         &phy_tmp);
1728                         if (phy_tmp & CR_1000T_MS_ENABLE) {
1729                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
1730                                 em_write_phy_reg(&adapter->hw,
1731                                                  PHY_1000T_CTRL, phy_tmp);
1732                                 adapter->smartspeed++;
1733                                 if (adapter->hw.autoneg &&
1734                                     !em_phy_setup_autoneg(&adapter->hw) &&
1735                                     !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1736                                                      &phy_tmp)) {
1737                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |  
1738                                                     MII_CR_RESTART_AUTO_NEG);
1739                                         em_write_phy_reg(&adapter->hw,
1740                                                          PHY_CTRL, phy_tmp);
1741                                 }
1742                         }
1743                 }
1744                 return;
1745         } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1746                 /* If there is still no link, perhaps we are using a 2/3-pair cable */
1747                 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1748                 phy_tmp |= CR_1000T_MS_ENABLE;
1749                 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1750                 if (adapter->hw.autoneg &&
1751                     !em_phy_setup_autoneg(&adapter->hw) &&
1752                     !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1753                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
1754                                     MII_CR_RESTART_AUTO_NEG);
1755                         em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1756                 }
1757         }
1758         /* Restart process after EM_SMARTSPEED_MAX iterations */
1759         if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1760                 adapter->smartspeed = 0;
1761 }
1762
1763 /*
1764  * Manage DMA'able memory.
1765  */
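/*
 * bus_dmamap_load() callback.  The tags used with it are created with a
 * single segment, so simply record that segment's bus address through the
 * opaque argument; on error the caller's address is left untouched.
 */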
1766 static void
1767 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1768 {
1769         if (error)
1770                 return;
1771         *(bus_addr_t*) arg = segs->ds_addr;
1772 }
1773
1774 static int
1775 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1776               struct em_dma_alloc *dma, int mapflags)
1777 {
1778         int r;
1779         device_t dev = adapter->dev;
1780
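        /*
         * Three-step allocation: create a tag describing the constraints,
         * allocate DMA-able memory against it, then load the map to learn
         * the bus address (delivered via em_dmamap_cb).  The fail_* labels
         * below unwind whichever steps succeeded.
         */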
1781         r = bus_dma_tag_create(NULL,                    /* parent */
1782                                PAGE_SIZE, 0,            /* alignment, bounds */
1783                                BUS_SPACE_MAXADDR,       /* lowaddr */
1784                                BUS_SPACE_MAXADDR,       /* highaddr */
1785                                NULL, NULL,              /* filter, filterarg */
1786                                size,                    /* maxsize */
1787                                1,                       /* nsegments */
1788                                size,                    /* maxsegsize */
1789                                BUS_DMA_ALLOCNOW,        /* flags */
1790                                &dma->dma_tag);
1791         if (r != 0) {
1792                 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1793                               "error %u\n", r);
1794                 goto fail_0;
1795         }
1796
1797         r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1798                              BUS_DMA_NOWAIT, &dma->dma_map);
1799         if (r != 0) {
1800                 device_printf(dev, "em_dma_malloc: bus_dmamem_alloc failed; "
1801                               "size %llu, error %d\n", (unsigned long long)size, r);
1802                 goto fail_2;
1803         }
1804
1805         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1806                             size,
1807                             em_dmamap_cb,
1808                             &dma->dma_paddr,
1809                             mapflags | BUS_DMA_NOWAIT);
1810         if (r != 0) {
1811                 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1812                               "error %u\n", r);
1813                 goto fail_3;
1814         }
1815
1816         dma->dma_size = size;
1817         return(0);
1818
1819 fail_3:
1820         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1821 fail_2:
1822         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1823         bus_dma_tag_destroy(dma->dma_tag);
1824 fail_0:
1825         dma->dma_map = NULL;
1826         dma->dma_tag = NULL;
1827         return(r);
1828 }
1829
1830 static void
1831 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1832 {
1833         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1834         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1835         bus_dma_tag_destroy(dma->dma_tag);
1836 }
1837
1838 /*********************************************************************
1839  *
1840  *  Allocate memory for tx_buffer structures. The tx_buffer stores all 
1841  *  the information needed to transmit a packet on the wire. 
1842  *
1843  **********************************************************************/
1844 static int
1845 em_allocate_transmit_structures(struct adapter * adapter)
1846 {
1847         adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1848             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1849         if (adapter->tx_buffer_area == NULL) {
1850                 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1851                 return(ENOMEM);
1852         }
1853
1854         return(0);
1855 }
1856
1857 /*********************************************************************
1858  *
1859  *  Allocate and initialize transmit structures. 
1860  *
1861  **********************************************************************/
1862 static int
1863 em_setup_transmit_structures(struct adapter * adapter)
1864 {
1865         /*
1866          * Setup DMA descriptor areas.
1867          */
1868         if (bus_dma_tag_create(NULL,                    /* parent */
1869                                1, 0,                    /* alignment, bounds */
1870                                BUS_SPACE_MAXADDR,       /* lowaddr */ 
1871                                BUS_SPACE_MAXADDR,       /* highaddr */
1872                                NULL, NULL,              /* filter, filterarg */
1873                                MCLBYTES * 8,            /* maxsize */
1874                                EM_MAX_SCATTER,          /* nsegments */
1875                                MCLBYTES * 8,            /* maxsegsize */
1876                                BUS_DMA_ALLOCNOW,        /* flags */ 
1877                                &adapter->txtag)) {
1878                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1879                 return(ENOMEM);
1880         }
1881
1882         if (em_allocate_transmit_structures(adapter))
1883                 return(ENOMEM);
1884
1885         bzero((void *) adapter->tx_desc_base,
1886               (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1887
1888         adapter->next_avail_tx_desc = 0;
1889         adapter->oldest_used_tx_desc = 0;
1890
1891         /* Set number of descriptors available */
1892         adapter->num_tx_desc_avail = adapter->num_tx_desc;
1893
1894         /* Set checksum context */
1895         adapter->active_checksum_context = OFFLOAD_NONE;
1896
1897         return(0);
1898 }
1899
1900 /*********************************************************************
1901  *
1902  *  Enable transmit unit.
1903  *
1904  **********************************************************************/
1905 static void
1906 em_initialize_transmit_unit(struct adapter * adapter)
1907 {
1908         uint32_t reg_tctl;
1909         uint32_t reg_tipg = 0;
1910         uint64_t bus_addr;
1911
1912         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1913
1914         /* Setup the Base and Length of the Tx Descriptor Ring */
1915         bus_addr = adapter->txdma.dma_paddr;
1916         E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1917         E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1918         E1000_WRITE_REG(&adapter->hw, TDLEN, 
1919                         adapter->num_tx_desc * sizeof(struct em_tx_desc));
1920
1921         /* Setup the HW Tx Head and Tail descriptor pointers */
1922         E1000_WRITE_REG(&adapter->hw, TDH, 0);
1923         E1000_WRITE_REG(&adapter->hw, TDT, 0);
1924
1925         HW_DEBUGOUT2("Base = %x, Length = %x\n", 
1926                      E1000_READ_REG(&adapter->hw, TDBAL),
1927                      E1000_READ_REG(&adapter->hw, TDLEN));
1928
1929         /* Set the default values for the Tx Inter Packet Gap timer */
1930         switch (adapter->hw.mac_type) {
1931         case em_82542_rev2_0:
1932         case em_82542_rev2_1:
1933                 reg_tipg = DEFAULT_82542_TIPG_IPGT;
1934                 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1935                 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1936                 break;
1937         default:
1938                 if (adapter->hw.media_type == em_media_type_fiber)
1939                         reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1940                 else
1941                         reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1942                 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1943                 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1944         }
1945
1946         E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1947         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
1948         if (adapter->hw.mac_type >= em_82540)
1949                 E1000_WRITE_REG(&adapter->hw, TADV,
1950                                 adapter->tx_abs_int_delay.value);
1951
1952         /* Program the Transmit Control Register */
1953         reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1954                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1955         if (adapter->link_duplex == FULL_DUPLEX)
1956                 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1957         else
1958                 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1959         E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1960
1961         /* Setup Transmit Descriptor Settings for this adapter */   
1962         adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
1963
1964         if (adapter->tx_int_delay.value > 0)
1965                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1966 }
1967
1968 /*********************************************************************
1969  *
1970  *  Free all transmit related data structures.
1971  *
1972  **********************************************************************/
1973 static void
1974 em_free_transmit_structures(struct adapter * adapter)
1975 {
1976         struct em_buffer *tx_buffer;
1977         int i;
1978
1979         INIT_DEBUGOUT("free_transmit_structures: begin");
1980
1981         if (adapter->tx_buffer_area != NULL) {
1982                 tx_buffer = adapter->tx_buffer_area;
1983                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1984                         if (tx_buffer->m_head != NULL) {
1985                                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1986                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1987                                 m_freem(tx_buffer->m_head);
1988                         }
1989                         tx_buffer->m_head = NULL;
1990                 }
1991         }
1992         if (adapter->tx_buffer_area != NULL) {
1993                 free(adapter->tx_buffer_area, M_DEVBUF);
1994                 adapter->tx_buffer_area = NULL;
1995         }
1996         if (adapter->txtag != NULL) {
1997                 bus_dma_tag_destroy(adapter->txtag);
1998                 adapter->txtag = NULL;
1999         }
2000 }
2001
2002 /*********************************************************************
2003  *
2004  *  The offload context needs to be set when we transfer the first
2005  *  packet of a particular protocol (TCP/UDP). We change the
2006  *  context only if the protocol type changes.
2007  *
2008  **********************************************************************/
2009 static void
2010 em_transmit_checksum_setup(struct adapter * adapter,
2011                            struct mbuf *mp,
2012                            uint32_t *txd_upper,
2013                            uint32_t *txd_lower) 
2014 {
2015         struct em_context_desc *TXD;
2016         struct em_buffer *tx_buffer;
2017         int curr_txd;
2018
2019         if (mp->m_pkthdr.csum_flags) {
2020                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2021                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2022                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2023                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2024                                 return;
2025                         else
2026                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2027                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2028                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2029                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2030                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2031                                 return;
2032                         else
2033                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2034                 } else {
2035                         *txd_upper = 0;
2036                         *txd_lower = 0;
2037                         return;
2038                 }
2039         } else {
2040                 *txd_upper = 0;
2041                 *txd_lower = 0;
2042                 return;
2043         }
2044
2045         /* If we reach this point, the checksum offload context
2046          * needs to be reset.
2047          */
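        /*
         * A context descriptor is written into the ring in place of a data
         * descriptor.  The css/cso/cse fields tell the hardware where the
         * IP and TCP/UDP headers start, where their checksum fields sit and
         * where they end, assuming a plain Ethernet + IPv4 frame with no IP
         * options; the context descriptor consumes one ring slot.
         */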
2048         curr_txd = adapter->next_avail_tx_desc;
2049         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2050         TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2051
2052         TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2053         TXD->lower_setup.ip_fields.ipcso =
2054             ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2055         TXD->lower_setup.ip_fields.ipcse =
2056             htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2057
2058         TXD->upper_setup.tcp_fields.tucss = 
2059             ETHER_HDR_LEN + sizeof(struct ip);
2060         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2061
2062         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2063                 TXD->upper_setup.tcp_fields.tucso =
2064                     ETHER_HDR_LEN + sizeof(struct ip) +
2065                     offsetof(struct tcphdr, th_sum);
2066         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2067                 TXD->upper_setup.tcp_fields.tucso =
2068                         ETHER_HDR_LEN + sizeof(struct ip) +
2069                         offsetof(struct udphdr, uh_sum);
2070         }
2071
2072         TXD->tcp_seg_setup.data = htole32(0);
2073         TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2074
2075         tx_buffer->m_head = NULL;
2076
2077         if (++curr_txd == adapter->num_tx_desc)
2078                 curr_txd = 0;
2079
2080         adapter->num_tx_desc_avail--;
2081         adapter->next_avail_tx_desc = curr_txd;
2082 }
2083
2084 /**********************************************************************
2085  *
2086  *  Examine each tx_buffer in the used queue. If the hardware is done
2087  *  processing the packet, free the associated resources and return
2088  *  the tx_buffer to the free queue.
2089  *
2090  **********************************************************************/
2091
2092 static void
2093 em_clean_transmit_interrupts(struct adapter *adapter)
2094 {
2095         int i, num_avail;
2096         struct em_buffer *tx_buffer;
2097         struct em_tx_desc *tx_desc;
2098         struct ifnet *ifp = &adapter->interface_data.ac_if;
2099
2100         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2101                 return;
2102
2103 #ifdef DBG_STATS
2104         adapter->clean_tx_interrupts++;
2105 #endif
2106         num_avail = adapter->num_tx_desc_avail; 
2107         i = adapter->oldest_used_tx_desc;
2108
2109         tx_buffer = &adapter->tx_buffer_area[i];
2110         tx_desc = &adapter->tx_desc_base[i];
2111
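        /*
         * Starting from the oldest descriptor handed to the hardware, walk
         * forward while the Descriptor Done bit is set, reclaiming each
         * descriptor and freeing the mbuf and DMA map of any buffer that
         * carried a packet.
         */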
2112         while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2113                 tx_desc->upper.data = 0;
2114                 num_avail++;                        
2115
2116                 if (tx_buffer->m_head) {
2117                         ifp->if_opackets++;
2118                         bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2119                                         BUS_DMASYNC_POSTWRITE);
2120                         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2121                         bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2122
2123                         m_freem(tx_buffer->m_head);
2124                         tx_buffer->m_head = NULL;
2125                 }
2126
2127                 if (++i == adapter->num_tx_desc)
2128                         i = 0;
2129
2130                 tx_buffer = &adapter->tx_buffer_area[i];
2131                 tx_desc = &adapter->tx_desc_base[i];
2132         }
2133
2134         adapter->oldest_used_tx_desc = i;
2135
2136         /*
2137          * If we have enough room, clear IFF_OACTIVE to tell the stack
2138          * that it is OK to send packets.
2139          * If there are no pending descriptors, clear the timeout. Otherwise,
2140          * if some descriptors have been freed, restart the timeout.
2141          */
2142         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2143                 ifp->if_flags &= ~IFF_OACTIVE;
2144                 if (num_avail == adapter->num_tx_desc)
2145                         ifp->if_timer = 0;
2146                 else if (num_avail == adapter->num_tx_desc_avail)
2147                         ifp->if_timer = EM_TX_TIMEOUT;
2148         }
2149         adapter->num_tx_desc_avail = num_avail;
2150 }
2151
2152 /*********************************************************************
2153  *
2154  *  Get a buffer from system mbuf buffer pool.
2155  *
2156  **********************************************************************/
2157 static int
2158 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2159 {
2160         struct mbuf *mp = nmp;
2161         struct em_buffer *rx_buffer;
2162         struct ifnet *ifp;
2163         bus_addr_t paddr;
2164         int error;
2165
2166         ifp = &adapter->interface_data.ac_if;
2167
2168         if (mp == NULL) {
2169                 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2170                 if (mp == NULL) {
2171                         adapter->mbuf_cluster_failed++;
2172                         return(ENOBUFS);
2173                 }
2174                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2175         } else {
2176                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2177                 mp->m_data = mp->m_ext.ext_buf;
2178                 mp->m_next = NULL;
2179         }
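        /*
         * For standard-MTU frames, eat ETHER_ALIGN (2) bytes at the front
         * of the cluster so the IP header ends up on a 32-bit boundary
         * once the 14-byte Ethernet header has been consumed.
         */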
2180         if (ifp->if_mtu <= ETHERMTU)
2181                 m_adj(mp, ETHER_ALIGN);
2182
2183         rx_buffer = &adapter->rx_buffer_area[i];
2184
2185         /*
2186          * Using memory from the mbuf cluster pool, invoke the
2187          * bus_dma machinery to arrange the memory mapping.
2188          */
2189         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2190                                 mtod(mp, void *), mp->m_len,
2191                                 em_dmamap_cb, &paddr, 0);
2192         if (error) {
2193                 m_free(mp);
2194                 return(error);
2195         }
2196         rx_buffer->m_head = mp;
2197         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2198         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2199
2200         return(0);
2201 }
2202
2203 /*********************************************************************
2204  *
2205  *  Allocate memory for rx_buffer structures. Since we use one 
2206  *  rx_buffer per received packet, the maximum number of rx_buffers
2207  *  that we'll need is equal to the number of receive descriptors 
2208  *  that we've allocated.
2209  *
2210  **********************************************************************/
2211 static int
2212 em_allocate_receive_structures(struct adapter *adapter)
2213 {
2214         int i, error, size;
2215         struct em_buffer *rx_buffer;
2216
2217         size = adapter->num_rx_desc * sizeof(struct em_buffer);
2218         adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2219
2220         error = bus_dma_tag_create(NULL,                /* parent */
2221                                    1, 0,                /* alignment, bounds */
2222                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2223                                    BUS_SPACE_MAXADDR,   /* highaddr */
2224                                    NULL, NULL,          /* filter, filterarg */
2225                                    MCLBYTES,            /* maxsize */
2226                                    1,                   /* nsegments */
2227                                    MCLBYTES,            /* maxsegsize */
2228                                    BUS_DMA_ALLOCNOW,    /* flags */
2229                                    &adapter->rxtag);
2230         if (error != 0) {
2231                 device_printf(adapter->dev, "em_allocate_receive_structures: "
2232                               "bus_dma_tag_create failed; error %u\n", error);
2233                 goto fail_0;
2234         }
2235  
2236         rx_buffer = adapter->rx_buffer_area;
2237         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2238                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2239                                           &rx_buffer->map);
2240                 if (error != 0) {
2241                         device_printf(adapter->dev,
2242                                       "em_allocate_receive_structures: "
2243                                       "bus_dmamap_create failed; error %u\n",
2244                                       error);
2245                         goto fail_1;
2246                 }
2247         }
2248
2249         for (i = 0; i < adapter->num_rx_desc; i++) {
2250                 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2251                 if (error != 0) {
2252                         adapter->rx_buffer_area[i].m_head = NULL;
2253                         adapter->rx_desc_base[i].buffer_addr = 0;
2254                         return(error);
2255                 }
2256         }
2257
2258         return(0);
2259
2260 fail_1:
2261         bus_dma_tag_destroy(adapter->rxtag);
2262 fail_0:
2263         adapter->rxtag = NULL;
2264         free(adapter->rx_buffer_area, M_DEVBUF);
2265         adapter->rx_buffer_area = NULL;
2266         return(error);
2267 }
2268
2269 /*********************************************************************
2270  *
2271  *  Allocate and initialize receive structures.
2272  *  
2273  **********************************************************************/
2274 static int
2275 em_setup_receive_structures(struct adapter *adapter)
2276 {
2277         bzero((void *) adapter->rx_desc_base,
2278               (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2279
2280         if (em_allocate_receive_structures(adapter))
2281                 return(ENOMEM);
2282
2283         /* Setup our descriptor pointers */
2284         adapter->next_rx_desc_to_check = 0;
2285         return(0);
2286 }
2287
2288 /*********************************************************************
2289  *
2290  *  Enable receive unit.
2291  *  
2292  **********************************************************************/
2293 static void
2294 em_initialize_receive_unit(struct adapter *adapter)
2295 {
2296         uint32_t reg_rctl;
2297         uint32_t reg_rxcsum;
2298         struct ifnet *ifp;
2299         uint64_t bus_addr;
2300  
2301         INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2302
2303         ifp = &adapter->interface_data.ac_if;
2304
2305         /* Make sure receives are disabled while setting up the descriptor ring */
2306         E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2307
2308         /* Set the Receive Delay Timer Register */
2309         E1000_WRITE_REG(&adapter->hw, RDTR, 
2310                         adapter->rx_int_delay.value | E1000_RDT_FPDB);
2311
2312         if(adapter->hw.mac_type >= em_82540) {
2313                 E1000_WRITE_REG(&adapter->hw, RADV,
2314                                 adapter->rx_abs_int_delay.value);
2315
2316                 /* Set the interrupt throttling rate in 256ns increments */  
2317                 if (em_int_throttle_ceil) {
2318                         E1000_WRITE_REG(&adapter->hw, ITR,
2319                                 1000000000 / 256 / em_int_throttle_ceil);
2320                 } else {
2321                         E1000_WRITE_REG(&adapter->hw, ITR, 0);
2322                 }
2323         }
2324
2325         /* Setup the Base and Length of the Rx Descriptor Ring */
2326         bus_addr = adapter->rxdma.dma_paddr;
2327         E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2328         E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2329         E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2330                         sizeof(struct em_rx_desc));
2331
2332         /* Setup the HW Rx Head and Tail Descriptor Pointers */
2333         E1000_WRITE_REG(&adapter->hw, RDH, 0);
2334         E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2335
2336         /* Setup the Receive Control Register */
2337         reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2338                    E1000_RCTL_RDMTS_HALF |
2339                    (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2340
2341         if (adapter->hw.tbi_compatibility_on == TRUE)
2342                 reg_rctl |= E1000_RCTL_SBP;
2343
2344         switch (adapter->rx_buffer_len) {
2345         default:
2346         case EM_RXBUFFER_2048:
2347                 reg_rctl |= E1000_RCTL_SZ_2048;
2348                 break;
2349         case EM_RXBUFFER_4096:
2350                 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2351                 break;            
2352         case EM_RXBUFFER_8192:
2353                 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2354                 break;
2355         case EM_RXBUFFER_16384:
2356                 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2357                 break;
2358         }
2359
2360         if (ifp->if_mtu > ETHERMTU)
2361                 reg_rctl |= E1000_RCTL_LPE;
2362
2363         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2364         if ((adapter->hw.mac_type >= em_82543) && 
2365             (ifp->if_capenable & IFCAP_RXCSUM)) {
2366                 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2367                 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2368                 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2369         }
2370
2371         /* Enable Receives */
2372         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);  
2373 }
2374
2375 /*********************************************************************
2376  *
2377  *  Free receive related data structures.
2378  *
2379  **********************************************************************/
2380 static void
2381 em_free_receive_structures(struct adapter *adapter)
2382 {
2383         struct em_buffer *rx_buffer;
2384         int i;
2385
2386         INIT_DEBUGOUT("free_receive_structures: begin");
2387
2388         if (adapter->rx_buffer_area != NULL) {
2389                 rx_buffer = adapter->rx_buffer_area;
2390                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2391                         if (rx_buffer->map != NULL) {
2392                                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2393                                 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2394                         }
2395                         if (rx_buffer->m_head != NULL)
2396                                 m_freem(rx_buffer->m_head);
2397                         rx_buffer->m_head = NULL;
2398                 }
2399         }
2400         if (adapter->rx_buffer_area != NULL) {
2401                 free(adapter->rx_buffer_area, M_DEVBUF);
2402                 adapter->rx_buffer_area = NULL;
2403         }
2404         if (adapter->rxtag != NULL) {
2405                 bus_dma_tag_destroy(adapter->rxtag);
2406                 adapter->rxtag = NULL;
2407         }
2408 }
2409
2410 /*********************************************************************
2411  *
2412  *  This routine executes in interrupt context. It replenishes
2413  *  the mbufs in the descriptor ring and passes data which has been
2414  *  DMA'd into host memory up to the upper layer.
2415  *
2416  *  We loop at most count times if count is > 0, or until done if
2417  *  count < 0.
2418  *
2419  *********************************************************************/
2420 static void
2421 em_process_receive_interrupts(struct adapter *adapter, int count)
2422 {
2423         struct ifnet *ifp;
2424         struct mbuf *mp;
2425         uint8_t accept_frame = 0;
2426         uint8_t eop = 0;
2427         uint16_t len, desc_len, prev_len_adj;
2428         int i;
2429
2430         /* Pointer to the receive descriptor being examined. */
2431         struct em_rx_desc *current_desc;
2432
2433         ifp = &adapter->interface_data.ac_if;
2434         i = adapter->next_rx_desc_to_check;
2435         current_desc = &adapter->rx_desc_base[i];
2436
2437         if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2438 #ifdef DBG_STATS
2439                 adapter->no_pkts_avail++;
2440 #endif
2441                 return;
2442         }
2443         while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2444                 mp = adapter->rx_buffer_area[i].m_head;
2445                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2446                                 BUS_DMASYNC_POSTREAD);
2447
2448                 accept_frame = 1;
2449                 prev_len_adj = 0;
2450                 desc_len = le16toh(current_desc->length);
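                /*
                 * On the EOP descriptor the hardware-appended CRC must be
                 * stripped.  Normally it is wholly contained in this
                 * fragment; if the final fragment is shorter than the CRC,
                 * the remainder sits at the end of the previous mbuf and
                 * prev_len_adj trims it off when the chain is stitched
                 * together below.
                 */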
2451                 if (current_desc->status & E1000_RXD_STAT_EOP) {
2452                         count--;
2453                         eop = 1;
2454                         if (desc_len < ETHER_CRC_LEN) {
2455                                 len = 0;
2456                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
2457                         }
2458                         else {
2459                                 len = desc_len - ETHER_CRC_LEN;
2460                         }
2461                 } else {
2462                         eop = 0;
2463                         len = desc_len;
2464                 }
2465
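                /*
                 * Frames flagged with a receive error are normally dropped,
                 * but with TBI compatibility enabled TBI_ACCEPT() may still
                 * take the frame (the workaround for frames that merely end
                 * in a carrier-extension error); in that case the statistics
                 * are adjusted and the trailing byte is dropped from the
                 * length.
                 */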
2466                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2467                         uint8_t last_byte;
2468                         uint32_t pkt_len = desc_len;
2469
2470                         if (adapter->fmp != NULL)
2471                                 pkt_len += adapter->fmp->m_pkthdr.len; 
2472
2473                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2474
2475                         if (TBI_ACCEPT(&adapter->hw, current_desc->status, 
2476                                        current_desc->errors, 
2477                                        pkt_len, last_byte)) {
2478                                 em_tbi_adjust_stats(&adapter->hw, 
2479                                                     &adapter->stats, 
2480                                                     pkt_len, 
2481                                                     adapter->hw.mac_addr);
2482                                 if (len > 0)
2483                                         len--;
2484                         }
2485                         else {
2486                                 accept_frame = 0;
2487                         }
2488                 }
2489
2490                 if (accept_frame) {
2491                         if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2492                                 adapter->dropped_pkts++;
2493                                 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2494                                 if (adapter->fmp != NULL) 
2495                                         m_freem(adapter->fmp);
2496                                 adapter->fmp = NULL;
2497                                 adapter->lmp = NULL;
2498                                 break;
2499                         }
2500
2501                         /* Assign correct length to the current fragment */
2502                         mp->m_len = len;
2503
2504                         if (adapter->fmp == NULL) {
2505                                 mp->m_pkthdr.len = len;
2506                                 adapter->fmp = mp;       /* Store the first mbuf */
2507                                 adapter->lmp = mp;
2508                         } else {
2509                                 /* Chain mbuf's together */
2510                                 /* 
2511                                  * Adjust length of previous mbuf in chain if we 
2512                                  * received less than 4 bytes in the last descriptor.
2513                                  */
2514                                 if (prev_len_adj > 0) {
2515                                         adapter->lmp->m_len -= prev_len_adj;
2516                                         adapter->fmp->m_pkthdr.len -= prev_len_adj;
2517                                 }
2518                                 adapter->lmp->m_next = mp;
2519                                 adapter->lmp = adapter->lmp->m_next;
2520                                 adapter->fmp->m_pkthdr.len += len;
2521                         }
2522
2523                         if (eop) {
2524                                 adapter->fmp->m_pkthdr.rcvif = ifp;
2525                                 ifp->if_ipackets++;
2526
2527                                 em_receive_checksum(adapter, current_desc,
2528                                                     adapter->fmp);
2529                                 if (current_desc->status & E1000_RXD_STAT_VP)
2530                                         VLAN_INPUT_TAG(adapter->fmp,
2531                                                        (current_desc->special & 
2532                                                         E1000_RXD_SPC_VLAN_MASK));
2533                                 else
2534                                         (*ifp->if_input)(ifp, adapter->fmp);
2535                                 adapter->fmp = NULL;
2536                                 adapter->lmp = NULL;
2537                         }
2538                 } else {
2539                         adapter->dropped_pkts++;
2540                         em_get_buf(i, adapter, mp, MB_DONTWAIT);
2541                         if (adapter->fmp != NULL) 
2542                                 m_freem(adapter->fmp);
2543                         adapter->fmp = NULL;
2544                         adapter->lmp = NULL;
2545                 }
2546
2547                 /* Zero out the receive descriptors status  */
2548                 current_desc->status = 0;
2549
2550                 /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2551                 E1000_WRITE_REG(&adapter->hw, RDT, i);
2552
2553                 /* Advance our pointers to the next descriptor */
2554                 if (++i == adapter->num_rx_desc) {
2555                         i = 0;
2556                         current_desc = adapter->rx_desc_base;
2557                 } else
2558                         current_desc++;
2559         }
2560         adapter->next_rx_desc_to_check = i;
2561 }
2562
2563 /*********************************************************************
2564  *
2565  *  Verify that the hardware indicated that the checksum is valid. 
2566  *  Inform the stack about the status of the checksum so that the
2567  *  stack doesn't spend time verifying it.
2568  *
2569  *********************************************************************/
2570 static void
2571 em_receive_checksum(struct adapter *adapter,
2572                     struct em_rx_desc *rx_desc,
2573                     struct mbuf *mp)
2574 {
2575         /* 82543 or newer only */
2576         if ((adapter->hw.mac_type < em_82543) ||
2577             /* Ignore Checksum bit is set */
2578             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2579                 mp->m_pkthdr.csum_flags = 0;
2580                 return;
2581         }
2582
2583         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2584                 /* Did it pass? */
2585                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2586                         /* IP Checksum Good */
2587                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2588                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2589                 } else {
2590                         mp->m_pkthdr.csum_flags = 0;
2591                 }
2592         }
2593
2594         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2595                 /* Did it pass? */        
2596                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2597                         mp->m_pkthdr.csum_flags |= 
2598                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2599                         mp->m_pkthdr.csum_data = htons(0xffff);
2600                 }
2601         }
2602 }
2603
2604
2605 static void 
2606 em_enable_vlans(struct adapter *adapter)
2607 {
2608         uint32_t ctrl;
2609
2610         E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2611
2612         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2613         ctrl |= E1000_CTRL_VME; 
2614         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2615 }
2616
2617 /*
2618  * note: we must call bus_enable_intr() prior to enabling the hardware
2619  * interrupt and bus_disable_intr() after disabling the hardware interrupt
2620  * in order to avoid handler execution races from scheduled interrupt
2621  * threads.
2622  */
2623 static void
2624 em_enable_intr(struct adapter *adapter)
2625 {
2626         struct ifnet *ifp = &adapter->interface_data.ac_if;
2627         
2628         if ((ifp->if_flags & IFF_POLLING) == 0) {
2629                 lwkt_serialize_handler_enable(&adapter->serializer);
2630                 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2631         }
2632 }
2633
2634 static void
2635 em_disable_intr(struct adapter *adapter)
2636 {
2637         E1000_WRITE_REG(&adapter->hw, IMC, 
2638                         (0xffffffff & ~E1000_IMC_RXSEQ));
2639         lwkt_serialize_handler_disable(&adapter->serializer);
2640 }
2641
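/*
 * An Ethernet address is considered valid only if it is not a
 * multicast/broadcast address (low bit of the first octet clear) and is
 * not all zeros.
 */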
2642 static int
2643 em_is_valid_ether_addr(uint8_t *addr)
2644 {
2645         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2646
2647         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2648                 return(FALSE);
2649         else
2650                 return(TRUE);
2651 }
2652
2653 void 
2654 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2655 {
2656         pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2657 }
2658
2659 void 
2660 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2661 {
2662         *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2663 }
2664
2665 void
2666 em_pci_set_mwi(struct em_hw *hw)
2667 {
2668         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2669                          (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2670 }
2671
2672 void
2673 em_pci_clear_mwi(struct em_hw *hw)
2674 {
2675         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2676                          (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2677 }
2678
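/*
 * I/O-mapped register access: the register offset is written to the
 * IOADDR window at offset 0 of the I/O BAR and the data is then
 * transferred through the IODATA window at offset 4.
 */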
2679 uint32_t
2680 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2681 {
2682         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2683         return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2684 }
2685
2686 void
2687 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2688 {
2689         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2690         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2691 }
2692
2693 /*********************************************************************
2694  * 82544 Coexistence issue workaround.
2695  *    There are two issues:
2696  *      1. Transmit hang.
2697  *         Detected with the following equation:
2698  *             SIZE[3:0] + ADDR[2:0] = SUM[3:0]
2699  *         If SUM[3:0] is between 1 and 4, the hang can occur.
2700  *
2701  *      2. DAC (dual address cycle) issue.
2702  *         Detected with the same equation:
2703  *             SIZE[3:0] + ADDR[2:0] = SUM[3:0]
2704  *         If SUM[3:0] is between 9 and 0xC, the issue can occur.
2705  *
2706  *    WORKAROUND:
2707  *         Ensure a buffer never ends with a low nibble of 1-4 (hang)
2708  *         or 9-0xC (DAC); if it would, split the final four bytes
2709  *         into a separate descriptor.
2710  *
2711  *********************************************************************/
2712 static uint32_t
2713 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2714 {
2715         /* The issue depends on both the buffer's length and its address,
2716          * so check both before deciding whether the buffer must be split. */
2717         uint32_t safe_terminator;
2718         if (length <= 4) {
2719                 desc_array->descriptor[0].address = address;
2720                 desc_array->descriptor[0].length = length;
2721                 desc_array->elements = 1;
2722                 return(desc_array->elements);
2723         }
2724         safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2725         /* Not within 0x1-0x4 or 0x9-0xC: no workaround split is needed. */
2726         if (safe_terminator == 0 ||
2727             (safe_terminator > 4 && safe_terminator < 9) || 
2728             (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2729                 desc_array->descriptor[0].address = address;
2730                 desc_array->descriptor[0].length = length;
2731                 desc_array->elements = 1;
2732                 return(desc_array->elements);
2733         }
2734
2735         desc_array->descriptor[0].address = address;
2736         desc_array->descriptor[0].length = length - 4;
2737         desc_array->descriptor[1].address = address + (length - 4);
2738         desc_array->descriptor[1].length = 4;
2739         desc_array->elements = 2;
2740         return(desc_array->elements);
2741 }
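
     /*
      * Worked example (illustrative values only): a 64-byte segment whose
      * physical address ends in 0x6 gives
      *
      *      safe_terminator = (0x6 + (64 & 0xF)) & 0xF = 0x6,
      *
      * which is outside 0x1-0x4 and 0x9-0xC, so one descriptor is used.  A
      * 67-byte segment at the same address gives (0x6 + 0x3) & 0xF = 0x9, so
      * em_fill_descriptors() emits two descriptors: the first covering the
      * initial 63 bytes and the second covering the final 4 bytes.
      */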
2742
2743 /**********************************************************************
2744  *
2745  *  Update the board statistics counters. 
2746  *
2747  **********************************************************************/
2748 static void
2749 em_update_stats_counters(struct adapter *adapter)
2750 {
2751         struct ifnet   *ifp;
2752
2753         if (adapter->hw.media_type == em_media_type_copper ||
2754             (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2755                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2756                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2757         }
2758         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2759         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2760         adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2761         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2762
2763         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2764         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2765         adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2766         adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2767         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2768         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2769         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2770         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2771         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2772         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2773         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2774         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2775         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2776         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2777         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2778         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2779         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2780         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2781         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2782         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2783
2784         /* For the 64-bit byte counters the low dword must be read first;
2785          * both halves clear on the read of the high dword. */
2786
2787         adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); 
2788         adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2789         adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2790         adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
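
             /*
              * Illustrative only: a full 64-bit octet count would be recovered
              * as
              *
              *      uint64_t rx_octets =
              *          ((uint64_t)adapter->stats.gorch << 32) +
              *          adapter->stats.gorcl;
              *
              * the driver itself hands only the accumulated low dwords to the
              * ifnet statistics below.
              */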
2791
2792         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2793         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2794         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2795         adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2796         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2797
2798         adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2799         adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2800         adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2801         adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2802
2803         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2804         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2805         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2806         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2807         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2808         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2809         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2810         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2811         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2812         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
2813
2814         if (adapter->hw.mac_type >= em_82543) {
2815                 adapter->stats.algnerrc += 
2816                     E1000_READ_REG(&adapter->hw, ALGNERRC);
2817                 adapter->stats.rxerrc += 
2818                     E1000_READ_REG(&adapter->hw, RXERRC);
2819                 adapter->stats.tncrs += 
2820                     E1000_READ_REG(&adapter->hw, TNCRS);
2821                 adapter->stats.cexterr += 
2822                     E1000_READ_REG(&adapter->hw, CEXTERR);
2823                 adapter->stats.tsctc += 
2824                     E1000_READ_REG(&adapter->hw, TSCTC);
2825                 adapter->stats.tsctfc += 
2826                     E1000_READ_REG(&adapter->hw, TSCTFC);
2827         }
2828         ifp = &adapter->interface_data.ac_if;
2829
2830         /* Fill out the OS statistics structure */
2831         ifp->if_ibytes = adapter->stats.gorcl;
2832         ifp->if_obytes = adapter->stats.gotcl;
2833         ifp->if_imcasts = adapter->stats.mprc;
2834         ifp->if_collisions = adapter->stats.colc;
2835
2836         /* Rx Errors */
2837         ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2838             adapter->stats.crcerrs + adapter->stats.algnerrc +
2839             adapter->stats.rlec + adapter->stats.rnbc +
2840             adapter->stats.mpc + adapter->stats.cexterr;
2841
2842         /* Tx Errors */
2843         ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2844 }
2845
2846
2847 /**********************************************************************
2848  *
2849  *  This routine is called only when em_display_debug_stats is enabled.
2850  *  This routine provides a way to take a look at important statistics
2851  *  maintained by the driver and hardware.
2852  *
2853  **********************************************************************/
2854 static void
2855 em_print_debug_info(struct adapter *adapter)
2856 {
2857         device_t dev = adapter->dev;
2858         uint8_t *hw_addr = adapter->hw.hw_addr;
2859
2860         device_printf(dev, "Adapter hardware address = %p\n", hw_addr);
2861         device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2862                       E1000_READ_REG(&adapter->hw, TIDV),
2863                       E1000_READ_REG(&adapter->hw, TADV));
2864         device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2865                       E1000_READ_REG(&adapter->hw, RDTR),
2866                       E1000_READ_REG(&adapter->hw, RADV));
2867 #ifdef DBG_STATS
2868         device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2869         device_printf(dev, "CleanTxInterrupts = %ld\n",
2870                       adapter->clean_tx_interrupts);
2871 #endif
2872         device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2873                       (long long)adapter->tx_fifo_wrk,
2874                       (long long)adapter->tx_fifo_reset);
2875         device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2876                       E1000_READ_REG(&adapter->hw, TDH),
2877                       E1000_READ_REG(&adapter->hw, TDT));
2878         device_printf(dev, "Num Tx descriptors avail = %d\n",
2879                       adapter->num_tx_desc_avail);
2880         device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2881                       adapter->no_tx_desc_avail1);
2882         device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2883                       adapter->no_tx_desc_avail2);
2884         device_printf(dev, "Std mbuf failed = %ld\n",
2885                       adapter->mbuf_alloc_failed);
2886         device_printf(dev, "Std mbuf cluster failed = %ld\n",
2887                       adapter->mbuf_cluster_failed);
2888         device_printf(dev, "Driver dropped packets = %ld\n",
2889                       adapter->dropped_pkts);
2890 }
2891
2892 static void
2893 em_print_hw_stats(struct adapter *adapter)
2894 {
2895         device_t dev = adapter->dev;
2896
2897         device_printf(dev, "Adapter: %p\n", adapter);
2898
2899         device_printf(dev, "Excessive collisions = %lld\n",
2900                       (long long)adapter->stats.ecol);
2901         device_printf(dev, "Symbol errors = %lld\n",
2902                       (long long)adapter->stats.symerrs);
2903         device_printf(dev, "Sequence errors = %lld\n",
2904                       (long long)adapter->stats.sec);
2905         device_printf(dev, "Defer count = %lld\n",
2906                       (long long)adapter->stats.dc);
2907
2908         device_printf(dev, "Missed Packets = %lld\n",
2909                       (long long)adapter->stats.mpc);
2910         device_printf(dev, "Receive No Buffers = %lld\n",
2911                       (long long)adapter->stats.rnbc);
2912         device_printf(dev, "Receive length errors = %lld\n",
2913                       (long long)adapter->stats.rlec);
2914         device_printf(dev, "Receive errors = %lld\n",
2915                       (long long)adapter->stats.rxerrc);
2916         device_printf(dev, "Crc errors = %lld\n",
2917                       (long long)adapter->stats.crcerrs);
2918         device_printf(dev, "Alignment errors = %lld\n",
2919                       (long long)adapter->stats.algnerrc);
2920         device_printf(dev, "Carrier extension errors = %lld\n",
2921                       (long long)adapter->stats.cexterr);
2922
2923         device_printf(dev, "XON Rcvd = %lld\n",
2924                       (long long)adapter->stats.xonrxc);
2925         device_printf(dev, "XON Xmtd = %lld\n",
2926                       (long long)adapter->stats.xontxc);
2927         device_printf(dev, "XOFF Rcvd = %lld\n",
2928                       (long long)adapter->stats.xoffrxc);
2929         device_printf(dev, "XOFF Xmtd = %lld\n",
2930                       (long long)adapter->stats.xofftxc);
2931
2932         device_printf(dev, "Good Packets Rcvd = %lld\n",
2933                       (long long)adapter->stats.gprc);
2934         device_printf(dev, "Good Packets Xmtd = %lld\n",
2935                       (long long)adapter->stats.gptc);
2936 }
2937
2938 static int
2939 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
2940 {
2941         int error;
2942         int result;
2943         struct adapter *adapter;
2944
2945         result = -1;
2946         error = sysctl_handle_int(oidp, &result, 0, req);
2947
2948         if (error || !req->newptr)
2949                 return(error);
2950
2951         if (result == 1) {
2952                 adapter = (struct adapter *)arg1;
2953                 em_print_debug_info(adapter);
2954         }
2955
2956         return(error);
2957 }
2958
2959 static int
2960 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
2961 {
2962         int error;
2963         int result;
2964         struct adapter *adapter;
2965
2966         result = -1;
2967         error = sysctl_handle_int(oidp, &result, 0, req);
2968
2969         if (error || !req->newptr)
2970                 return(error);
2971
2972         if (result == 1) {
2973                 adapter = (struct adapter *)arg1;
2974                 em_print_hw_stats(adapter);
2975         }
2976
2977         return(error);
2978 }
2979
2980 static int
2981 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
2982 {
2983         struct em_int_delay_info *info;
2984         struct adapter *adapter;
2985         uint32_t regval;
2986         int error;
2987         int usecs;
2988         int ticks;
2989
2990         info = (struct em_int_delay_info *)arg1;
2991         adapter = info->adapter;
2992         usecs = info->value;
2993         error = sysctl_handle_int(oidp, &usecs, 0, req);
2994         if (error != 0 || req->newptr == NULL)
2995                 return(error);
2996         if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
2997                 return(EINVAL);
2998         info->value = usecs;
2999         ticks = E1000_USECS_TO_TICKS(usecs);
3000
3001         lwkt_serialize_enter(&adapter->serializer);
3002         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3003         regval = (regval & ~0xffff) | (ticks & 0xffff);
3004         /* Handle a few special cases. */
3005         switch (info->offset) {
3006         case E1000_RDTR:
3007         case E1000_82542_RDTR:
3008                 regval |= E1000_RDT_FPDB;
3009                 break;
3010         case E1000_TIDV:
3011         case E1000_82542_TIDV:
3012                 if (ticks == 0) {
3013                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3014                         /* Don't write 0 into the TIDV register. */
3015                         regval++;
3016                 } else
3017                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3018                 break;
3019         }
3020         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3021         lwkt_serialize_exit(&adapter->serializer);
3022         return(0);
3023 }
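
     /*
      * Worked example (illustrative; assumes the delay timers count in the
      * 1.024 microsecond units that E1000_USECS_TO_TICKS() converts to):
      * writing 32 through one of the delay sysctls stores info->value = 32 and
      * roughly 31 ticks in the low 16 bits of the corresponding register;
      * writing 0 to a transmit delay additionally clears E1000_TXD_CMD_IDE and
      * programs 1 rather than 0, matching the TIDV special case above.
      */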
3024
3025 static void
3026 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3027                         const char *description, struct em_int_delay_info *info,
3028                         int offset, int value)
3029 {
3030         info->adapter = adapter;
3031         info->offset = offset;
3032         info->value = value;
3033         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3034                         SYSCTL_CHILDREN(adapter->sysctl_tree),
3035                         OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3036                         info, 0, em_sysctl_int_delay, "I", description);
3037 }
3038
3039 static int
3040 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3041 {
3042         struct adapter *adapter = (void *)arg1;
3043         int error;
3044         int throttle;
3045
3046         throttle = em_int_throttle_ceil;
3047         error = sysctl_handle_int(oidp, &throttle, 0, req);
3048         if (error || req->newptr == NULL)
3049                 return error;
3050         if (throttle < 0 || throttle > 1000000000 / 256)
3051                 return EINVAL;
3052         if (throttle) {
3053                 /*
3054                  * Program the throttle in 256 ns increments, then recompute
3055                  * the ceiling from the programmed value for an exact rate.
3056                  */
3057                 throttle = 1000000000 / 256 / throttle;
3058                 lwkt_serialize_enter(&adapter->serializer);
3059                 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3060                 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3061                 lwkt_serialize_exit(&adapter->serializer);
3062         } else {
3063                 lwkt_serialize_enter(&adapter->serializer);
3064                 em_int_throttle_ceil = 0;
3065                 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3066                 lwkt_serialize_exit(&adapter->serializer);
3067         }
3068         device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n", 
3069                         em_int_throttle_ceil);
3070         return 0;
3071 }
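
     /*
      * Worked example (illustrative): requesting a ceiling of 8000
      * interrupts/sec programs ITR with
      *
      *      1000000000 / 256 / 8000 = 488   (in 256 ns units)
      *
      * and the ceiling is then recomputed as 1000000000 / 256 / 488 = 8004
      * interrupts/sec, the exact value reported by the device_printf() above.
      */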
3072