1 /**************************************************************************
2
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4
5 Copyright (c) 2001-2003, Intel Corporation
6 All rights reserved.
7
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10
11  1. Redistributions of source code must retain the above copyright notice,
12     this list of conditions and the following disclaimer.
13
14  2. Redistributions in binary form must reproduce the above copyright
15     notice, this list of conditions and the following disclaimer in the
16     documentation and/or other materials provided with the distribution.
17
18  3. Neither the name of the Intel Corporation nor the names of its
19     contributors may be used to endorse or promote products derived from
20     this software without specific prior written permission.
21
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
33
34 ***************************************************************************/
35
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.38 2005/10/04 02:06:46 sephe Exp $*/
38
39 #include "if_em.h"
40 #include <net/ifq_var.h>
41
42 /*********************************************************************
43  *  Set this to one to display debug statistics                                                   
44  *********************************************************************/
45 int             em_display_debug_stats = 0;
46
47 /*********************************************************************
48  *  Driver version
49  *********************************************************************/
50
51 char em_driver_version[] = "1.7.25";
52
53
54 /*********************************************************************
55  *  PCI Device ID Table
56  *
57  *  Used by probe to select devices to load on
58  *  Last field stores an index into em_strings
59  *  Last entry must be all 0s
60  *
61  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62  *********************************************************************/
63
64 static em_vendor_info_t em_vendor_info_array[] =
65 {
66         /* Intel(R) PRO/1000 Network Connection */
67         { 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
68         { 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
69         { 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
70         { 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
71         { 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
72         { 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
73         { 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
74         { 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
75         { 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
76         { 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
77         { 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
78         { 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
79         { 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
80         { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
81         { 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
82         { 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
83         { 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
84         { 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
85         { 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
86         { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
87         { 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
88         { 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
89         { 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
90         { 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
91         { 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
92         { 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
93         { 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
94         { 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
95         { 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
96         { 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
97         { 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
98         { 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
99         /* required last entry */
100         { 0, 0, 0, 0, 0}
101 };
102
103 /*********************************************************************
104  *  Table of branding strings for all supported NICs.
105  *********************************************************************/
106
107 static const char *em_strings[] = {
108         "Intel(R) PRO/1000 Network Connection"
109 };
110
111 /*********************************************************************
112  *  Function prototypes            
113  *********************************************************************/
114 static int      em_probe(device_t);
115 static int      em_attach(device_t);
116 static int      em_detach(device_t);
117 static int      em_shutdown(device_t);
118 static void     em_intr(void *);
119 static void     em_start(struct ifnet *);
120 static void     em_start_serialized(struct ifnet *);
121 static int      em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
122 static void     em_watchdog(struct ifnet *);
123 static void     em_init(void *);
124 static void     em_init_serialized(void *);
125 static void     em_stop(void *);
126 static void     em_media_status(struct ifnet *, struct ifmediareq *);
127 static int      em_media_change(struct ifnet *);
128 static void     em_identify_hardware(struct adapter *);
129 static void     em_local_timer(void *);
130 static int      em_hardware_init(struct adapter *);
131 static void     em_setup_interface(device_t, struct adapter *);
132 static int      em_setup_transmit_structures(struct adapter *);
133 static void     em_initialize_transmit_unit(struct adapter *);
134 static int      em_setup_receive_structures(struct adapter *);
135 static void     em_initialize_receive_unit(struct adapter *);
136 static void     em_enable_intr(struct adapter *);
137 static void     em_disable_intr(struct adapter *);
138 static void     em_free_transmit_structures(struct adapter *);
139 static void     em_free_receive_structures(struct adapter *);
140 static void     em_update_stats_counters(struct adapter *);
141 static void     em_clean_transmit_interrupts(struct adapter *);
142 static int      em_allocate_receive_structures(struct adapter *);
143 static int      em_allocate_transmit_structures(struct adapter *);
144 static void     em_process_receive_interrupts(struct adapter *, int);
145 static void     em_receive_checksum(struct adapter *, struct em_rx_desc *,
146                                     struct mbuf *);
147 static void     em_transmit_checksum_setup(struct adapter *, struct mbuf *,
148                                            uint32_t *, uint32_t *);
149 static void     em_set_promisc(struct adapter *);
150 static void     em_disable_promisc(struct adapter *);
151 static void     em_set_multi(struct adapter *);
152 static void     em_print_hw_stats(struct adapter *);
153 static void     em_print_link_status(struct adapter *);
154 static int      em_get_buf(int i, struct adapter *, struct mbuf *, int how);
155 static void     em_enable_vlans(struct adapter *);
156 static int      em_encap(struct adapter *, struct mbuf *);
157 static void     em_smartspeed(struct adapter *);
158 static int      em_82547_fifo_workaround(struct adapter *, int);
159 static void     em_82547_update_fifo_head(struct adapter *, int);
160 static int      em_82547_tx_fifo_reset(struct adapter *);
161 static void     em_82547_move_tail(void *arg);
162 static void     em_82547_move_tail_serialized(void *arg);
163 static int      em_dma_malloc(struct adapter *, bus_size_t,
164                               struct em_dma_alloc *, int);
165 static void     em_dma_free(struct adapter *, struct em_dma_alloc *);
166 static void     em_print_debug_info(struct adapter *);
167 static int      em_is_valid_ether_addr(uint8_t *);
168 static int      em_sysctl_stats(SYSCTL_HANDLER_ARGS);
169 static int      em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
170 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length, 
171                                    PDESC_ARRAY desc_array);
172 static int      em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
173 static int      em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
174 static void     em_add_int_delay_sysctl(struct adapter *, const char *,
175                                         const char *,
176                                         struct em_int_delay_info *, int, int);
177
178 /*********************************************************************
179  *  FreeBSD Device Interface Entry Points                    
180  *********************************************************************/
181
182 static device_method_t em_methods[] = {
183         /* Device interface */
184         DEVMETHOD(device_probe, em_probe),
185         DEVMETHOD(device_attach, em_attach),
186         DEVMETHOD(device_detach, em_detach),
187         DEVMETHOD(device_shutdown, em_shutdown),
188         {0, 0}
189 };
190
191 static driver_t em_driver = {
192         "em", em_methods, sizeof(struct adapter),
193 };
194
195 static devclass_t em_devclass;
196
197 DECLARE_DUMMY_MODULE(if_em);
198 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
199
200 /*********************************************************************
201  *  Tunable default values.
202  *********************************************************************/
203
204 #define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
205 #define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)
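/*
 * The macros above convert between microseconds and the register "ticks"
 * used by the chip's interrupt delay registers; the 1024/1000 scaling
 * reflects a tick granularity of 1.024 usec.  A worked example, assuming
 * EM_TIDV is 64 ticks: E1000_TICKS_TO_USECS(64) = (1024*64 + 500)/1000
 * = 66 usec, which then becomes the default for hw.em.tx_int_delay below.
 */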
206
207 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
208 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
209 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
210 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
211 static int em_int_throttle_ceil = 10000;
212
213 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
214 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
215 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
216 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
217 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
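/*
 * A usage sketch for the tunables above, assuming the standard loader
 * tunable mechanism: they are read once when the module is loaded and
 * would typically be set in /boot/loader.conf, e.g.
 *
 *      hw.em.int_throttle_ceil="8000"
 *      hw.em.rx_int_delay="0"
 */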
218
219 /*********************************************************************
220  *  Device identification routine
221  *
222  *  em_probe determines if the driver should be loaded on an
223  *  adapter based on the PCI vendor/device ID of the adapter.
224  *
225  *  return 0 on success, positive on failure
226  *********************************************************************/
227
228 static int
229 em_probe(device_t dev)
230 {
231         em_vendor_info_t *ent;
232
233         uint16_t pci_vendor_id = 0;
234         uint16_t pci_device_id = 0;
235         uint16_t pci_subvendor_id = 0;
236         uint16_t pci_subdevice_id = 0;
237         char adapter_name[60];
238
239         INIT_DEBUGOUT("em_probe: begin");
240
241         pci_vendor_id = pci_get_vendor(dev);
242         if (pci_vendor_id != EM_VENDOR_ID)
243                 return(ENXIO);
244
245         pci_device_id = pci_get_device(dev);
246         pci_subvendor_id = pci_get_subvendor(dev);
247         pci_subdevice_id = pci_get_subdevice(dev);
248
249         ent = em_vendor_info_array;
250         while (ent->vendor_id != 0) {
251                 if ((pci_vendor_id == ent->vendor_id) &&
252                     (pci_device_id == ent->device_id) &&
253
254                     ((pci_subvendor_id == ent->subvendor_id) ||
255                      (ent->subvendor_id == PCI_ANY_ID)) &&
256
257                     ((pci_subdevice_id == ent->subdevice_id) ||
258                      (ent->subdevice_id == PCI_ANY_ID))) {
259                         snprintf(adapter_name, sizeof(adapter_name),
260                                  "%s, Version - %s",  em_strings[ent->index], 
261                                  em_driver_version);
262                         device_set_desc_copy(dev, adapter_name);
263                         return(0);
264                 }
265                 ent++;
266         }
267
268         return(ENXIO);
269 }
270
271 /*********************************************************************
272  *  Device initialization routine
273  *
274  *  The attach entry point is called when the driver is being loaded.
275  *  This routine identifies the type of hardware, allocates all resources 
276  *  and initializes the hardware.     
277  *  
278  *  return 0 on success, positive on failure
279  *********************************************************************/
280
281 static int
282 em_attach(device_t dev)
283 {
284         struct adapter *adapter;
285         int tsize, rsize;
286         int i, val, rid;
287         int error = 0;
288
289         INIT_DEBUGOUT("em_attach: begin");
290
291         adapter = device_get_softc(dev);
292
293         lwkt_serialize_init(&adapter->serializer);
294
295         callout_init(&adapter->timer);
296         callout_init(&adapter->tx_fifo_timer);
297
298         adapter->dev = dev;
299         adapter->osdep.dev = dev;
300
301         /* SYSCTL stuff */
302         sysctl_ctx_init(&adapter->sysctl_ctx);
303         adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
304                                                SYSCTL_STATIC_CHILDREN(_hw),
305                                                OID_AUTO, 
306                                                device_get_nameunit(dev),
307                                                CTLFLAG_RD,
308                                                0, "");
309
310         if (adapter->sysctl_tree == NULL) {
311                 error = EIO;
312                 goto fail;
313         }
314
315         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
316                         SYSCTL_CHILDREN(adapter->sysctl_tree),
317                         OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, 
318                         (void *)adapter, 0,
319                         em_sysctl_debug_info, "I", "Debug Information");
320
321         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
322                         SYSCTL_CHILDREN(adapter->sysctl_tree),
323                         OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, 
324                         (void *)adapter, 0,
325                         em_sysctl_stats, "I", "Statistics");
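
        /*
         * The sysctl nodes above are created under hw.<nameunit>
         * (e.g. hw.em0).  As a usage sketch, writing a non-zero value
         * should dump the corresponding report to the console, e.g.
         * "sysctl hw.em0.stats=1" or "sysctl hw.em0.debug_info=1".
         */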
326
327         /* Determine hardware revision */
328         em_identify_hardware(adapter);
329
330         /* Set up some sysctls for the tunable interrupt delays */
331         em_add_int_delay_sysctl(adapter, "rx_int_delay",
332                                 "receive interrupt delay in usecs",
333                                 &adapter->rx_int_delay,
334                                 E1000_REG_OFFSET(&adapter->hw, RDTR),
335                                 em_rx_int_delay_dflt);
336         em_add_int_delay_sysctl(adapter, "tx_int_delay",
337                                 "transmit interrupt delay in usecs",
338                                 &adapter->tx_int_delay,
339                                 E1000_REG_OFFSET(&adapter->hw, TIDV),
340                                 em_tx_int_delay_dflt);
341         if (adapter->hw.mac_type >= em_82540) {
342                 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
343                                         "receive interrupt delay limit in usecs",
344                                         &adapter->rx_abs_int_delay,
345                                         E1000_REG_OFFSET(&adapter->hw, RADV),
346                                         em_rx_abs_int_delay_dflt);
347                 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
348                                         "transmit interrupt delay limit in usecs",
349                                         &adapter->tx_abs_int_delay,
350                                         E1000_REG_OFFSET(&adapter->hw, TADV),
351                                         em_tx_abs_int_delay_dflt);
352                 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
353                         SYSCTL_CHILDREN(adapter->sysctl_tree),
354                         OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
355                         adapter, 0, em_sysctl_int_throttle, "I", NULL);
356         }
357      
358         /* Parameters (to be read from user) */   
359         adapter->num_tx_desc = EM_MAX_TXD;
360         adapter->num_rx_desc = EM_MAX_RXD;
361         adapter->hw.autoneg = DO_AUTO_NEG;
362         adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
363         adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
364         adapter->hw.tbi_compatibility_en = TRUE;
365         adapter->rx_buffer_len = EM_RXBUFFER_2048;
366
367         /*
368          * These parameters control the automatic generation (Tx) of,
369          * and the response (Rx) to, Ethernet PAUSE frames.
370          */
371         adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
372         adapter->hw.fc_low_water  = FC_DEFAULT_LO_THRESH;
373         adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
374         adapter->hw.fc_send_xon   = TRUE;
375         adapter->hw.fc = em_fc_full;
376
377         adapter->hw.phy_init_script = 1;
378         adapter->hw.phy_reset_disable = FALSE;
379
380 #ifndef EM_MASTER_SLAVE
381         adapter->hw.master_slave = em_ms_hw_default;
382 #else
383         adapter->hw.master_slave = EM_MASTER_SLAVE;
384 #endif
385
386         /* 
387          * Set the max frame size assuming standard ethernet 
388          * sized frames 
389          */   
390         adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
391
392         adapter->hw.min_frame_size = 
393             MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
394
395         /* 
396          * This controls when hardware reports transmit completion 
397          * status. 
398          */
399         adapter->hw.report_tx_early = 1;
400
401         rid = EM_MMBA;
402         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
403                                                      &rid, RF_ACTIVE);
404         if (!(adapter->res_memory)) {
405                 device_printf(dev, "Unable to allocate bus resource: memory\n");
406                 error = ENXIO;
407                 goto fail;
408         }
409         adapter->osdep.mem_bus_space_tag = 
410             rman_get_bustag(adapter->res_memory);
411         adapter->osdep.mem_bus_space_handle = 
412             rman_get_bushandle(adapter->res_memory);
413         adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
414
415         if (adapter->hw.mac_type > em_82543) {
416                 /* Figure out where our I/O BAR is */
417                 rid = EM_MMBA;
418                 for (i = 0; i < 5; i++) {
419                         val = pci_read_config(dev, rid, 4);
420                         if (val & 0x00000001) {
421                                 adapter->io_rid = rid;
422                                 break;
423                         }
424                         rid += 4;
425                 }
426
427                 adapter->res_ioport = bus_alloc_resource_any(dev,
428                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
429                 if (!(adapter->res_ioport)) {
430                         device_printf(dev, "Unable to allocate bus resource: ioport\n");
431                         error = ENXIO;
432                         goto fail;
433                 }
434
435                 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
436                 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
437         }
438
439         rid = 0x0;
440         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
441             &rid, RF_SHAREABLE | RF_ACTIVE);
442         if (!(adapter->res_interrupt)) {
443                 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
444                 error = ENXIO;
445                 goto fail;
446         }
447
448         adapter->hw.back = &adapter->osdep;
449
450         /* Initialize eeprom parameters */
451         em_init_eeprom_params(&adapter->hw);
452
453         tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
454
455         /* Allocate Transmit Descriptor ring */
456         if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
457                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
458                 error = ENOMEM;
459                 goto fail;
460         }
461         adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
462
463         rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
464
465         /* Allocate Receive Descriptor ring */
466         if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
467                 device_printf(dev, "Unable to allocate rx_desc memory\n");
468                 error = ENOMEM;
469                 goto fail;
470         }
471         adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
472
473         /* Initialize the hardware */
474         if (em_hardware_init(adapter)) {
475                 device_printf(dev, "Unable to initialize the hardware\n");
476                 error = EIO;
477                 goto fail;
478         }
479
480         /* Copy the permanent MAC address out of the EEPROM */
481         if (em_read_mac_addr(&adapter->hw) < 0) {
482                 device_printf(dev, "EEPROM read error while reading mac address\n");
483                 error = EIO;
484                 goto fail;
485         }
486
487         if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
488                 device_printf(dev, "Invalid mac address\n");
489                 error = EIO;
490                 goto fail;
491         }
492
493         /* Setup OS specific network interface */
494         em_setup_interface(dev, adapter);
495
496         /* Initialize statistics */
497         em_clear_hw_cntrs(&adapter->hw);
498         em_update_stats_counters(adapter);
499         adapter->hw.get_link_status = 1;
500         em_check_for_link(&adapter->hw);
501
502         /* Print the link status */
503         if (adapter->link_active == 1) {
504                 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, 
505                                         &adapter->link_duplex);
506                 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
507                     adapter->link_speed,
508                     adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
509         } else
510                 device_printf(dev, "Speed: N/A, Duplex: N/A\n");
511
512         /* Identify 82544 on PCIX */
513         em_get_bus_info(&adapter->hw);  
514         if (adapter->hw.bus_type == em_bus_type_pcix &&
515             adapter->hw.mac_type == em_82544)
516                 adapter->pcix_82544 = TRUE;
517         else
518                 adapter->pcix_82544 = FALSE;
519
520         error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_MISC,
521                            (void (*)(void *)) em_intr, adapter,
522                            &adapter->int_handler_tag, &adapter->serializer);
523         if (error) {
524                 device_printf(dev, "Error registering interrupt handler!\n");
525                 ether_ifdetach(&adapter->interface_data.ac_if);
526                 goto fail;
527         }
528
529         INIT_DEBUGOUT("em_attach: end");
530         return(0);
531
532 fail:
533         em_detach(dev);
534         return(error);
535 }
536
537 /*********************************************************************
538  *  Device removal routine
539  *
540  *  The detach entry point is called when the driver is being removed.
541  *  This routine stops the adapter and deallocates all the resources
542  *  that were allocated for driver operation.
543  *  
544  *  return 0 on success, positive on failure
545  *********************************************************************/
546
547 static int
548 em_detach(device_t dev)
549 {
550         struct adapter * adapter = device_get_softc(dev);
551
552         INIT_DEBUGOUT("em_detach: begin");
553
554         lwkt_serialize_enter(&adapter->serializer);
555         adapter->in_detach = 1;
556
557         if (device_is_attached(dev)) {
558                 em_stop(adapter);
559                 em_phy_hw_reset(&adapter->hw);
560                 ether_ifdetach(&adapter->interface_data.ac_if);
561         }
562         bus_generic_detach(dev);
563
564         if (adapter->res_interrupt != NULL) {
565                 bus_teardown_intr(dev, adapter->res_interrupt, 
566                                   adapter->int_handler_tag);
567                 bus_release_resource(dev, SYS_RES_IRQ, 0, 
568                                      adapter->res_interrupt);
569         }
570         if (adapter->res_memory != NULL) {
571                 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA, 
572                                      adapter->res_memory);
573         }
574
575         if (adapter->res_ioport != NULL) {
576                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 
577                                      adapter->res_ioport);
578         }
579
580         /* Free Transmit Descriptor ring */
581         if (adapter->tx_desc_base != NULL) {
582                 em_dma_free(adapter, &adapter->txdma);
583                 adapter->tx_desc_base = NULL;
584         }
585
586         /* Free Receive Descriptor ring */
587         if (adapter->rx_desc_base != NULL) {
588                 em_dma_free(adapter, &adapter->rxdma);
589                 adapter->rx_desc_base = NULL;
590         }
591
592         adapter->sysctl_tree = NULL;
593         sysctl_ctx_free(&adapter->sysctl_ctx);
594
595         lwkt_serialize_exit(&adapter->serializer);
596         return(0);
597 }
598
599 /*********************************************************************
600  *
601  *  Shutdown entry point
602  *
603  **********************************************************************/ 
604
605 static int
606 em_shutdown(device_t dev)
607 {
608         struct adapter *adapter = device_get_softc(dev);
609         em_stop(adapter);
610         return(0);
611 }
612
613 /*********************************************************************
614  *  Transmit entry point
615  *
616  *  em_start is called by the stack to initiate a transmit.
617  *  The driver will remain in this routine as long as there are
618  *  packets to transmit and transmit resources are available.
619  *  In case resources are not available, the stack is notified and
620  *  the packet is requeued.
621  **********************************************************************/
622
623 static void
624 em_start(struct ifnet *ifp)
625 {
626         struct adapter *adapter = ifp->if_softc;
627
628         lwkt_serialize_enter(&adapter->serializer);
629         em_start_serialized(ifp);
630         lwkt_serialize_exit(&adapter->serializer);
631 }
632
633 static void
634 em_start_serialized(struct ifnet *ifp)
635 {
636         struct mbuf *m_head;
637         struct adapter *adapter = ifp->if_softc;
638
639         if (!adapter->link_active)
640                 return;
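
        /*
         * Note on the loop below: each frame is only peeked at with
         * ifq_poll() and is not dequeued until em_encap() has successfully
         * claimed transmit descriptors for it.  If descriptors run out the
         * frame stays at the head of the send queue, IFF_OACTIVE is set
         * and we stop until resources are reclaimed.
         */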
641         while (!ifq_is_empty(&ifp->if_snd)) {
642                 m_head = ifq_poll(&ifp->if_snd);
643
644                 if (m_head == NULL)
645                         break;
646
647                 if (em_encap(adapter, m_head)) { 
648                         ifp->if_flags |= IFF_OACTIVE;
649                         break;
650                 }
651                 m_head = ifq_dequeue(&ifp->if_snd);
652
653                 /* Send a copy of the frame to the BPF listener */
654                 BPF_MTAP(ifp, m_head);
655         
656                 /* Set timeout in case hardware has problems transmitting */
657                 ifp->if_timer = EM_TX_TIMEOUT;        
658         }
659 }
660
661 /*********************************************************************
662  *  Ioctl entry point
663  *
664  *  em_ioctl is called when the user wants to configure the
665  *  interface.
666  *
667  *  return 0 on success, positive on failure
668  **********************************************************************/
669
670 static int
671 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
672 {
673         int mask, error = 0;
674         struct ifreq *ifr = (struct ifreq *) data;
675         struct adapter *adapter = ifp->if_softc;
676
677         lwkt_serialize_enter(&adapter->serializer);
678
679         if (adapter->in_detach)
680                 goto out;
681
682         switch (command) {
683         case SIOCSIFADDR:
684         case SIOCGIFADDR:
685                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
686                 lwkt_serialize_exit(&adapter->serializer);
687                 ether_ioctl(ifp, command, data);
688                 lwkt_serialize_enter(&adapter->serializer);
689                 break;
690         case SIOCSIFMTU:
691                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
692                 if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
693                         error = EINVAL;
694                 } else {
695                         ifp->if_mtu = ifr->ifr_mtu;
696                         adapter->hw.max_frame_size = 
697                         ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
698                         em_init_serialized(adapter);
699                 }
700                 break;
701         case SIOCSIFFLAGS:
702                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
703                 if (ifp->if_flags & IFF_UP) {
704                         if (!(ifp->if_flags & IFF_RUNNING))
705                                 em_init_serialized(adapter);
706                         em_disable_promisc(adapter);
707                         em_set_promisc(adapter);
708                 } else {
709                         if (ifp->if_flags & IFF_RUNNING)
710                                 em_stop(adapter);
711                 }
712                 break;
713         case SIOCADDMULTI:
714         case SIOCDELMULTI:
715                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
716                 if (ifp->if_flags & IFF_RUNNING) {
717                         em_disable_intr(adapter);
718                         em_set_multi(adapter);
719                         if (adapter->hw.mac_type == em_82542_rev2_0)
720                                 em_initialize_receive_unit(adapter);
721                         em_enable_intr(adapter);
722                 }
723                 break;
724         case SIOCSIFMEDIA:
725         case SIOCGIFMEDIA:
726                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
727                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
728                 break;
729         case SIOCSIFCAP:
730                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
731                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
732                 if (mask & IFCAP_HWCSUM) {
733                         if (IFCAP_HWCSUM & ifp->if_capenable)
734                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
735                         else
736                                 ifp->if_capenable |= IFCAP_HWCSUM;
737                         if (ifp->if_flags & IFF_RUNNING)
738                                 em_init_serialized(adapter);
739                 }
740                 break;
741         default:
742                 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
743                 error = EINVAL;
744         }
745
746 out:
747         lwkt_serialize_exit(&adapter->serializer);
748         return(error);
749 }
750
751 /*********************************************************************
752  *  Watchdog entry point
753  *
754  *  This routine is called whenever the hardware stops transmitting.
755  *
756  **********************************************************************/
757
758 static void
759 em_watchdog(struct ifnet *ifp)
760 {
761         struct adapter * adapter;
762         adapter = ifp->if_softc;
763
764         /* If we are in this routine because of pause frames, then
765          * don't reset the hardware.
766          */
767         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
768                 ifp->if_timer = EM_TX_TIMEOUT;
769                 return;
770         }
771
772         if (em_check_for_link(&adapter->hw))
773                 if_printf(ifp, "watchdog timeout -- resetting\n");
774
775         ifp->if_flags &= ~IFF_RUNNING;
776
777         em_init(adapter);
778
779         ifp->if_oerrors++;
780 }
781
782 /*********************************************************************
783  *  Init entry point
784  *
785  *  This routine is used in two ways. It is used by the stack as
786  *  the init entry point in the network interface structure. It is also used
787  *  by the driver as a hw/sw initialization routine to get to a 
788  *  consistent state.
789  *
790  *  return 0 on success, positive on failure
791  **********************************************************************/
792
793 static void
794 em_init(void *arg)
795 {
796         struct adapter *adapter = arg;
797
798         lwkt_serialize_enter(&adapter->serializer);
799         em_init_serialized(arg);
800         lwkt_serialize_exit(&adapter->serializer);
801 }
802
803 static void
804 em_init_serialized(void *arg)
805 {
806         struct adapter *adapter = arg;
807         struct ifnet *ifp = &adapter->interface_data.ac_if;
808
809         INIT_DEBUGOUT("em_init: begin");
810
811         em_stop(adapter);
812
813         /* Get the latest mac address, User can use a LAA */
814         bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
815               ETHER_ADDR_LEN);
816
817         /* Initialize the hardware */
818         if (em_hardware_init(adapter)) {
819                 if_printf(ifp, "Unable to initialize the hardware\n");
820                 return;
821         }
822
823         em_enable_vlans(adapter);
824
825         /* Prepare transmit descriptors and buffers */
826         if (em_setup_transmit_structures(adapter)) {
827                 if_printf(ifp, "Could not setup transmit structures\n");
828                 em_stop(adapter); 
829                 return;
830         }
831         em_initialize_transmit_unit(adapter);
832
833         /* Setup Multicast table */
834         em_set_multi(adapter);
835
836         /* Prepare receive descriptors and buffers */
837         if (em_setup_receive_structures(adapter)) {
838                 if_printf(ifp, "Could not setup receive structures\n");
839                 em_stop(adapter);
840                 return;
841         }
842         em_initialize_receive_unit(adapter);
843         
844         /* Don't lose promiscuous settings */
845         em_set_promisc(adapter);
846
847         ifp->if_flags |= IFF_RUNNING;
848         ifp->if_flags &= ~IFF_OACTIVE;
849
850         if (adapter->hw.mac_type >= em_82543) {
851                 if (ifp->if_capenable & IFCAP_TXCSUM)
852                         ifp->if_hwassist = EM_CHECKSUM_FEATURES;
853                 else
854                         ifp->if_hwassist = 0;
855         }
856
857         callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
858         em_clear_hw_cntrs(&adapter->hw);
859         em_enable_intr(adapter);
860
861         /* Don't reset the phy next time init gets called */
862         adapter->hw.phy_reset_disable = TRUE;
863 }
864
865 #ifdef DEVICE_POLLING
866
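/*
 * Polling entry point, compiled in only with DEVICE_POLLING.  The stack
 * calls this periodically instead of relying on interrupts: POLL_REGISTER
 * and POLL_DEREGISTER turn the chip's interrupts off and on, while
 * POLL_AND_CHECK_STATUS additionally checks for link state changes before
 * falling through to the normal receive/transmit processing.
 */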
867 static void
868 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
869 {
870         struct adapter *adapter = ifp->if_softc;
871         uint32_t reg_icr;
872
873         lwkt_serialize_enter(&adapter->serializer);
874         switch(cmd) {
875         case POLL_REGISTER:
876                 em_disable_intr(adapter);
877                 break;
878         case POLL_DEREGISTER:
879                 em_enable_intr(adapter);
880                 break;
881         case POLL_AND_CHECK_STATUS:
882                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
883                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
884                         callout_stop(&adapter->timer);
885                         adapter->hw.get_link_status = 1;
886                         em_check_for_link(&adapter->hw);
887                         em_print_link_status(adapter);
888                         callout_reset(&adapter->timer, 2*hz, em_local_timer,
889                                       adapter);
890                 }
891                 /* fall through */
892         case POLL_ONLY:
893                 if (ifp->if_flags & IFF_RUNNING) {
894                         em_process_receive_interrupts(adapter, count);
895                         em_clean_transmit_interrupts(adapter);
896                 }
897                 if (ifp->if_flags & IFF_RUNNING) {
898                         if (!ifq_is_empty(&ifp->if_snd))
899                                 em_start_serialized(ifp);
900                 }
901                 break;
902         }
903         lwkt_serialize_exit(&adapter->serializer);
904 }
905
906 #endif /* DEVICE_POLLING */
907
908 /*********************************************************************
909  *
910  *  Interrupt Service routine
911  *
912  **********************************************************************/
913 static void
914 em_intr(void *arg)
915 {
916         uint32_t reg_icr;
917         struct ifnet *ifp;
918         struct adapter *adapter = arg;
919
920         ifp = &adapter->interface_data.ac_if;  
921
922         reg_icr = E1000_READ_REG(&adapter->hw, ICR);
923         if (!reg_icr)
924                 return;
925
926         /* Link status change */
927         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
928                 callout_stop(&adapter->timer);
929                 adapter->hw.get_link_status = 1;
930                 em_check_for_link(&adapter->hw);
931                 em_print_link_status(adapter);
932                 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
933         }
934
935         /*
936          * note: do not attempt to improve efficiency by looping.  This 
937          * only results in unnecessary piecemeal collection of received
938          * packets and unnecessary piecemeal cleanups of the transmit ring.
939          */
940         if (ifp->if_flags & IFF_RUNNING) {
941                 em_process_receive_interrupts(adapter, -1);
942                 em_clean_transmit_interrupts(adapter);
943         }
944
945         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
946                 em_start_serialized(ifp);
947 }
948
949 /*********************************************************************
950  *
951  *  Media Ioctl callback
952  *
953  *  This routine is called whenever the user queries the status of
954  *  the interface using ifconfig.
955  *
956  **********************************************************************/
957 static void
958 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
959 {
960         struct adapter * adapter = ifp->if_softc;
961
962         INIT_DEBUGOUT("em_media_status: begin");
963
964         em_check_for_link(&adapter->hw);
965         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
966                 if (adapter->link_active == 0) {
967                         em_get_speed_and_duplex(&adapter->hw, 
968                                                 &adapter->link_speed, 
969                                                 &adapter->link_duplex);
970                         adapter->link_active = 1;
971                 }
972         } else {
973                 if (adapter->link_active == 1) {
974                         adapter->link_speed = 0;
975                         adapter->link_duplex = 0;
976                         adapter->link_active = 0;
977                 }
978         }
979
980         ifmr->ifm_status = IFM_AVALID;
981         ifmr->ifm_active = IFM_ETHER;
982
983         if (!adapter->link_active)
984                 return;
985
986         ifmr->ifm_status |= IFM_ACTIVE;
987
988         if (adapter->hw.media_type == em_media_type_fiber) {
989                 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
990         } else {
991                 switch (adapter->link_speed) {
992                 case 10:
993                         ifmr->ifm_active |= IFM_10_T;
994                         break;
995                 case 100:
996                         ifmr->ifm_active |= IFM_100_TX;
997                         break;
998                 case 1000:
999                         ifmr->ifm_active |= IFM_1000_T;
1000                         break;
1001                 }
1002                 if (adapter->link_duplex == FULL_DUPLEX)
1003                         ifmr->ifm_active |= IFM_FDX;
1004                 else
1005                         ifmr->ifm_active |= IFM_HDX;
1006         }
1007 }
1008
1009 /*********************************************************************
1010  *
1011  *  Media Ioctl callback
1012  *
1013  *  This routine is called when the user changes speed/duplex using
1014  *  media/mediaopt option with ifconfig.
1015  *
1016  **********************************************************************/
1017 static int
1018 em_media_change(struct ifnet *ifp)
1019 {
1020         struct adapter * adapter = ifp->if_softc;
1021         struct ifmedia  *ifm = &adapter->media;
1022
1023         INIT_DEBUGOUT("em_media_change: begin");
1024
1025         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1026                 return(EINVAL);
1027
1028         lwkt_serialize_enter(&adapter->serializer);
1029
1030         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1031         case IFM_AUTO:
1032                 adapter->hw.autoneg = DO_AUTO_NEG;
1033                 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1034                 break;
1035         case IFM_1000_SX:
1036         case IFM_1000_T:
1037                 adapter->hw.autoneg = DO_AUTO_NEG;
1038                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1039                 break;
1040         case IFM_100_TX:
1041                 adapter->hw.autoneg = FALSE;
1042                 adapter->hw.autoneg_advertised = 0;
1043                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1044                         adapter->hw.forced_speed_duplex = em_100_full;
1045                 else
1046                         adapter->hw.forced_speed_duplex = em_100_half;
1047                 break;
1048         case IFM_10_T:
1049                 adapter->hw.autoneg = FALSE;
1050                 adapter->hw.autoneg_advertised = 0;
1051                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1052                         adapter->hw.forced_speed_duplex = em_10_full;
1053                 else
1054                         adapter->hw.forced_speed_duplex = em_10_half;
1055                 break;
1056         default:
1057                 if_printf(ifp, "Unsupported media type\n");
1058         }
1059         /*
1060          * As the speed/duplex settings may have changed we need to
1061          * reset the PHY.
1062          */
1063         adapter->hw.phy_reset_disable = FALSE;
1064
1065         em_init_serialized(adapter);
1066
1067         lwkt_serialize_exit(&adapter->serializer);
1068         return(0);
1069 }
1070
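/*
 * Callback for bus_dmamap_load_mbuf().  Since em_encap() loads the map
 * with BUS_DMA_NOWAIT, this is expected to run before the load call
 * returns; it records the segment count and copies the DMA segment list
 * into the caller-supplied struct em_q so em_encap() can walk it.
 */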
1071 static void
1072 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1073          int error)
1074 {
1075         struct em_q *q = arg;
1076
1077         if (error)
1078                 return;
1079         KASSERT(nsegs <= EM_MAX_SCATTER,
1080                 ("Too many DMA segments returned when mapping tx packet"));
1081         q->nsegs = nsegs;
1082         bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1083 }
1084
1085 #define EM_FIFO_HDR              0x10
1086 #define EM_82547_PKT_THRESH      0x3e0
1087 #define EM_82547_TX_FIFO_SIZE    0x2800
1088 #define EM_82547_TX_FIFO_BEGIN   0xf00
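/*
 * For reference, the constants above in decimal: the per-packet FIFO
 * header is 0x10 = 16 bytes, the 82547 Tx packet FIFO is 0x2800 = 10240
 * bytes, its pointers are reset to offset 0xf00 = 3840, and 0x3e0 = 992
 * bytes is the slack added to the remaining FIFO space by
 * em_82547_fifo_workaround() below.
 */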
1089 /*********************************************************************
1090  *
1091  *  This routine maps the mbufs to tx descriptors.
1092  *
1093  *  return 0 on success, positive on failure
1094  **********************************************************************/
1095 static int
1096 em_encap(struct adapter *adapter, struct mbuf *m_head)
1097 {
1098         uint32_t txd_upper;
1099         uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1100         int i, j, error;
1101         uint64_t address;
1102
1103         /* For 82544 Workaround */
1104         DESC_ARRAY desc_array;
1105         uint32_t array_elements;
1106         uint32_t counter;
1107
1108         struct ifvlan *ifv = NULL;
1109         struct em_q q;
1110         struct em_buffer *tx_buffer = NULL;
1111         struct em_tx_desc *current_tx_desc = NULL;
1112         struct ifnet *ifp = &adapter->interface_data.ac_if;
1113
1114         /*
1115          * Force a cleanup if number of TX descriptors
1116          * available hits the threshold
1117          */
1118         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1119                 em_clean_transmit_interrupts(adapter);
1120                 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1121                         adapter->no_tx_desc_avail1++;
1122                         return(ENOBUFS);
1123                 }
1124         }
1125         /*
1126          * Map the packet for DMA.
1127          */
1128         if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1129                 adapter->no_tx_map_avail++;
1130                 return(ENOMEM);
1131         }
1132         error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1133                                      &q, BUS_DMA_NOWAIT);
1134         if (error != 0) {
1135                 adapter->no_tx_dma_setup++;
1136                 bus_dmamap_destroy(adapter->txtag, q.map);
1137                 return(error);
1138         }
1139         KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1140
1141         if (q.nsegs > adapter->num_tx_desc_avail) {
1142                 adapter->no_tx_desc_avail2++;
1143                 bus_dmamap_unload(adapter->txtag, q.map);
1144                 bus_dmamap_destroy(adapter->txtag, q.map);
1145                 return(ENOBUFS);
1146         }
1147
1148         if (ifp->if_hwassist > 0) {
1149                 em_transmit_checksum_setup(adapter,  m_head,
1150                                            &txd_upper, &txd_lower);
1151         }
1152         else 
1153                 txd_upper = txd_lower = 0;
1154
1155         /* Find out if we are in vlan mode */
1156         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1157             m_head->m_pkthdr.rcvif != NULL &&
1158             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1159                 ifv = m_head->m_pkthdr.rcvif->if_softc;
1160
1161         i = adapter->next_avail_tx_desc;
1162         if (adapter->pcix_82544) {
1163                 txd_saved = i;
1164                 txd_used = 0;
1165         }
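
        /*
         * Walk the DMA segment list.  On an 82544 sitting on a PCI-X bus
         * a segment may have to be split across several descriptors by
         * em_fill_descriptors() to avoid problematic address/length
         * combinations; on all other chips one segment maps directly to
         * one descriptor.
         */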
1166         for (j = 0; j < q.nsegs; j++) {
1167                 /* If adapter is 82544 and on PCIX bus */
1168                 if(adapter->pcix_82544) {
1169                         array_elements = 0;
1170                         address = htole64(q.segs[j].ds_addr);
1171                         /* 
1172                          * Check the Address and Length combination and
1173                          * split the data accordingly
1174                          */
1175                         array_elements = em_fill_descriptors(address,
1176                                                              htole32(q.segs[j].ds_len),
1177                                                              &desc_array);
1178                         for (counter = 0; counter < array_elements; counter++) {
1179                                 if (txd_used == adapter->num_tx_desc_avail) {
1180                                         adapter->next_avail_tx_desc = txd_saved;
1181                                         adapter->no_tx_desc_avail2++;
1182                                         bus_dmamap_unload(adapter->txtag, q.map);
1183                                         bus_dmamap_destroy(adapter->txtag, q.map);
1184                                         return(ENOBUFS);
1185                                 }
1186                                 tx_buffer = &adapter->tx_buffer_area[i];
1187                                 current_tx_desc = &adapter->tx_desc_base[i];
1188                                 current_tx_desc->buffer_addr = htole64(
1189                                 desc_array.descriptor[counter].address);
1190                                 current_tx_desc->lower.data = htole32(
1191                                 (adapter->txd_cmd | txd_lower | 
1192                                 (uint16_t)desc_array.descriptor[counter].length));
1193                                 current_tx_desc->upper.data = htole32((txd_upper));
1194                                 if (++i == adapter->num_tx_desc)
1195                                         i = 0;
1196
1197                                 tx_buffer->m_head = NULL;
1198                                 txd_used++;
1199                         }
1200                 } else {
1201                         tx_buffer = &adapter->tx_buffer_area[i];
1202                         current_tx_desc = &adapter->tx_desc_base[i];
1203
1204                         current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1205                         current_tx_desc->lower.data = htole32(
1206                                 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1207                         current_tx_desc->upper.data = htole32(txd_upper);
1208
1209                         if (++i == adapter->num_tx_desc)
1210                                 i = 0;
1211
1212                         tx_buffer->m_head = NULL;
1213                 }
1214         }
1215
1216         adapter->next_avail_tx_desc = i;
1217         if (adapter->pcix_82544)
1218                 adapter->num_tx_desc_avail -= txd_used;
1219         else
1220                 adapter->num_tx_desc_avail -= q.nsegs;
1221
1222         if (ifv != NULL) {
1223                 /* Set the vlan id */
1224                 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1225
1226                 /* Tell hardware to add tag */
1227                 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1228         }
1229
1230         tx_buffer->m_head = m_head;
1231         tx_buffer->map = q.map;
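        /*
         * Sync the map before handing the frame to the chip: the PREWRITE
         * sync makes sure the mbuf data written by the CPU is visible to
         * the device (copying into bounce buffers where the platform
         * needs them) before the descriptor tail is advanced.
         */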
1232         bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1233
1234         /*
1235          * Last Descriptor of Packet needs End Of Packet (EOP)
1236          */
1237         current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1238
1239         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1240                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1241
1242         /* 
1243          * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1244          * that this frame is available to transmit.
1245          */
1246         if (adapter->hw.mac_type == em_82547 &&
1247             adapter->link_duplex == HALF_DUPLEX) {
1248                 em_82547_move_tail_serialized(adapter);
1249         } else {
1250                 E1000_WRITE_REG(&adapter->hw, TDT, i);
1251                 if (adapter->hw.mac_type == em_82547) {
1252                         em_82547_update_fifo_head(adapter,
1253                                                   m_head->m_pkthdr.len);
1254                 }
1255         }
1256
1257         return(0);
1258 }
1259
1260 /*********************************************************************
1261  *
1262  * 82547 workaround to avoid controller hang in half-duplex environment.
1263  * The workaround is to avoid queuing a large packet that would span   
1264  * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1265  *  in this case. We do that only when the FIFO is quiescent.
1266  *
1267  **********************************************************************/
1268 static void
1269 em_82547_move_tail(void *arg)
1270 {
1271         struct adapter *adapter = arg;
1272
1273         lwkt_serialize_enter(&adapter->serializer);
1274         em_82547_move_tail_serialized(arg);
1275         lwkt_serialize_exit(&adapter->serializer);
1276 }
1277
1278 static void
1279 em_82547_move_tail_serialized(void *arg)
1280 {
1281         struct adapter *adapter = arg;
1282         uint16_t hw_tdt;
1283         uint16_t sw_tdt;
1284         struct em_tx_desc *tx_desc;
1285         uint16_t length = 0;
1286         boolean_t eop = 0;
1287
1288         hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1289         sw_tdt = adapter->next_avail_tx_desc;
1290
1291         while (hw_tdt != sw_tdt) {
1292                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1293                 length += tx_desc->lower.flags.length;
1294                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1295                 if(++hw_tdt == adapter->num_tx_desc)
1296                         hw_tdt = 0;
1297
1298                 if(eop) {
1299                         if (em_82547_fifo_workaround(adapter, length)) {
1300                                 adapter->tx_fifo_wrk++;
1301                                 callout_reset(&adapter->tx_fifo_timer, 1,
1302                                         em_82547_move_tail, adapter);
1303                                 break;
1304                         }
1305                         E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1306                         em_82547_update_fifo_head(adapter, length);
1307                         length = 0;
1308                 }
1309         }       
1310 }
1311
1312 static int
1313 em_82547_fifo_workaround(struct adapter *adapter, int len)
1314 {       
1315         int fifo_space, fifo_pkt_len;
1316
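        /*
         * Charge the per-packet FIFO header and round up to the 16-byte
         * FIFO granularity (EM_ROUNDUP rounds up to a multiple of its
         * second argument).  For example, a 1514 byte frame costs
         * EM_ROUNDUP(1514 + 16, 16) = 1536 bytes of FIFO space.
         */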
1317         fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1318
1319         if (adapter->link_duplex == HALF_DUPLEX) {
1320                 fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1321
1322                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1323                         if (em_82547_tx_fifo_reset(adapter))
1324                                 return(0);
1325                         else
1326                                 return(1);
1327                 }
1328         }
1329
1330         return(0);
1331 }
1332
1333 static void
1334 em_82547_update_fifo_head(struct adapter *adapter, int len)
1335 {
1336         int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1337
1338         /* tx_fifo_head is always 16 byte aligned */
1339         adapter->tx_fifo_head += fifo_pkt_len;
1340         if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1341                 adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
1342 }
1343
1344 static int
1345 em_82547_tx_fifo_reset(struct adapter *adapter)
1346 {       
1347         uint32_t tctl;
1348
1349         if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1350               E1000_READ_REG(&adapter->hw, TDH)) &&
1351              (E1000_READ_REG(&adapter->hw, TDFT) == 
1352               E1000_READ_REG(&adapter->hw, TDFH)) &&
1353              (E1000_READ_REG(&adapter->hw, TDFTS) ==
1354               E1000_READ_REG(&adapter->hw, TDFHS)) &&
1355              (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1356
1357                 /* Disable TX unit */
1358                 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1359                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1360
1361                 /* Reset FIFO pointers */
1362                 E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1363                 E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1364                 E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1365                 E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1366
1367                 /* Re-enable TX unit */
1368                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1369                 E1000_WRITE_FLUSH(&adapter->hw);
1370
1371                 adapter->tx_fifo_head = 0;
1372                 adapter->tx_fifo_reset++;
1373
1374                 return(TRUE);
1375         }
1376         else {
1377                 return(FALSE);
1378         }
1379 }
1380
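/*
 * Program RCTL to match the interface's promiscuous/allmulti flags.
 */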
1381 static void
1382 em_set_promisc(struct adapter *adapter)
1383 {
1384         uint32_t reg_rctl;
1385         struct ifnet *ifp = &adapter->interface_data.ac_if;
1386
1387         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1388
1389         if (ifp->if_flags & IFF_PROMISC) {
1390                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1391                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1392         } else if (ifp->if_flags & IFF_ALLMULTI) {
1393                 reg_rctl |= E1000_RCTL_MPE;
1394                 reg_rctl &= ~E1000_RCTL_UPE;
1395                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1396         }
1397 }
1398
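/*
 * Clear both the unicast and multicast promiscuous bits in RCTL.
 */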
1399 static void
1400 em_disable_promisc(struct adapter *adapter)
1401 {
1402         uint32_t reg_rctl;
1403
1404         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1405
1406         reg_rctl &=  (~E1000_RCTL_UPE);
1407         reg_rctl &=  (~E1000_RCTL_MPE);
1408         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1409 }
1410
1411 /*********************************************************************
1412  *  Multicast Update
1413  *
1414  *  This routine is called whenever the multicast address list is updated.
1415  *
1416  **********************************************************************/
1417
1418 static void
1419 em_set_multi(struct adapter *adapter)
1420 {
1421         uint32_t reg_rctl = 0;
1422         uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1423         struct ifmultiaddr *ifma;
1424         int mcnt = 0;
1425         struct ifnet *ifp = &adapter->interface_data.ac_if;
1426
1427         IOCTL_DEBUGOUT("em_set_multi: begin");
1428
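        /*
         * For 82542 rev 2.0, hold the receiver in reset (and disable MWI)
         * while the multicast table is rewritten; both are restored below.
         */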
1429         if (adapter->hw.mac_type == em_82542_rev2_0) {
1430                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1431                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1432                         em_pci_clear_mwi(&adapter->hw);
1433                 reg_rctl |= E1000_RCTL_RST;
1434                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1435                 msec_delay(5);
1436         }
1437
1438         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1439                 if (ifma->ifma_addr->sa_family != AF_LINK)
1440                         continue;
1441
1442                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1443                         break;
1444
1445                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1446                       &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1447                 mcnt++;
1448         }
1449
1450         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1451                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1452                 reg_rctl |= E1000_RCTL_MPE;
1453                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1454         } else
1455                 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1456
1457         if (adapter->hw.mac_type == em_82542_rev2_0) {
1458                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1459                 reg_rctl &= ~E1000_RCTL_RST;
1460                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1461                 msec_delay(5);
1462                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1463                         em_pci_set_mwi(&adapter->hw);
1464         }
1465 }
1466
1467 /*********************************************************************
1468  *  Timer routine
1469  *
1470  *  This routine checks for link status and updates statistics.
1471  *
1472  **********************************************************************/
1473
1474 static void
1475 em_local_timer(void *arg)
1476 {
1477         struct ifnet *ifp;
1478         struct adapter *adapter = arg;
1479         ifp = &adapter->interface_data.ac_if;
1480
1481         lwkt_serialize_enter(&adapter->serializer);
1482
1483         em_check_for_link(&adapter->hw);
1484         em_print_link_status(adapter);
1485         em_update_stats_counters(adapter);
1486         if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1487                 em_print_hw_stats(adapter);
1488         em_smartspeed(adapter);
1489
1490         callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
1491
1492         lwkt_serialize_exit(&adapter->serializer);
1493 }
1494
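/*
 * Report link state transitions (up/down, speed and duplex) on the console
 * and cache the current state in the softc.
 */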
1495 static void
1496 em_print_link_status(struct adapter *adapter)
1497 {
1498         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1499                 if (adapter->link_active == 0) {
1500                         em_get_speed_and_duplex(&adapter->hw, 
1501                                                 &adapter->link_speed, 
1502                                                 &adapter->link_duplex);
1503                         device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1504                                adapter->link_speed,
1505                                ((adapter->link_duplex == FULL_DUPLEX) ?
1506                                 "Full Duplex" : "Half Duplex"));
1507                         adapter->link_active = 1;
1508                         adapter->smartspeed = 0;
1509                 }
1510         } else {
1511                 if (adapter->link_active == 1) {
1512                         adapter->link_speed = 0;
1513                         adapter->link_duplex = 0;
1514                         device_printf(adapter->dev, "Link is Down\n");
1515                         adapter->link_active = 0;
1516                 }
1517         }
1518 }
1519
1520 /*********************************************************************
1521  *
1522  *  This routine disables all traffic on the adapter by issuing a
1523  *  global reset on the MAC and deallocating the TX/RX buffers.
1524  *
1525  **********************************************************************/
1526
1527 static void
1528 em_stop(void *arg)
1529 {
1530         struct ifnet   *ifp;
1531         struct adapter * adapter = arg;
1532         ifp = &adapter->interface_data.ac_if;
1533
1534         INIT_DEBUGOUT("em_stop: begin");
1535         em_disable_intr(adapter);
1536         em_reset_hw(&adapter->hw);
1537         callout_stop(&adapter->timer);
1538         callout_stop(&adapter->tx_fifo_timer);
1539         em_free_transmit_structures(adapter);
1540         em_free_receive_structures(adapter);
1541
1542         /* Tell the stack that the interface is no longer active */
1543         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1544         ifp->if_timer = 0;
1545 }
1546
1547 /*********************************************************************
1548  *
1549  *  Determine hardware revision.
1550  *
1551  **********************************************************************/
1552 static void
1553 em_identify_hardware(struct adapter * adapter)
1554 {
1555         device_t dev = adapter->dev;
1556
1557         /* Make sure our PCI config space has the necessary stuff set */
1558         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1559         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1560               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1561                 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1562                 adapter->hw.pci_cmd_word |= 
1563                 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1564                 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1565         }
1566
1567         /* Save off the information about this board */
1568         adapter->hw.vendor_id = pci_get_vendor(dev);
1569         adapter->hw.device_id = pci_get_device(dev);
1570         adapter->hw.revision_id = pci_get_revid(dev);
1571         adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1572         adapter->hw.subsystem_id = pci_get_subdevice(dev);
1573
1574         /* Identify the MAC */
1575         if (em_set_mac_type(&adapter->hw))
1576                 device_printf(dev, "Unknown MAC Type\n");
1577
1578         if (adapter->hw.mac_type == em_82541 ||
1579             adapter->hw.mac_type == em_82541_rev_2 ||
1580             adapter->hw.mac_type == em_82547 ||
1581             adapter->hw.mac_type == em_82547_rev_2)
1582                 adapter->hw.phy_init_script = TRUE;
1583 }
1584
1585 /*********************************************************************
1586  *
1587  *  Initialize the hardware to a configuration as specified by the
1588  *  adapter structure. The controller is reset, the EEPROM is
1589  *  verified, the MAC address is set, then the shared initialization
1590  *  routines are called.
1591  *
1592  **********************************************************************/
1593 static int
1594 em_hardware_init(struct adapter *adapter)
1595 {
1596         INIT_DEBUGOUT("em_hardware_init: begin");
1597         /* Issue a global reset */
1598         em_reset_hw(&adapter->hw);
1599
1600         /* When hardware is reset, fifo_head is also reset */
1601         adapter->tx_fifo_head = 0;
1602
1603         /* Make sure we have a good EEPROM before we read from it */
1604         if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1605                 device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1606                 return(EIO);
1607         }
1608
1609         if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1610                 device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1611                 return(EIO);
1612         }
1613
1614         if (em_init_hw(&adapter->hw) < 0) {
1615                 device_printf(adapter->dev, "Hardware Initialization Failed\n");
1616                 return(EIO);
1617         }
1618
1619         em_check_for_link(&adapter->hw);
1620         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1621                 adapter->link_active = 1;
1622         else
1623                 adapter->link_active = 0;
1624
1625         if (adapter->link_active) {
1626                 em_get_speed_and_duplex(&adapter->hw, 
1627                                         &adapter->link_speed, 
1628                                         &adapter->link_duplex);
1629         } else {
1630                 adapter->link_speed = 0;
1631                 adapter->link_duplex = 0;
1632         }
1633
1634         return(0);
1635 }
1636
1637 /*********************************************************************
1638  *
1639  *  Setup networking device structure and register an interface.
1640  *
1641  **********************************************************************/
1642 static void
1643 em_setup_interface(device_t dev, struct adapter *adapter)
1644 {
1645         struct ifnet   *ifp;
1646         INIT_DEBUGOUT("em_setup_interface: begin");
1647
1648         ifp = &adapter->interface_data.ac_if;
1649         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1650         ifp->if_mtu = ETHERMTU;
1651         ifp->if_baudrate = 1000000000;
1652         ifp->if_init =  em_init;
1653         ifp->if_softc = adapter;
1654         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1655         ifp->if_ioctl = em_ioctl;
1656         ifp->if_start = em_start;
1657 #ifdef DEVICE_POLLING
1658         ifp->if_poll = em_poll;
1659 #endif
1660         ifp->if_watchdog = em_watchdog;
1661         ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
1662         ifq_set_ready(&ifp->if_snd);
1663
1664         if (adapter->hw.mac_type >= em_82543)
1665                 ifp->if_capabilities |= IFCAP_HWCSUM;
1666
1667         ifp->if_capenable = ifp->if_capabilities;
1668
1669         ether_ifattach(ifp, adapter->hw.mac_addr);
1670
1671         /*
1672          * Tell the upper layer(s) we support long frames.
1673          */
1674         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1675         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1676
1677         /* 
1678          * Specify the media types supported by this adapter and register
1679          * callbacks to update media and link information
1680          */
1681         ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1682                      em_media_status);
1683         if (adapter->hw.media_type == em_media_type_fiber) {
1684                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 
1685                             0, NULL);
1686                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 
1687                             0, NULL);
1688         } else {
1689                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1690                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 
1691                             0, NULL);
1692                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 
1693                             0, NULL);
1694                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 
1695                             0, NULL);
1696                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 
1697                             0, NULL);
1698                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1699         }
1700         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1701         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1702 }
1703
1704 /*********************************************************************
1705  *
1706  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1707  *
1708  **********************************************************************/        
1709 static void
1710 em_smartspeed(struct adapter *adapter)
1711 {
1712         uint16_t phy_tmp;
1713
1714         if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) || 
1715             !adapter->hw.autoneg ||
1716             !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1717                 return;
1718
1719         if (adapter->smartspeed == 0) {
1720                 /*
1721                  * If Master/Slave config fault is asserted twice,
1722                  * we assume back-to-back.
1723                  */
1724                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1725                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1726                         return;
1727                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1728                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1729                         em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1730                                         &phy_tmp);
1731                         if (phy_tmp & CR_1000T_MS_ENABLE) {
1732                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
1733                                 em_write_phy_reg(&adapter->hw,
1734                                                  PHY_1000T_CTRL, phy_tmp);
1735                                 adapter->smartspeed++;
1736                                 if (adapter->hw.autoneg &&
1737                                     !em_phy_setup_autoneg(&adapter->hw) &&
1738                                     !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1739                                                      &phy_tmp)) {
1740                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |  
1741                                                     MII_CR_RESTART_AUTO_NEG);
1742                                         em_write_phy_reg(&adapter->hw,
1743                                                          PHY_CTRL, phy_tmp);
1744                                 }
1745                         }
1746                 }
1747                 return;
1748         } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1749                 /* If still no link, perhaps using 2/3 pair cable */
1750                 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1751                 phy_tmp |= CR_1000T_MS_ENABLE;
1752                 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1753                 if (adapter->hw.autoneg &&
1754                     !em_phy_setup_autoneg(&adapter->hw) &&
1755                     !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1756                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
1757                                     MII_CR_RESTART_AUTO_NEG);
1758                         em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1759                 }
1760         }
1761         /* Restart process after EM_SMARTSPEED_MAX iterations */
1762         if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1763                 adapter->smartspeed = 0;
1764 }
1765
1766 /*
1767  * Manage DMA'able memory.
1768  */
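/*
 * bus_dmamap_load() callback: record the physical address of the single
 * segment for the caller.
 */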
1769 static void
1770 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1771 {
1772         if (error)
1773                 return;
1774         *(bus_addr_t*) arg = segs->ds_addr;
1775 }
1776
1777 static int
1778 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1779               struct em_dma_alloc *dma, int mapflags)
1780 {
1781         int r;
1782         device_t dev = adapter->dev;
1783
1784         r = bus_dma_tag_create(NULL,                    /* parent */
1785                                PAGE_SIZE, 0,            /* alignment, bounds */
1786                                BUS_SPACE_MAXADDR,       /* lowaddr */
1787                                BUS_SPACE_MAXADDR,       /* highaddr */
1788                                NULL, NULL,              /* filter, filterarg */
1789                                size,                    /* maxsize */
1790                                1,                       /* nsegments */
1791                                size,                    /* maxsegsize */
1792                                BUS_DMA_ALLOCNOW,        /* flags */
1793                                &dma->dma_tag);
1794         if (r != 0) {
1795                 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1796                               "error %u\n", r);
1797                 goto fail_0;
1798         }
1799
1800         r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1801                              BUS_DMA_NOWAIT, &dma->dma_map);
1802         if (r != 0) {
1803                 device_printf(dev, "em_dma_malloc: bus_dmamem_alloc failed; "
1804                               "size %llu, error %d\n", (unsigned long long)size, r);
1805                 goto fail_1;
1806         }
1807
1808         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1809                             size,
1810                             em_dmamap_cb,
1811                             &dma->dma_paddr,
1812                             mapflags | BUS_DMA_NOWAIT);
1813         if (r != 0) {
1814                 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1815                               "error %u\n", r);
1816                 goto fail_3;
1817         }
1818
1819         dma->dma_size = size;
1820         return(0);
1821
1822 fail_3:
1823         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1824 fail_2:
1825         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
1826         bus_dma_tag_destroy(dma->dma_tag);
1827 fail_0:
1828         dma->dma_map = NULL;
1829         dma->dma_tag = NULL;
1830         return(r);
1831 }
1832
1833 static void
1834 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1835 {
1836         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1837         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1838         bus_dma_tag_destroy(dma->dma_tag);
1839 }
1840
1841 /*********************************************************************
1842  *
1843  *  Allocate memory for tx_buffer structures. The tx_buffer stores all 
1844  *  the information needed to transmit a packet on the wire. 
1845  *
1846  **********************************************************************/
1847 static int
1848 em_allocate_transmit_structures(struct adapter * adapter)
1849 {
1850         adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1851             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1852         if (adapter->tx_buffer_area == NULL) {
1853                 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1854                 return(ENOMEM);
1855         }
1856
1857         return(0);
1858 }
1859
1860 /*********************************************************************
1861  *
1862  *  Allocate and initialize transmit structures. 
1863  *
1864  **********************************************************************/
1865 static int
1866 em_setup_transmit_structures(struct adapter * adapter)
1867 {
1868         /*
1869          * Setup DMA descriptor areas.
1870          */
1871         if (bus_dma_tag_create(NULL,                    /* parent */
1872                                1, 0,                    /* alignment, bounds */
1873                                BUS_SPACE_MAXADDR,       /* lowaddr */ 
1874                                BUS_SPACE_MAXADDR,       /* highaddr */
1875                                NULL, NULL,              /* filter, filterarg */
1876                                MCLBYTES * 8,            /* maxsize */
1877                                EM_MAX_SCATTER,          /* nsegments */
1878                                MCLBYTES * 8,            /* maxsegsize */
1879                                BUS_DMA_ALLOCNOW,        /* flags */ 
1880                                &adapter->txtag)) {
1881                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1882                 return(ENOMEM);
1883         }
1884
1885         if (em_allocate_transmit_structures(adapter))
1886                 return(ENOMEM);
1887
1888         bzero((void *) adapter->tx_desc_base,
1889               (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1890
1891         adapter->next_avail_tx_desc = 0;
1892         adapter->oldest_used_tx_desc = 0;
1893
1894         /* Set number of descriptors available */
1895         adapter->num_tx_desc_avail = adapter->num_tx_desc;
1896
1897         /* Set checksum context */
1898         adapter->active_checksum_context = OFFLOAD_NONE;
1899
1900         return(0);
1901 }
1902
1903 /*********************************************************************
1904  *
1905  *  Enable transmit unit.
1906  *
1907  **********************************************************************/
1908 static void
1909 em_initialize_transmit_unit(struct adapter * adapter)
1910 {
1911         uint32_t reg_tctl;
1912         uint32_t reg_tipg = 0;
1913         uint64_t bus_addr;
1914
1915         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1916
1917         /* Setup the Base and Length of the Tx Descriptor Ring */
1918         bus_addr = adapter->txdma.dma_paddr;
1919         E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1920         E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1921         E1000_WRITE_REG(&adapter->hw, TDLEN, 
1922                         adapter->num_tx_desc * sizeof(struct em_tx_desc));
1923
1924         /* Setup the HW Tx Head and Tail descriptor pointers */
1925         E1000_WRITE_REG(&adapter->hw, TDH, 0);
1926         E1000_WRITE_REG(&adapter->hw, TDT, 0);
1927
1928         HW_DEBUGOUT2("Base = %x, Length = %x\n", 
1929                      E1000_READ_REG(&adapter->hw, TDBAL),
1930                      E1000_READ_REG(&adapter->hw, TDLEN));
1931
1932         /* Set the default values for the Tx Inter Packet Gap timer */
1933         switch (adapter->hw.mac_type) {
1934         case em_82542_rev2_0:
1935         case em_82542_rev2_1:
1936                 reg_tipg = DEFAULT_82542_TIPG_IPGT;
1937                 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1938                 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1939                 break;
1940         default:
1941                 if (adapter->hw.media_type == em_media_type_fiber)
1942                         reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1943                 else
1944                         reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1945                 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1946                 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1947         }
1948
1949         E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1950         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
1951         if (adapter->hw.mac_type >= em_82540)
1952                 E1000_WRITE_REG(&adapter->hw, TADV,
1953                                 adapter->tx_abs_int_delay.value);
1954
1955         /* Program the Transmit Control Register */
1956         reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1957                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1958         if (adapter->link_duplex == FULL_DUPLEX)
1959                 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1960         else
1961                 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1962         E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1963
1964         /* Setup Transmit Descriptor Settings for this adapter */   
1965         adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
1966
1967         if (adapter->tx_int_delay.value > 0)
1968                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1969 }
1970
1971 /*********************************************************************
1972  *
1973  *  Free all transmit related data structures.
1974  *
1975  **********************************************************************/
1976 static void
1977 em_free_transmit_structures(struct adapter * adapter)
1978 {
1979         struct em_buffer *tx_buffer;
1980         int i;
1981
1982         INIT_DEBUGOUT("free_transmit_structures: begin");
1983
1984         if (adapter->tx_buffer_area != NULL) {
1985                 tx_buffer = adapter->tx_buffer_area;
1986                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1987                         if (tx_buffer->m_head != NULL) {
1988                                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1989                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1990                                 m_freem(tx_buffer->m_head);
1991                         }
1992                         tx_buffer->m_head = NULL;
1993                 }
1994         }
1995         if (adapter->tx_buffer_area != NULL) {
1996                 free(adapter->tx_buffer_area, M_DEVBUF);
1997                 adapter->tx_buffer_area = NULL;
1998         }
1999         if (adapter->txtag != NULL) {
2000                 bus_dma_tag_destroy(adapter->txtag);
2001                 adapter->txtag = NULL;
2002         }
2003 }
2004
2005 /*********************************************************************
2006  *
2007  *  The offload context needs to be set when we transfer the first
2008  *  packet of a particular protocol (TCP/UDP). We change the
2009  *  context only if the protocol type changes.
2010  *
2011  **********************************************************************/
2012 static void
2013 em_transmit_checksum_setup(struct adapter * adapter,
2014                            struct mbuf *mp,
2015                            uint32_t *txd_upper,
2016                            uint32_t *txd_lower) 
2017 {
2018         struct em_context_desc *TXD;
2019         struct em_buffer *tx_buffer;
2020         int curr_txd;
2021
2022         if (mp->m_pkthdr.csum_flags) {
2023                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2024                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2025                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2026                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2027                                 return;
2028                         else
2029                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2030                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2031                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2032                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2033                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2034                                 return;
2035                         else
2036                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2037                 } else {
2038                         *txd_upper = 0;
2039                         *txd_lower = 0;
2040                         return;
2041                 }
2042         } else {
2043                 *txd_upper = 0;
2044                 *txd_lower = 0;
2045                 return;
2046         }
2047
2048         /* If we reach this point, the checksum offload context
2049          * needs to be reset.
2050          */
2051         curr_txd = adapter->next_avail_tx_desc;
2052         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2053         TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2054
2055         TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2056         TXD->lower_setup.ip_fields.ipcso =
2057             ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2058         TXD->lower_setup.ip_fields.ipcse =
2059             htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2060
2061         TXD->upper_setup.tcp_fields.tucss = 
2062             ETHER_HDR_LEN + sizeof(struct ip);
2063         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2064
2065         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2066                 TXD->upper_setup.tcp_fields.tucso =
2067                     ETHER_HDR_LEN + sizeof(struct ip) +
2068                     offsetof(struct tcphdr, th_sum);
2069         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2070                 TXD->upper_setup.tcp_fields.tucso =
2071                         ETHER_HDR_LEN + sizeof(struct ip) +
2072                         offsetof(struct udphdr, uh_sum);
2073         }
2074
2075         TXD->tcp_seg_setup.data = htole32(0);
2076         TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2077
2078         tx_buffer->m_head = NULL;
2079
2080         if (++curr_txd == adapter->num_tx_desc)
2081                 curr_txd = 0;
2082
2083         adapter->num_tx_desc_avail--;
2084         adapter->next_avail_tx_desc = curr_txd;
2085 }
2086
2087 /**********************************************************************
2088  *
2089  *  Examine each tx_buffer in the used queue. If the hardware is done
2090  *  processing the packet then free associated resources. The
2091  *  tx_buffer is put back on the free queue.
2092  *
2093  **********************************************************************/
2094
2095 static void
2096 em_clean_transmit_interrupts(struct adapter *adapter)
2097 {
2098         int i, num_avail;
2099         struct em_buffer *tx_buffer;
2100         struct em_tx_desc *tx_desc;
2101         struct ifnet *ifp = &adapter->interface_data.ac_if;
2102
2103         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2104                 return;
2105
2106 #ifdef DBG_STATS
2107         adapter->clean_tx_interrupts++;
2108 #endif
2109         num_avail = adapter->num_tx_desc_avail; 
2110         i = adapter->oldest_used_tx_desc;
2111
2112         tx_buffer = &adapter->tx_buffer_area[i];
2113         tx_desc = &adapter->tx_desc_base[i];
2114
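        /*
         * Sync the descriptor ring so we observe the DD (descriptor done)
         * status bits the hardware has written back.
         */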
2115         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2116                         BUS_DMASYNC_POSTREAD);
2117
2118         while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2119                 tx_desc->upper.data = 0;
2120                 num_avail++;
2121
2122                 if (tx_buffer->m_head) {
2123                         ifp->if_opackets++;
2124                         bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2125                                         BUS_DMASYNC_POSTWRITE);
2126                         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2127                         bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2128
2129                         m_freem(tx_buffer->m_head);
2130                         tx_buffer->m_head = NULL;
2131                 }
2132
2133                 if (++i == adapter->num_tx_desc)
2134                         i = 0;
2135
2136                 tx_buffer = &adapter->tx_buffer_area[i];
2137                 tx_desc = &adapter->tx_desc_base[i];
2138         }
2139
2140         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2141                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2142
2143         adapter->oldest_used_tx_desc = i;
2144
2145         /*
2146          * If we have enough room, clear IFF_OACTIVE to tell the stack
2147          * that it is OK to send packets.
2148          * If there are no pending descriptors, clear the timeout. Otherwise,
2149          * if some descriptors have been freed, restart the timeout.
2150          */
2151         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2152                 ifp->if_flags &= ~IFF_OACTIVE;
2153                 if (num_avail == adapter->num_tx_desc)
2154                         ifp->if_timer = 0;
2155                 else if (num_avail == adapter->num_tx_desc_avail)
2156                         ifp->if_timer = EM_TX_TIMEOUT;
2157         }
2158         adapter->num_tx_desc_avail = num_avail;
2159 }
2160
2161 /*********************************************************************
2162  *
2163  *  Get a buffer from system mbuf buffer pool.
2164  *
2165  **********************************************************************/
2166 static int
2167 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2168 {
2169         struct mbuf *mp = nmp;
2170         struct em_buffer *rx_buffer;
2171         struct ifnet *ifp;
2172         bus_addr_t paddr;
2173         int error;
2174
2175         ifp = &adapter->interface_data.ac_if;
2176
2177         if (mp == NULL) {
2178                 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2179                 if (mp == NULL) {
2180                         adapter->mbuf_cluster_failed++;
2181                         return(ENOBUFS);
2182                 }
2183                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2184         } else {
2185                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2186                 mp->m_data = mp->m_ext.ext_buf;
2187                 mp->m_next = NULL;
2188         }
2189         if (ifp->if_mtu <= ETHERMTU)
2190                 m_adj(mp, ETHER_ALIGN);
2191
2192         rx_buffer = &adapter->rx_buffer_area[i];
2193
2194         /*
2195          * Using memory from the mbuf cluster pool, invoke the
2196          * bus_dma machinery to arrange the memory mapping.
2197          */
2198         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2199                                 mtod(mp, void *), mp->m_len,
2200                                 em_dmamap_cb, &paddr, 0);
2201         if (error) {
2202                 m_free(mp);
2203                 return(error);
2204         }
2205         rx_buffer->m_head = mp;
2206         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2207         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2208
2209         return(0);
2210 }
2211
2212 /*********************************************************************
2213  *
2214  *  Allocate memory for rx_buffer structures. Since we use one
2215  *  rx_buffer per received packet, the maximum number of rx_buffers
2216  *  we will need is equal to the number of receive descriptors
2217  *  we have allocated.
2218  *
2219  **********************************************************************/
2220 static int
2221 em_allocate_receive_structures(struct adapter *adapter)
2222 {
2223         int i, error, size;
2224         struct em_buffer *rx_buffer;
2225
2226         size = adapter->num_rx_desc * sizeof(struct em_buffer);
2227         adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2228
2229         error = bus_dma_tag_create(NULL,                /* parent */
2230                                    1, 0,                /* alignment, bounds */
2231                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2232                                    BUS_SPACE_MAXADDR,   /* highaddr */
2233                                    NULL, NULL,          /* filter, filterarg */
2234                                    MCLBYTES,            /* maxsize */
2235                                    1,                   /* nsegments */
2236                                    MCLBYTES,            /* maxsegsize */
2237                                    BUS_DMA_ALLOCNOW,    /* flags */
2238                                    &adapter->rxtag);
2239         if (error != 0) {
2240                 device_printf(adapter->dev, "em_allocate_receive_structures: "
2241                               "bus_dma_tag_create failed; error %u\n", error);
2242                 goto fail_0;
2243         }
2244  
2245         rx_buffer = adapter->rx_buffer_area;
2246         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2247                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2248                                           &rx_buffer->map);
2249                 if (error != 0) {
2250                         device_printf(adapter->dev,
2251                                       "em_allocate_receive_structures: "
2252                                       "bus_dmamap_create failed; error %u\n",
2253                                       error);
2254                         goto fail_1;
2255                 }
2256         }
2257
2258         for (i = 0; i < adapter->num_rx_desc; i++) {
2259                 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2260                 if (error != 0) {
2261                         adapter->rx_buffer_area[i].m_head = NULL;
2262                         adapter->rx_desc_base[i].buffer_addr = 0;
2263                         return(error);
2264                 }
2265         }
2266
2267         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2268                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2269
2270         return(0);
2271
2272 fail_1:
2273         bus_dma_tag_destroy(adapter->rxtag);
2274 fail_0:
2275         adapter->rxtag = NULL;
2276         free(adapter->rx_buffer_area, M_DEVBUF);
2277         adapter->rx_buffer_area = NULL;
2278         return(error);
2279 }
2280
2281 /*********************************************************************
2282  *
2283  *  Allocate and initialize receive structures.
2284  *  
2285  **********************************************************************/
2286 static int
2287 em_setup_receive_structures(struct adapter *adapter)
2288 {
2289         bzero((void *) adapter->rx_desc_base,
2290               (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2291
2292         if (em_allocate_receive_structures(adapter))
2293                 return(ENOMEM);
2294
2295         /* Setup our descriptor pointers */
2296         adapter->next_rx_desc_to_check = 0;
2297         return(0);
2298 }
2299
2300 /*********************************************************************
2301  *
2302  *  Enable receive unit.
2303  *  
2304  **********************************************************************/
2305 static void
2306 em_initialize_receive_unit(struct adapter *adapter)
2307 {
2308         uint32_t reg_rctl;
2309         uint32_t reg_rxcsum;
2310         struct ifnet *ifp;
2311         uint64_t bus_addr;
2312  
2313         INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2314
2315         ifp = &adapter->interface_data.ac_if;
2316
2317         /* Make sure receives are disabled while setting up the descriptor ring */
2318         E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2319
2320         /* Set the Receive Delay Timer Register */
2321         E1000_WRITE_REG(&adapter->hw, RDTR, 
2322                         adapter->rx_int_delay.value | E1000_RDT_FPDB);
2323
2324         if(adapter->hw.mac_type >= em_82540) {
2325                 E1000_WRITE_REG(&adapter->hw, RADV,
2326                                 adapter->rx_abs_int_delay.value);
2327
2328                 /* Set the interrupt throttling rate in 256ns increments */  
2329                 if (em_int_throttle_ceil) {
2330                         E1000_WRITE_REG(&adapter->hw, ITR,
2331                                 1000000000 / 256 / em_int_throttle_ceil);
2332                 } else {
2333                         E1000_WRITE_REG(&adapter->hw, ITR, 0);
2334                 }
2335         }
2336
2337         /* Setup the Base and Length of the Rx Descriptor Ring */
2338         bus_addr = adapter->rxdma.dma_paddr;
2339         E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2340         E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2341         E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2342                         sizeof(struct em_rx_desc));
2343
2344         /* Setup the HW Rx Head and Tail Descriptor Pointers */
2345         E1000_WRITE_REG(&adapter->hw, RDH, 0);
2346         E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2347
2348         /* Setup the Receive Control Register */
2349         reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2350                    E1000_RCTL_RDMTS_HALF |
2351                    (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2352
2353         if (adapter->hw.tbi_compatibility_on == TRUE)
2354                 reg_rctl |= E1000_RCTL_SBP;
2355
2356         switch (adapter->rx_buffer_len) {
2357         default:
2358         case EM_RXBUFFER_2048:
2359                 reg_rctl |= E1000_RCTL_SZ_2048;
2360                 break;
2361         case EM_RXBUFFER_4096:
2362                 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2363                 break;            
2364         case EM_RXBUFFER_8192:
2365                 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2366                 break;
2367         case EM_RXBUFFER_16384:
2368                 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2369                 break;
2370         }
2371
2372         if (ifp->if_mtu > ETHERMTU)
2373                 reg_rctl |= E1000_RCTL_LPE;
2374
2375         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2376         if ((adapter->hw.mac_type >= em_82543) && 
2377             (ifp->if_capenable & IFCAP_RXCSUM)) {
2378                 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2379                 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2380                 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2381         }
2382
2383         /* Enable Receives */
2384         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);  
2385 }
2386
2387 /*********************************************************************
2388  *
2389  *  Free receive related data structures.
2390  *
2391  **********************************************************************/
2392 static void
2393 em_free_receive_structures(struct adapter *adapter)
2394 {
2395         struct em_buffer *rx_buffer;
2396         int i;
2397
2398         INIT_DEBUGOUT("free_receive_structures: begin");
2399
2400         if (adapter->rx_buffer_area != NULL) {
2401                 rx_buffer = adapter->rx_buffer_area;
2402                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2403                         if (rx_buffer->map != NULL) {
2404                                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2405                                 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2406                         }
2407                         if (rx_buffer->m_head != NULL)
2408                                 m_freem(rx_buffer->m_head);
2409                         rx_buffer->m_head = NULL;
2410                 }
2411         }
2412         if (adapter->rx_buffer_area != NULL) {
2413                 free(adapter->rx_buffer_area, M_DEVBUF);
2414                 adapter->rx_buffer_area = NULL;
2415         }
2416         if (adapter->rxtag != NULL) {
2417                 bus_dma_tag_destroy(adapter->rxtag);
2418                 adapter->rxtag = NULL;
2419         }
2420 }
2421
2422 /*********************************************************************
2423  *
2424  *  This routine executes in interrupt context. It replenishes
2425  *  the mbufs in the receive descriptor ring and passes the data that
2426  *  has been DMA'ed into host memory up to the upper layer.
2427  *
2428  *  We loop at most count times if count is > 0, or until done if
2429  *  count < 0.
2430  *
2431  *********************************************************************/
2432 static void
2433 em_process_receive_interrupts(struct adapter *adapter, int count)
2434 {
2435         struct ifnet *ifp;
2436         struct mbuf *mp;
2437         uint8_t accept_frame = 0;
2438         uint8_t eop = 0;
2439         uint16_t len, desc_len, prev_len_adj;
2440         int i;
2441
2442         /* Pointer to the receive descriptor being examined. */
2443         struct em_rx_desc *current_desc;
2444
2445         ifp = &adapter->interface_data.ac_if;
2446         i = adapter->next_rx_desc_to_check;
2447         current_desc = &adapter->rx_desc_base[i];
2448
2449         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2450                         BUS_DMASYNC_POSTREAD);
2451
2452         if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2453 #ifdef DBG_STATS
2454                 adapter->no_pkts_avail++;
2455 #endif
2456                 return;
2457         }
2458         while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2459                 mp = adapter->rx_buffer_area[i].m_head;
2460                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2461                                 BUS_DMASYNC_POSTREAD);
2462
2463                 accept_frame = 1;
2464                 prev_len_adj = 0;
2465                 desc_len = le16toh(current_desc->length);
2466                 if (current_desc->status & E1000_RXD_STAT_EOP) {
2467                         count--;
2468                         eop = 1;
2469                         if (desc_len < ETHER_CRC_LEN) {
2470                                 len = 0;
2471                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
2472                         }
2473                         else {
2474                                 len = desc_len - ETHER_CRC_LEN;
2475                         }
2476                 } else {
2477                         eop = 0;
2478                         len = desc_len;
2479                 }
2480
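                /*
                 * On TBI (fiber) parts a frame flagged with an error may
                 * still be acceptable (e.g. when it ends in carrier
                 * extension); let the shared code decide and adjust the
                 * statistics accordingly.
                 */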
2481                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2482                         uint8_t last_byte;
2483                         uint32_t pkt_len = desc_len;
2484
2485                         if (adapter->fmp != NULL)
2486                                 pkt_len += adapter->fmp->m_pkthdr.len; 
2487
2488                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2489
2490                         if (TBI_ACCEPT(&adapter->hw, current_desc->status, 
2491                                        current_desc->errors, 
2492                                        pkt_len, last_byte)) {
2493                                 em_tbi_adjust_stats(&adapter->hw, 
2494                                                     &adapter->stats, 
2495                                                     pkt_len, 
2496                                                     adapter->hw.mac_addr);
2497                                 if (len > 0)
2498                                         len--;
2499                         }
2500                         else {
2501                                 accept_frame = 0;
2502                         }
2503                 }
2504
2505                 if (accept_frame) {
2506                         if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2507                                 adapter->dropped_pkts++;
2508                                 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2509                                 if (adapter->fmp != NULL) 
2510                                         m_freem(adapter->fmp);
2511                                 adapter->fmp = NULL;
2512                                 adapter->lmp = NULL;
2513                                 break;
2514                         }
2515
2516                         /* Assign correct length to the current fragment */
2517                         mp->m_len = len;
2518
2519                         if (adapter->fmp == NULL) {
2520                                 mp->m_pkthdr.len = len;
2521                                 adapter->fmp = mp;       /* Store the first mbuf */
2522                                 adapter->lmp = mp;
2523                         } else {
2524                                 /* Chain mbuf's together */
2525                                 /* 
2526                                  * Adjust length of previous mbuf in chain if we 
2527                                  * received less than 4 bytes in the last descriptor.
2528                                  */
2529                                 if (prev_len_adj > 0) {
2530                                         adapter->lmp->m_len -= prev_len_adj;
2531                                         adapter->fmp->m_pkthdr.len -= prev_len_adj;
2532                                 }
2533                                 adapter->lmp->m_next = mp;
2534                                 adapter->lmp = adapter->lmp->m_next;
2535                                 adapter->fmp->m_pkthdr.len += len;
2536                         }
2537
2538                         if (eop) {
2539                                 adapter->fmp->m_pkthdr.rcvif = ifp;
2540                                 ifp->if_ipackets++;
2541
2542                                 em_receive_checksum(adapter, current_desc,
2543                                                     adapter->fmp);
2544                                 if (current_desc->status & E1000_RXD_STAT_VP)
2545                                         VLAN_INPUT_TAG(adapter->fmp,
2546                                                        (current_desc->special & 
2547                                                         E1000_RXD_SPC_VLAN_MASK));
2548                                 else
2549                                         (*ifp->if_input)(ifp, adapter->fmp);
2550                                 adapter->fmp = NULL;
2551                                 adapter->lmp = NULL;
2552                         }
2553                 } else {
2554                         adapter->dropped_pkts++;
2555                         em_get_buf(i, adapter, mp, MB_DONTWAIT);
2556                         if (adapter->fmp != NULL) 
2557                                 m_freem(adapter->fmp);
2558                         adapter->fmp = NULL;
2559                         adapter->lmp = NULL;
2560                 }
2561
2562                 /* Zero out the receive descriptors status  */
2563                 current_desc->status = 0;
2564
2565                 /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2566                 E1000_WRITE_REG(&adapter->hw, RDT, i);
2567
2568                 /* Advance our pointers to the next descriptor */
2569                 if (++i == adapter->num_rx_desc) {
2570                         i = 0;
2571                         current_desc = adapter->rx_desc_base;
2572                 } else
2573                         current_desc++;
2574         }
2575
2576         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2577                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2578
2579         adapter->next_rx_desc_to_check = i;
2580 }
2581
2582 /*********************************************************************
2583  *
2584  *  Verify that the hardware indicated that the checksum is valid.
2585  *  Inform the stack about the status of the checksum so that the
2586  *  stack does not spend time verifying it again.
2587  *
2588  *********************************************************************/
2589 static void
2590 em_receive_checksum(struct adapter *adapter,
2591                     struct em_rx_desc *rx_desc,
2592                     struct mbuf *mp)
2593 {
2594         /* 82543 or newer only */
2595         if ((adapter->hw.mac_type < em_82543) ||
2596             /* Ignore Checksum bit is set */
2597             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2598                 mp->m_pkthdr.csum_flags = 0;
2599                 return;
2600         }
2601
2602         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2603                 /* Did it pass? */
2604                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2605                         /* IP Checksum Good */
2606                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2607                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2608                 } else {
2609                         mp->m_pkthdr.csum_flags = 0;
2610                 }
2611         }
2612
2613         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2614                 /* Did it pass? */        
2615                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2616                         mp->m_pkthdr.csum_flags |= 
2617                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2618                         mp->m_pkthdr.csum_data = htons(0xffff);
2619                 }
2620         }
2621 }
2622
2623
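/*
 * Set the VLAN ethertype register and enable hardware VLAN tag stripping
 * (CTRL.VME).
 */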
2624 static void 
2625 em_enable_vlans(struct adapter *adapter)
2626 {
2627         uint32_t ctrl;
2628
2629         E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2630
2631         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2632         ctrl |= E1000_CTRL_VME; 
2633         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2634 }
2635
2636 /*
2637  * note: we must call lwkt_serialize_handler_enable() prior to enabling the
2638  * hardware interrupt and lwkt_serialize_handler_disable() after disabling it,
2639  * in order to avoid handler execution races from scheduled interrupt
2640  * threads.
2641  */
2642 static void
2643 em_enable_intr(struct adapter *adapter)
2644 {
2645         struct ifnet *ifp = &adapter->interface_data.ac_if;
2646         
2647         if ((ifp->if_flags & IFF_POLLING) == 0) {
2648                 lwkt_serialize_handler_enable(&adapter->serializer);
2649                 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2650         }
2651 }
2652
2653 static void
2654 em_disable_intr(struct adapter *adapter)
2655 {
2656         E1000_WRITE_REG(&adapter->hw, IMC, 
2657                         (0xffffffff & ~E1000_IMC_RXSEQ));
2658         lwkt_serialize_handler_disable(&adapter->serializer);
2659 }
2660
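/*
 * A valid unicast address must not have the group bit set and must not be
 * all zeroes.
 */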
2661 static int
2662 em_is_valid_ether_addr(uint8_t *addr)
2663 {
2664         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2665
2666         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2667                 return(FALSE);
2668         else
2669                 return(TRUE);
2670 }
2671
2672 void 
2673 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2674 {
2675         pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2676 }
2677
2678 void 
2679 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2680 {
2681         *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2682 }
2683
2684 void
2685 em_pci_set_mwi(struct em_hw *hw)
2686 {
2687         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2688                          (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2689 }
2690
2691 void
2692 em_pci_clear_mwi(struct em_hw *hw)
2693 {
2694         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2695                          (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2696 }
2697
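/*
 * Register access through the I/O BAR: the target register offset is
 * written to the address window at byte offset 0 and the data is then
 * read or written through the data window at byte offset 4.
 */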
2698 uint32_t
2699 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2700 {
2701         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2702         return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2703 }
2704
2705 void
2706 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2707 {
2708         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2709         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2710 }
2711
2712 /*********************************************************************
2713  * 82544 Coexistence issue workaround.
2714  *    There are two issues.
2715  *      1. Transmit Hang issue.
2716  *    To detect this issue, the following check can be used:
2717  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2718  *          If SUM[3:0] is between 1 and 4, the hang issue is present.
2719  *
2720  *      2. DAC issue.
2721  *    To detect this issue, the following check can be used:
2722  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2723  *          If SUM[3:0] is between 9 and 0xC, the DAC issue is present.
2724  *
2725  *
2726  *    WORKAROUND:
2727  *          Make sure the ending address is not 1, 2, 3, 4 (Hang) or
2728  *          9, a, b, c (DAC).
2729  *
2730  *********************************************************************/
2731 static uint32_t
2732 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2733 {
2734         /* The issue is sensitive to both length and address. */
2735         /* Buffers of 4 bytes or less never need to be split. */
2736         uint32_t safe_terminator;
2737         if (length <= 4) {
2738                 desc_array->descriptor[0].address = address;
2739                 desc_array->descriptor[0].length = length;
2740                 desc_array->elements = 1;
2741                 return(desc_array->elements);
2742         }
2743         safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2744         /* If the terminator is outside 0x1-0x4 and 0x9-0xC, no split is needed. */
2745         if (safe_terminator == 0 ||
2746             (safe_terminator > 4 && safe_terminator < 9) || 
2747             (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2748                 desc_array->descriptor[0].address = address;
2749                 desc_array->descriptor[0].length = length;
2750                 desc_array->elements = 1;
2751                 return(desc_array->elements);
2752         }
2753
2754         desc_array->descriptor[0].address = address;
2755         desc_array->descriptor[0].length = length - 4;
2756         desc_array->descriptor[1].address = address + (length - 4);
2757         desc_array->descriptor[1].length = 4;
2758         desc_array->elements = 2;
2759         return(desc_array->elements);
2760 }
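
/*
 * Illustrative trace of the split above: with address = 0x1000 and
 * length = 50 (0x32), safe_terminator = ((0x0 + 0x2) & 0xF) = 0x2,
 * which falls in the 1-4 range, so two descriptors are produced:
 * { 0x1000, 46 bytes } followed by { 0x102E, 4 bytes }.
 */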
2761
2762 /**********************************************************************
2763  *
2764  *  Update the board statistics counters. 
2765  *
2766  **********************************************************************/
2767 static void
2768 em_update_stats_counters(struct adapter *adapter)
2769 {
2770         struct ifnet   *ifp;
2771
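        /*
         * The hardware statistics registers are clear-on-read, so each
         * call accumulates the delta since the previous poll into the
         * software counters.
         */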
2772         if (adapter->hw.media_type == em_media_type_copper ||
2773             (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2774                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2775                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2776         }
2777         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2778         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2779         adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2780         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2781
2782         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2783         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2784         adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2785         adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2786         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2787         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2788         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2789         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2790         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2791         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2792         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2793         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2794         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2795         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2796         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2797         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2798         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2799         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2800         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2801         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2802
2803         /* For the 64-bit byte counters the low dword must be read first. */
2804         /* Both registers clear on the read of the high dword */
2805
2806         adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); 
2807         adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2808         adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2809         adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2810
2811         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2812         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2813         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2814         adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2815         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2816
2817         adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2818         adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2819         adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2820         adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2821
2822         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2823         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2824         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2825         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2826         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2827         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2828         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2829         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2830         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2831         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
2832
2833         if (adapter->hw.mac_type >= em_82543) {
2834                 adapter->stats.algnerrc += 
2835                     E1000_READ_REG(&adapter->hw, ALGNERRC);
2836                 adapter->stats.rxerrc += 
2837                     E1000_READ_REG(&adapter->hw, RXERRC);
2838                 adapter->stats.tncrs += 
2839                     E1000_READ_REG(&adapter->hw, TNCRS);
2840                 adapter->stats.cexterr += 
2841                     E1000_READ_REG(&adapter->hw, CEXTERR);
2842                 adapter->stats.tsctc += 
2843                     E1000_READ_REG(&adapter->hw, TSCTC);
2844                 adapter->stats.tsctfc += 
2845                     E1000_READ_REG(&adapter->hw, TSCTFC);
2846         }
2847         ifp = &adapter->interface_data.ac_if;
2848
2849         /* Fill out the OS statistics structure */
2850         ifp->if_ibytes = adapter->stats.gorcl;
2851         ifp->if_obytes = adapter->stats.gotcl;
2852         ifp->if_imcasts = adapter->stats.mprc;
2853         ifp->if_collisions = adapter->stats.colc;
2854
2855         /* Rx Errors */
2856         ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2857             adapter->stats.crcerrs + adapter->stats.algnerrc +
2858             adapter->stats.rlec + adapter->stats.rnbc +
2859             adapter->stats.mpc + adapter->stats.cexterr;
2860
2861         /* Tx Errors */
2862         ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2863 }
2864
2865
2866 /**********************************************************************
2867  *
2868  *  This routine is called only when em_display_debug_stats is enabled.
2869  *  It provides a way to examine important statistics maintained by
2870  *  the driver and the hardware.
2871  *
2872  **********************************************************************/
2873 static void
2874 em_print_debug_info(struct adapter *adapter)
2875 {
2876         device_t dev = adapter->dev;
2877         uint8_t *hw_addr = adapter->hw.hw_addr;
2878
2879         device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
2880         device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2881                       E1000_READ_REG(&adapter->hw, TIDV),
2882                       E1000_READ_REG(&adapter->hw, TADV));
2883         device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2884                       E1000_READ_REG(&adapter->hw, RDTR),
2885                       E1000_READ_REG(&adapter->hw, RADV));
2886 #ifdef DBG_STATS
2887         device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2888         device_printf(dev, "CleanTxInterrupts = %ld\n",
2889                       adapter->clean_tx_interrupts);
2890 #endif
2891         device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2892                       (long long)adapter->tx_fifo_wrk,
2893                       (long long)adapter->tx_fifo_reset);
2894         device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2895                       E1000_READ_REG(&adapter->hw, TDH),
2896                       E1000_READ_REG(&adapter->hw, TDT));
2897         device_printf(dev, "Num Tx descriptors avail = %d\n",
2898                       adapter->num_tx_desc_avail);
2899         device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2900                       adapter->no_tx_desc_avail1);
2901         device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2902                       adapter->no_tx_desc_avail2);
2903         device_printf(dev, "Std mbuf failed = %ld\n",
2904                       adapter->mbuf_alloc_failed);
2905         device_printf(dev, "Std mbuf cluster failed = %ld\n",
2906                       adapter->mbuf_cluster_failed);
2907         device_printf(dev, "Driver dropped packets = %ld\n",
2908                       adapter->dropped_pkts);
2909 }
2910
2911 static void
2912 em_print_hw_stats(struct adapter *adapter)
2913 {
2914         device_t dev = adapter->dev;
2915
2916         device_printf(dev, "Adapter: %p\n", adapter);
2917
2918         device_printf(dev, "Excessive collisions = %lld\n",
2919                       (long long)adapter->stats.ecol);
2920         device_printf(dev, "Symbol errors = %lld\n",
2921                       (long long)adapter->stats.symerrs);
2922         device_printf(dev, "Sequence errors = %lld\n",
2923                       (long long)adapter->stats.sec);
2924         device_printf(dev, "Defer count = %lld\n",
2925                       (long long)adapter->stats.dc);
2926
2927         device_printf(dev, "Missed Packets = %lld\n",
2928                       (long long)adapter->stats.mpc);
2929         device_printf(dev, "Receive No Buffers = %lld\n",
2930                       (long long)adapter->stats.rnbc);
2931         device_printf(dev, "Receive length errors = %lld\n",
2932                       (long long)adapter->stats.rlec);
2933         device_printf(dev, "Receive errors = %lld\n",
2934                       (long long)adapter->stats.rxerrc);
2935         device_printf(dev, "Crc errors = %lld\n",
2936                       (long long)adapter->stats.crcerrs);
2937         device_printf(dev, "Alignment errors = %lld\n",
2938                       (long long)adapter->stats.algnerrc);
2939         device_printf(dev, "Carrier extension errors = %lld\n",
2940                       (long long)adapter->stats.cexterr);
2941
2942         device_printf(dev, "XON Rcvd = %lld\n",
2943                       (long long)adapter->stats.xonrxc);
2944         device_printf(dev, "XON Xmtd = %lld\n",
2945                       (long long)adapter->stats.xontxc);
2946         device_printf(dev, "XOFF Rcvd = %lld\n",
2947                       (long long)adapter->stats.xoffrxc);
2948         device_printf(dev, "XOFF Xmtd = %lld\n",
2949                       (long long)adapter->stats.xofftxc);
2950
2951         device_printf(dev, "Good Packets Rcvd = %lld\n",
2952                       (long long)adapter->stats.gprc);
2953         device_printf(dev, "Good Packets Xmtd = %lld\n",
2954                       (long long)adapter->stats.gptc);
2955 }
2956
2957 static int
2958 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
2959 {
2960         int error;
2961         int result;
2962         struct adapter *adapter;
2963
2964         result = -1;
2965         error = sysctl_handle_int(oidp, &result, 0, req);
2966
2967         if (error || !req->newptr)
2968                 return(error);
2969
2970         if (result == 1) {
2971                 adapter = (struct adapter *)arg1;
2972                 em_print_debug_info(adapter);
2973         }
2974
2975         return(error);
2976 }
2977
2978 static int
2979 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
2980 {
2981         int error;
2982         int result;
2983         struct adapter *adapter;
2984
2985         result = -1;
2986         error = sysctl_handle_int(oidp, &result, 0, req);
2987
2988         if (error || !req->newptr)
2989                 return(error);
2990
2991         if (result == 1) {
2992                 adapter = (struct adapter *)arg1;
2993                 em_print_hw_stats(adapter);
2994         }
2995
2996         return(error);
2997 }
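
/*
 * Both handlers above act only when a value of 1 is written to their
 * OID from userland, e.g. "sysctl <em-node>.stats=1"; the exact node
 * path is created in em_attach and is not shown in this file.
 */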
2998
2999 static int
3000 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3001 {
3002         struct em_int_delay_info *info;
3003         struct adapter *adapter;
3004         uint32_t regval;
3005         int error;
3006         int usecs;
3007         int ticks;
3008
3009         info = (struct em_int_delay_info *)arg1;
3010         adapter = info->adapter;
3011         usecs = info->value;
3012         error = sysctl_handle_int(oidp, &usecs, 0, req);
3013         if (error != 0 || req->newptr == NULL)
3014                 return(error);
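        /*
         * The delay value occupies only the low 16 bits of the register
         * (see the masking below), hence the 65535-tick upper bound.
         */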
3015         if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3016                 return(EINVAL);
3017         info->value = usecs;
3018         ticks = E1000_USECS_TO_TICKS(usecs);
3019
3020         lwkt_serialize_enter(&adapter->serializer);
3021         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3022         regval = (regval & ~0xffff) | (ticks & 0xffff);
3023         /* Handle a few special cases. */
3024         switch (info->offset) {
3025         case E1000_RDTR:
3026         case E1000_82542_RDTR:
3027                 regval |= E1000_RDT_FPDB;
3028                 break;
3029         case E1000_TIDV:
3030         case E1000_82542_TIDV:
3031                 if (ticks == 0) {
3032                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3033                         /* Don't write 0 into the TIDV register. */
3034                         regval++;
3035                 } else
3036                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3037                 break;
3038         }
3039         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3040         lwkt_serialize_exit(&adapter->serializer);
3041         return(0);
3042 }
3043
3044 static void
3045 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3046                         const char *description, struct em_int_delay_info *info,
3047                         int offset, int value)
3048 {
3049         info->adapter = adapter;
3050         info->offset = offset;
3051         info->value = value;
3052         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3053                         SYSCTL_CHILDREN(adapter->sysctl_tree),
3054                         OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3055                         info, 0, em_sysctl_int_delay, "I", description);
3056 }
3057
3058 static int
3059 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3060 {
3061         struct adapter *adapter = (void *)arg1;
3062         int error;
3063         int throttle;
3064
3065         throttle = em_int_throttle_ceil;
3066         error = sysctl_handle_int(oidp, &throttle, 0, req);
3067         if (error || req->newptr == NULL)
3068                 return error;
3069         if (throttle < 0 || throttle > 1000000000 / 256)
3070                 return EINVAL;
3071         if (throttle) {
3072                 /*
3073                  * Program the interrupt throttling rate in 256ns increments and
3074                  * recompute the ceiling so it reflects the exact programmed rate.
3075                  */
3076                 throttle = 1000000000 / 256 / throttle;
3077                 lwkt_serialize_enter(&adapter->serializer);
3078                 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3079                 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3080                 lwkt_serialize_exit(&adapter->serializer);
3081         } else {
3082                 lwkt_serialize_enter(&adapter->serializer);
3083                 em_int_throttle_ceil = 0;
3084                 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3085                 lwkt_serialize_exit(&adapter->serializer);
3086         }
3087         device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n", 
3088                         em_int_throttle_ceil);
3089         return 0;
3090 }
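
/*
 * Example of the arithmetic above: requesting 8000 interrupts/sec programs
 * ITR = (1000000000 / 256) / 8000 = 488 (in 256ns units), and the ceiling
 * reported back is (1000000000 / 256) / 488 = 8004 interrupts/sec.
 */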
3091