1 /*
2  *
3  * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4  *
5  * Copyright (c) 2001-2005, Intel Corporation
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  * 
11  *  1. Redistributions of source code must retain the above copyright notice,
12  *     this list of conditions and the following disclaimer.
13  * 
14  *  2. Redistributions in binary form must reproduce the above copyright
15  *     notice, this list of conditions and the following disclaimer in the
16  *     documentation and/or other materials provided with the distribution.
17  * 
18  *  3. Neither the name of the Intel Corporation nor the names of its
19  *     contributors may be used to endorse or promote products derived from
20  *     this software without specific prior written permission.
21  * 
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  *
34  *
35  * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
36  * 
37  * This code is derived from software contributed to The DragonFly Project
38  * by Matthew Dillon <dillon@backplane.com>
39  * 
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  * 3. Neither the name of The DragonFly Project nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific, prior written permission.
53  * 
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
57  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
58  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
60  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
61  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
62  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
63  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
64  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65  * SUCH DAMAGE.
66  * 
67  * $DragonFly: src/sys/dev/netif/em/if_em.c,v 1.44 2005/11/28 17:13:42 dillon Exp $
68  * $FreeBSD$
69  */
70 /*
71  * SERIALIZATION API RULES:
72  *
73  * - If the driver uses the same serializer for the interrupt as for the
74  *   ifnet, most of the serialization will be done automatically for the
75  *   driver.  
76  *
77  * - ifmedia entry points will be serialized by the ifmedia code using the
78  *   ifnet serializer.
79  *
80  * - if_* entry points except for if_input will be serialized by the IF
81  *   and protocol layers.
82  *
83  * - The device driver must be sure to serialize access from timeout code
84  *   installed by the device driver.
85  *
86  * - The device driver typically holds the serializer at the time it wishes
87  *   to call if_input.  If so, it should pass the serializer to if_input and
88  *   note that the serializer might be dropped temporarily by if_input 
89  *   (e.g. in case it has to bridge the packet to another interface).
90  *
91  *   NOTE!  Since callers into the device driver hold the ifnet serializer,
92  *   the device driver may be holding a serializer at the time it calls
93  *   if_input even if it is not serializer-aware.
94  */
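/*
 * Editorial sketch (not part of the original driver): code that runs
 * outside the normal if_* entry points, such as a callout, must take
 * the ifnet serializer itself before touching driver state, roughly:
 *
 *	lwkt_serialize_enter(ifp->if_serializer);
 *	... touch adapter state, possibly call if_input ...
 *	lwkt_serialize_exit(ifp->if_serializer);
 *
 * em_82547_move_tail() later in this file follows exactly this pattern.
 */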
95
96 #include "opt_polling.h"
97
98 #include <dev/netif/em/if_em.h>
99 #include <net/ifq_var.h>
100
101 /*********************************************************************
102  *  Set this to one to display debug statistics                                                   
103  *********************************************************************/
104 int             em_display_debug_stats = 0;
105
106 /*********************************************************************
107  *  Driver version
108  *********************************************************************/
109
110 char em_driver_version[] = "3.2.15";
111
112
113 /*********************************************************************
114  *  PCI Device ID Table
115  *
116  *  Used by probe to select the devices to load the driver on
117  *  Last field stores an index into em_strings
118  *  Last entry must be all 0s
119  *
120  *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
121  *********************************************************************/
122
123 static em_vendor_info_t em_vendor_info_array[] =
124 {
125         /* Intel(R) PRO/1000 Network Connection */
126         { 0x8086, E1000_DEV_ID_82540EM,         PCI_ANY_ID, PCI_ANY_ID, 0},
127         { 0x8086, E1000_DEV_ID_82540EM_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
128         { 0x8086, E1000_DEV_ID_82540EP,         PCI_ANY_ID, PCI_ANY_ID, 0},
129         { 0x8086, E1000_DEV_ID_82540EP_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
130         { 0x8086, E1000_DEV_ID_82540EP_LP,      PCI_ANY_ID, PCI_ANY_ID, 0},
131
132         { 0x8086, E1000_DEV_ID_82541EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
133         { 0x8086, E1000_DEV_ID_82541ER,         PCI_ANY_ID, PCI_ANY_ID, 0},
134         { 0x8086, E1000_DEV_ID_82541EI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
135         { 0x8086, E1000_DEV_ID_82541GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
136         { 0x8086, E1000_DEV_ID_82541GI_LF,      PCI_ANY_ID, PCI_ANY_ID, 0},
137         { 0x8086, E1000_DEV_ID_82541GI_MOBILE,  PCI_ANY_ID, PCI_ANY_ID, 0},
138
139         { 0x8086, E1000_DEV_ID_82542,           PCI_ANY_ID, PCI_ANY_ID, 0},
140
141         { 0x8086, E1000_DEV_ID_82543GC_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
142         { 0x8086, E1000_DEV_ID_82543GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
143
144         { 0x8086, E1000_DEV_ID_82544EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
145         { 0x8086, E1000_DEV_ID_82544EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
146         { 0x8086, E1000_DEV_ID_82544GC_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
147         { 0x8086, E1000_DEV_ID_82544GC_LOM,     PCI_ANY_ID, PCI_ANY_ID, 0},
148
149         { 0x8086, E1000_DEV_ID_82545EM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
150         { 0x8086, E1000_DEV_ID_82545EM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
151         { 0x8086, E1000_DEV_ID_82545GM_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
152         { 0x8086, E1000_DEV_ID_82545GM_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
153         { 0x8086, E1000_DEV_ID_82545GM_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
154
155         { 0x8086, E1000_DEV_ID_82546EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
156         { 0x8086, E1000_DEV_ID_82546EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
157         { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
158         { 0x8086, E1000_DEV_ID_82546GB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
159         { 0x8086, E1000_DEV_ID_82546GB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
160         { 0x8086, E1000_DEV_ID_82546GB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
161         { 0x8086, E1000_DEV_ID_82546GB_PCIE,    PCI_ANY_ID, PCI_ANY_ID, 0},
162 #ifdef KINGSPORT_PROJECT
163         { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
164 #endif  /* KINGSPORT_PROJECT */
165
166         { 0x8086, E1000_DEV_ID_82547EI,         PCI_ANY_ID, PCI_ANY_ID, 0},
167         { 0x8086, E1000_DEV_ID_82547GI,         PCI_ANY_ID, PCI_ANY_ID, 0},
168
169         { 0x8086, E1000_DEV_ID_82571EB_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
170         { 0x8086, E1000_DEV_ID_82571EB_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
171         { 0x8086, E1000_DEV_ID_82571EB_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
172
173         { 0x8086, E1000_DEV_ID_82572EI_COPPER,  PCI_ANY_ID, PCI_ANY_ID, 0},
174         { 0x8086, E1000_DEV_ID_82572EI_FIBER,   PCI_ANY_ID, PCI_ANY_ID, 0},
175         { 0x8086, E1000_DEV_ID_82572EI_SERDES,  PCI_ANY_ID, PCI_ANY_ID, 0},
176
177         { 0x8086, E1000_DEV_ID_82573E,          PCI_ANY_ID, PCI_ANY_ID, 0},
178         { 0x8086, E1000_DEV_ID_82573E_IAMT,     PCI_ANY_ID, PCI_ANY_ID, 0},
179         { 0x8086, E1000_DEV_ID_82573L,          PCI_ANY_ID, PCI_ANY_ID, 0},
180
181         { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
182         { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
183         /* required last entry */
184         { 0, 0, 0, 0, 0}
185 };
186
187 /*********************************************************************
188  *  Table of branding strings for all supported NICs.
189  *********************************************************************/
190
191 static const char *em_strings[] = {
192         "Intel(R) PRO/1000 Network Connection"
193 };
194
195 /*********************************************************************
196  *  Function prototypes            
197  *********************************************************************/
198 static int      em_probe(device_t);
199 static int      em_attach(device_t);
200 static int      em_detach(device_t);
201 static int      em_shutdown(device_t);
202 static void     em_intr(void *);
203 static void     em_start(struct ifnet *);
204 static int      em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
205 static void     em_watchdog(struct ifnet *);
206 static void     em_init(void *);
207 static void     em_stop(void *);
208 static void     em_media_status(struct ifnet *, struct ifmediareq *);
209 static int      em_media_change(struct ifnet *);
210 static void     em_identify_hardware(struct adapter *);
211 static void     em_local_timer(void *);
212 static int      em_hardware_init(struct adapter *);
213 static void     em_setup_interface(device_t, struct adapter *);
214 static int      em_setup_transmit_structures(struct adapter *);
215 static void     em_initialize_transmit_unit(struct adapter *);
216 static int      em_setup_receive_structures(struct adapter *);
217 static void     em_initialize_receive_unit(struct adapter *);
218 static void     em_enable_intr(struct adapter *);
219 static void     em_disable_intr(struct adapter *);
220 static void     em_free_transmit_structures(struct adapter *);
221 static void     em_free_receive_structures(struct adapter *);
222 static void     em_update_stats_counters(struct adapter *);
223 static void     em_clean_transmit_interrupts(struct adapter *);
224 static int      em_allocate_receive_structures(struct adapter *);
225 static int      em_allocate_transmit_structures(struct adapter *);
226 static void     em_process_receive_interrupts(struct adapter *, int);
227 static void     em_receive_checksum(struct adapter *, struct em_rx_desc *,
228                                     struct mbuf *);
229 static void     em_transmit_checksum_setup(struct adapter *, struct mbuf *,
230                                            uint32_t *, uint32_t *);
231 static void     em_set_promisc(struct adapter *);
232 static void     em_disable_promisc(struct adapter *);
233 static void     em_set_multi(struct adapter *);
234 static void     em_print_hw_stats(struct adapter *);
235 static void     em_print_link_status(struct adapter *);
236 static int      em_get_buf(int i, struct adapter *, struct mbuf *, int how);
237 static void     em_enable_vlans(struct adapter *);
238 static int      em_encap(struct adapter *, struct mbuf *);
239 static void     em_smartspeed(struct adapter *);
240 static int      em_82547_fifo_workaround(struct adapter *, int);
241 static void     em_82547_update_fifo_head(struct adapter *, int);
242 static int      em_82547_tx_fifo_reset(struct adapter *);
243 static void     em_82547_move_tail(void *arg);
244 static void     em_82547_move_tail_serialized(void *arg);
245 static int      em_dma_malloc(struct adapter *, bus_size_t,
246                               struct em_dma_alloc *, int);
247 static void     em_dma_free(struct adapter *, struct em_dma_alloc *);
248 static void     em_print_debug_info(struct adapter *);
249 static int      em_is_valid_ether_addr(uint8_t *);
250 static int      em_sysctl_stats(SYSCTL_HANDLER_ARGS);
251 static int      em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
252 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length, 
253                                    PDESC_ARRAY desc_array);
254 static int      em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
255 static int      em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
256 static void     em_add_int_delay_sysctl(struct adapter *, const char *,
257                                         const char *,
258                                         struct em_int_delay_info *, int, int);
259
260 /*********************************************************************
261  *  FreeBSD Device Interface Entry Points                    
262  *********************************************************************/
263
264 static device_method_t em_methods[] = {
265         /* Device interface */
266         DEVMETHOD(device_probe, em_probe),
267         DEVMETHOD(device_attach, em_attach),
268         DEVMETHOD(device_detach, em_detach),
269         DEVMETHOD(device_shutdown, em_shutdown),
270         {0, 0}
271 };
272
273 static driver_t em_driver = {
274         "em", em_methods, sizeof(struct adapter),
275 };
276
277 static devclass_t em_devclass;
278
279 DECLARE_DUMMY_MODULE(if_em);
280 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
281
282 /*********************************************************************
283  *  Tunable default values.
284  *********************************************************************/
285
286 #define E1000_TICKS_TO_USECS(ticks)     ((1024 * (ticks) + 500) / 1000)
287 #define E1000_USECS_TO_TICKS(usecs)     ((1000 * (usecs) + 512) / 1024)
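/*
 * Editorial worked example: one interrupt-delay tick is 1.024 usecs, so a
 * register value of 64 ticks converts as E1000_TICKS_TO_USECS(64) =
 * (1024 * 64 + 500) / 1000 = 66 usecs, and back as
 * E1000_USECS_TO_TICKS(66) = (1000 * 66 + 512) / 1024 = 64 ticks.
 */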
288
289 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
290 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
291 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
292 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
293 static int em_int_throttle_ceil = 10000;
294
295 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
296 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
297 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
298 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
299 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
300
301 /*********************************************************************
302  *  Device identification routine
303  *
304  *  em_probe determines if the driver should be loaded on the
305  *  adapter, based on the PCI vendor/device ID of the adapter.
306  *
307  *  return 0 on success, positive on failure
308  *********************************************************************/
309
310 static int
311 em_probe(device_t dev)
312 {
313         em_vendor_info_t *ent;
314
315         uint16_t pci_vendor_id = 0;
316         uint16_t pci_device_id = 0;
317         uint16_t pci_subvendor_id = 0;
318         uint16_t pci_subdevice_id = 0;
319         char adapter_name[60];
320
321         INIT_DEBUGOUT("em_probe: begin");
322
323         pci_vendor_id = pci_get_vendor(dev);
324         if (pci_vendor_id != EM_VENDOR_ID)
325                 return(ENXIO);
326
327         pci_device_id = pci_get_device(dev);
328         pci_subvendor_id = pci_get_subvendor(dev);
329         pci_subdevice_id = pci_get_subdevice(dev);
330
331         ent = em_vendor_info_array;
332         while (ent->vendor_id != 0) {
333                 if ((pci_vendor_id == ent->vendor_id) &&
334                     (pci_device_id == ent->device_id) &&
335
336                     ((pci_subvendor_id == ent->subvendor_id) ||
337                      (ent->subvendor_id == PCI_ANY_ID)) &&
338
339                     ((pci_subdevice_id == ent->subdevice_id) ||
340                      (ent->subdevice_id == PCI_ANY_ID))) {
341                         snprintf(adapter_name, sizeof(adapter_name),
342                                  "%s, Version - %s",  em_strings[ent->index], 
343                                  em_driver_version);
344                         device_set_desc_copy(dev, adapter_name);
345                         return(0);
346                 }
347                 ent++;
348         }
349
350         return(ENXIO);
351 }
352
353 /*********************************************************************
354  *  Device initialization routine
355  *
356  *  The attach entry point is called when the driver is being loaded.
357  *  This routine identifies the type of hardware, allocates all resources 
358  *  and initializes the hardware.     
359  *  
360  *  return 0 on success, positive on failure
361  *********************************************************************/
362
363 static int
364 em_attach(device_t dev)
365 {
366         struct adapter *adapter;
367         int tsize, rsize;
368         int i, val, rid;
369         int error = 0;
370
371         INIT_DEBUGOUT("em_attach: begin");
372
373         adapter = device_get_softc(dev);
374
375         callout_init(&adapter->timer);
376         callout_init(&adapter->tx_fifo_timer);
377
378         adapter->dev = dev;
379         adapter->osdep.dev = dev;
380
381         /* SYSCTL stuff */
382         sysctl_ctx_init(&adapter->sysctl_ctx);
383         adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
384                                                SYSCTL_STATIC_CHILDREN(_hw),
385                                                OID_AUTO, 
386                                                device_get_nameunit(dev),
387                                                CTLFLAG_RD,
388                                                0, "");
389
390         if (adapter->sysctl_tree == NULL) {
391                 error = EIO;
392                 goto fail;
393         }
394
395         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
396                         SYSCTL_CHILDREN(adapter->sysctl_tree),
397                         OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, 
398                         (void *)adapter, 0,
399                         em_sysctl_debug_info, "I", "Debug Information");
400
401         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,  
402                         SYSCTL_CHILDREN(adapter->sysctl_tree),
403                         OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, 
404                         (void *)adapter, 0,
405                         em_sysctl_stats, "I", "Statistics");
406
407         /* Determine hardware revision */
408         em_identify_hardware(adapter);
409
410         /* Set up some sysctls for the tunable interrupt delays */
411         em_add_int_delay_sysctl(adapter, "rx_int_delay",
412                                 "receive interrupt delay in usecs",
413                                 &adapter->rx_int_delay,
414                                 E1000_REG_OFFSET(&adapter->hw, RDTR),
415                                 em_rx_int_delay_dflt);
416         em_add_int_delay_sysctl(adapter, "tx_int_delay",
417                                 "transmit interrupt delay in usecs",
418                                 &adapter->tx_int_delay,
419                                 E1000_REG_OFFSET(&adapter->hw, TIDV),
420                                 em_tx_int_delay_dflt);
421         if (adapter->hw.mac_type >= em_82540) {
422                 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
423                                         "receive interrupt delay limit in usecs",
424                                         &adapter->rx_abs_int_delay,
425                                         E1000_REG_OFFSET(&adapter->hw, RADV),
426                                         em_rx_abs_int_delay_dflt);
427                 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
428                                         "transmit interrupt delay limit in usecs",
429                                         &adapter->tx_abs_int_delay,
430                                         E1000_REG_OFFSET(&adapter->hw, TADV),
431                                         em_tx_abs_int_delay_dflt);
432                 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
433                         SYSCTL_CHILDREN(adapter->sysctl_tree),
434                         OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
435                         adapter, 0, em_sysctl_int_throttle, "I", NULL);
436         }
437
438         /* Parameters (to be read from user) */   
439         adapter->num_tx_desc = EM_MAX_TXD;
440         adapter->num_rx_desc = EM_MAX_RXD;
441         adapter->hw.autoneg = DO_AUTO_NEG;
442         adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
443         adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
444         adapter->hw.tbi_compatibility_en = TRUE;
445         adapter->rx_buffer_len = EM_RXBUFFER_2048;
446
447         adapter->hw.phy_init_script = 1;
448         adapter->hw.phy_reset_disable = FALSE;
449
450 #ifndef EM_MASTER_SLAVE
451         adapter->hw.master_slave = em_ms_hw_default;
452 #else
453         adapter->hw.master_slave = EM_MASTER_SLAVE;
454 #endif
455
456         /* 
457          * Set the max frame size assuming standard ethernet 
458          * sized frames 
459          */   
460         adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
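        /* (with the standard 1500-byte MTU this is 1500 + 14 + 4 = 1518 bytes) */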
461
462         adapter->hw.min_frame_size = 
463             MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
464
465         /* 
466          * This controls when hardware reports transmit completion 
467          * status. 
468          */
469         adapter->hw.report_tx_early = 1;
470
471         rid = EM_MMBA;
472         adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
473                                                      &rid, RF_ACTIVE);
474         if (!(adapter->res_memory)) {
475                 device_printf(dev, "Unable to allocate bus resource: memory\n");
476                 error = ENXIO;
477                 goto fail;
478         }
479         adapter->osdep.mem_bus_space_tag = 
480             rman_get_bustag(adapter->res_memory);
481         adapter->osdep.mem_bus_space_handle = 
482             rman_get_bushandle(adapter->res_memory);
483         adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
484
485         if (adapter->hw.mac_type > em_82543) {
486                 /* Figure out where our I/O BAR is */
487                 rid = EM_MMBA;
488                 for (i = 0; i < 5; i++) {
489                         val = pci_read_config(dev, rid, 4);
490                         if (val & 0x00000001) {
491                                 adapter->io_rid = rid;
492                                 break;
493                         }
494                         rid += 4;
495                 }
496
497                 adapter->res_ioport = bus_alloc_resource_any(dev,
498                     SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
499                 if (!(adapter->res_ioport)) {
500                         device_printf(dev, "Unable to allocate bus resource: ioport\n");
501                         error = ENXIO;
502                         goto fail;
503                 }
504
505                 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
506                 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
507         }
508
509         rid = 0x0;
510         adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
511             &rid, RF_SHAREABLE | RF_ACTIVE);
512         if (!(adapter->res_interrupt)) {
513                 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
514                 error = ENXIO;
515                 goto fail;
516         }
517
518         adapter->hw.back = &adapter->osdep;
519
520         em_init_eeprom_params(&adapter->hw);
521
522         tsize = EM_ROUNDUP(adapter->num_tx_desc *
523                            sizeof(struct em_tx_desc), 4096);
524
525         /* Allocate Transmit Descriptor ring */
526         if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
527                 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
528                 error = ENOMEM;
529                 goto fail;
530         }
531         adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
532
533         rsize = EM_ROUNDUP(adapter->num_rx_desc *
534                            sizeof(struct em_rx_desc), 4096);
535
536         /* Allocate Receive Descriptor ring */
537         if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
538                 device_printf(dev, "Unable to allocate rx_desc memory\n");
539                 error = ENOMEM;
540                 goto fail;
541         }
542         adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
543
544         /* Initialize the hardware */
545         if (em_hardware_init(adapter)) {
546                 device_printf(dev, "Unable to initialize the hardware\n");
547                 error = EIO;
548                 goto fail;
549         }
550
551         /* Copy the permanent MAC address out of the EEPROM */
552         if (em_read_mac_addr(&adapter->hw) < 0) {
553                 device_printf(dev, "EEPROM read error while reading mac address\n");
554                 error = EIO;
555                 goto fail;
556         }
557
558         if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
559                 device_printf(dev, "Invalid mac address\n");
560                 error = EIO;
561                 goto fail;
562         }
563
564         /* Setup OS specific network interface */
565         em_setup_interface(dev, adapter);
566
567         /* Initialize statistics */
568         em_clear_hw_cntrs(&adapter->hw);
569         em_update_stats_counters(adapter);
570         adapter->hw.get_link_status = 1;
571         em_check_for_link(&adapter->hw);
572
573         /* Print the link status */
574         if (adapter->link_active == 1) {
575                 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, 
576                                         &adapter->link_duplex);
577                 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
578                     adapter->link_speed,
579                     adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
580         } else
581                 device_printf(dev, "Speed: N/A, Duplex:N/A\n");
582
583         /* Identify 82544 on PCIX */
584         em_get_bus_info(&adapter->hw);  
585         if (adapter->hw.bus_type == em_bus_type_pcix &&
586             adapter->hw.mac_type == em_82544)
587                 adapter->pcix_82544 = TRUE;
588         else
589                 adapter->pcix_82544 = FALSE;
590
591         error = bus_setup_intr(dev, adapter->res_interrupt, INTR_NETSAFE,
592                            (void (*)(void *)) em_intr, adapter,
593                            &adapter->int_handler_tag,
594                            adapter->interface_data.ac_if.if_serializer);
595         if (error) {
596                 device_printf(dev, "Error registering interrupt handler!\n");
597                 ether_ifdetach(&adapter->interface_data.ac_if);
598                 goto fail;
599         }
600
601         INIT_DEBUGOUT("em_attach: end");
602         return(0);
603
604 fail:
605         em_detach(dev);
606         return(error);
607 }
608
609 /*********************************************************************
610  *  Device removal routine
611  *
612  *  The detach entry point is called when the driver is being removed.
613  *  This routine stops the adapter and deallocates all the resources
614  *  that were allocated for driver operation.
615  *  
616  *  return 0 on success, positive on failure
617  *********************************************************************/
618
619 static int
620 em_detach(device_t dev)
621 {
622         struct adapter *adapter = device_get_softc(dev);
623
624         INIT_DEBUGOUT("em_detach: begin");
625
626         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
627         adapter->in_detach = 1;
628
629         if (device_is_attached(dev)) {
630                 em_stop(adapter);
631                 em_phy_hw_reset(&adapter->hw);
632                 ether_ifdetach(&adapter->interface_data.ac_if);
633         }
634         bus_generic_detach(dev);
635
636         if (adapter->int_handler_tag != NULL) {
637                 bus_teardown_intr(dev, adapter->res_interrupt, 
638                                   adapter->int_handler_tag);
639         }
640         if (adapter->res_interrupt != NULL) {
641                 bus_release_resource(dev, SYS_RES_IRQ, 0, 
642                                      adapter->res_interrupt);
643         }
644         if (adapter->res_memory != NULL) {
645                 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA, 
646                                      adapter->res_memory);
647         }
648
649         if (adapter->res_ioport != NULL) {
650                 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, 
651                                      adapter->res_ioport);
652         }
653
654         /* Free Transmit Descriptor ring */
655         if (adapter->tx_desc_base != NULL) {
656                 em_dma_free(adapter, &adapter->txdma);
657                 adapter->tx_desc_base = NULL;
658         }
659
660         /* Free Receive Descriptor ring */
661         if (adapter->rx_desc_base != NULL) {
662                 em_dma_free(adapter, &adapter->rxdma);
663                 adapter->rx_desc_base = NULL;
664         }
665
666         adapter->sysctl_tree = NULL;
667         sysctl_ctx_free(&adapter->sysctl_ctx);
668
669         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
670         return(0);
671 }
672
673 /*********************************************************************
674  *
675  *  Shutdown entry point
676  *
677  **********************************************************************/ 
678
679 static int
680 em_shutdown(device_t dev)
681 {
682         struct adapter *adapter = device_get_softc(dev);
683         em_stop(adapter);
684         return(0);
685 }
686
687 /*********************************************************************
688  *  Transmit entry point
689  *
690  *  em_start is called by the stack to initiate a transmit.
691  *  The driver will remain in this routine as long as there are
692  *  packets to transmit and transmit resources are available.
694  *  In case resources are not available, the stack is notified and
694  *  the packet is requeued.
695  **********************************************************************/
696
697 static void
698 em_start(struct ifnet *ifp)
699 {
700         struct mbuf *m_head;
701         struct adapter *adapter = ifp->if_softc;
702
703         ASSERT_SERIALIZED(adapter->interface_data.ac_if.if_serializer);
704
705         if (!adapter->link_active)
706                 return;
707         while (!ifq_is_empty(&ifp->if_snd)) {
708                 m_head = ifq_poll(&ifp->if_snd);
709
710                 if (m_head == NULL)
711                         break;
712
713                 if (em_encap(adapter, m_head)) { 
714                         ifp->if_flags |= IFF_OACTIVE;
715                         break;
716                 }
717                 ifq_dequeue(&ifp->if_snd, m_head);
718
719                 /* Send a copy of the frame to the BPF listener */
720                 BPF_MTAP(ifp, m_head);
721         
722                 /* Set timeout in case hardware has problems transmitting */
723                 ifp->if_timer = EM_TX_TIMEOUT;        
724         }
725 }
726
727 /*********************************************************************
728  *  Ioctl entry point
729  *
730  *  em_ioctl is called when the user wants to configure the
731  *  interface.
732  *
733  *  return 0 on success, positive on failure
734  **********************************************************************/
735
736 static int
737 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
738 {
739         int max_frame_size, mask, error = 0;
740         struct ifreq *ifr = (struct ifreq *) data;
741         struct adapter *adapter = ifp->if_softc;
742
743         ASSERT_SERIALIZED(adapter->interface_data.ac_if.if_serializer);
744
745         if (adapter->in_detach)
746                 goto out;
747
748         switch (command) {
749         case SIOCSIFADDR:
750         case SIOCGIFADDR:
751                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
752                 ether_ioctl(ifp, command, data);
753                 break;
754         case SIOCSIFMTU:
755                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
756                 switch (adapter->hw.mac_type) {
757                 case em_82571:
758                 case em_82572:
759                         max_frame_size = 10500;
760                         break;
761                 case em_82573:
762                         /* 82573 does not support jumbo frames */
763                         max_frame_size = ETHER_MAX_LEN;
764                         break;
765                 default:
766                         max_frame_size = MAX_JUMBO_FRAME_SIZE;
767                         break;
768                 }
769                 if (ifr->ifr_mtu >
770                         max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
771                         error = EINVAL;
772                 } else {
773                         ifp->if_mtu = ifr->ifr_mtu;
774                         adapter->hw.max_frame_size = 
775                         ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
776                         em_init(adapter);
777                 }
778                 break;
779         case SIOCSIFFLAGS:
780                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
781                 if (ifp->if_flags & IFF_UP) {
782                         if (!(ifp->if_flags & IFF_RUNNING))
783                                 em_init(adapter);
784                         em_disable_promisc(adapter);
785                         em_set_promisc(adapter);
786                 } else {
787                         if (ifp->if_flags & IFF_RUNNING)
788                                 em_stop(adapter);
789                 }
790                 break;
791         case SIOCADDMULTI:
792         case SIOCDELMULTI:
793                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
794                 if (ifp->if_flags & IFF_RUNNING) {
795                         em_disable_intr(adapter);
796                         em_set_multi(adapter);
797                         if (adapter->hw.mac_type == em_82542_rev2_0)
798                                 em_initialize_receive_unit(adapter);
799                         em_enable_intr(adapter);
800                 }
801                 break;
802         case SIOCSIFMEDIA:
803         case SIOCGIFMEDIA:
804                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
805                 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
806                 break;
807         case SIOCSIFCAP:
808                 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
809                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
810                 if (mask & IFCAP_HWCSUM) {
811                         if (IFCAP_HWCSUM & ifp->if_capenable)
812                                 ifp->if_capenable &= ~IFCAP_HWCSUM;
813                         else
814                                 ifp->if_capenable |= IFCAP_HWCSUM;
815                         if (ifp->if_flags & IFF_RUNNING)
816                                 em_init(adapter);
817                 }
818                 break;
819         default:
820                 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
821                 error = EINVAL;
822         }
823
824 out:
825         return(error);
826 }
827
828 /*********************************************************************
829  *  Watchdog entry point
830  *
831  *  This routine is called whenever the hardware stops transmitting.
832  *
833  **********************************************************************/
834
835 static void
836 em_watchdog(struct ifnet *ifp)
837 {
838         struct adapter * adapter;
839         adapter = ifp->if_softc;
840
841         /* If we are in this routine because of pause frames, then
842          * don't reset the hardware.
843          */
844         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
845                 ifp->if_timer = EM_TX_TIMEOUT;
846                 return;
847         }
848
849         if (em_check_for_link(&adapter->hw))
850                 if_printf(ifp, "watchdog timeout -- resetting\n");
851
852         ifp->if_flags &= ~IFF_RUNNING;
853
854         em_init(adapter);
855
856         ifp->if_oerrors++;
857 }
858
859 /*********************************************************************
860  *  Init entry point
861  *
862  *  This routine is used in two ways.  It is used by the stack as the
863  *  init entry point in the network interface structure.  It is also
864  *  used by the driver as a hw/sw initialization routine to get to a
865  *  consistent state.
866  *
867  *  This routine does not return a value; failures are reported via if_printf.
868  **********************************************************************/
869
870 static void
871 em_init(void *arg)
872 {
873         struct adapter *adapter = arg;
874         uint32_t pba;
875         struct ifnet *ifp = &adapter->interface_data.ac_if;
876
877         INIT_DEBUGOUT("em_init: begin");
878
879         em_stop(adapter);
880
881         /*
882          * Packet Buffer Allocation (PBA)
883          * Writing PBA sets the receive portion of the buffer;
884          * the remainder is used for the transmit buffer.
885          */
886         switch (adapter->hw.mac_type) {
887         case em_82547: 
888         case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
889                 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
890                         pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
891                 else
892                         pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
893
894                 adapter->tx_fifo_head = 0;
895                 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
896                 adapter->tx_fifo_size =
897                         (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
898                 break;
899         case em_82571: /* 82571: Total Packet Buffer is 48K */
900         case em_82572: /* 82572: Total Packet Buffer is 48K */
901                 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
902                 break;
903         case em_82573: /* 82573: Total Packet Buffer is 32K */
904                 /* Jumbo frames not supported */
905                 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
906                 break;
907         default:
908                 /* Devices before 82547 had a Packet Buffer of 64K.   */
909                 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
910                         pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
911                 else
912                         pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
913         }
914
915         INIT_DEBUGOUT1("em_init: pba=%dK",pba);
916         E1000_WRITE_REG(&adapter->hw, PBA, pba);
917
918         /* Get the latest mac address, User can use a LAA */
919         bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
920               ETHER_ADDR_LEN);
921
922         /* Initialize the hardware */
923         if (em_hardware_init(adapter)) {
924                 if_printf(ifp, "Unable to initialize the hardware\n");
925                 return;
926         }
927
928         em_enable_vlans(adapter);
929
930         /* Prepare transmit descriptors and buffers */
931         if (em_setup_transmit_structures(adapter)) {
932                 if_printf(ifp, "Could not setup transmit structures\n");
933                 em_stop(adapter); 
934                 return;
935         }
936         em_initialize_transmit_unit(adapter);
937
938         /* Setup Multicast table */
939         em_set_multi(adapter);
940
941         /* Prepare receive descriptors and buffers */
942         if (em_setup_receive_structures(adapter)) {
943                 if_printf(ifp, "Could not setup receive structures\n");
944                 em_stop(adapter);
945                 return;
946         }
947         em_initialize_receive_unit(adapter);
948
949         /* Don't lose promiscuous settings */
950         em_set_promisc(adapter);
951
952         ifp->if_flags |= IFF_RUNNING;
953         ifp->if_flags &= ~IFF_OACTIVE;
954
955         if (adapter->hw.mac_type >= em_82543) {
956                 if (ifp->if_capenable & IFCAP_TXCSUM)
957                         ifp->if_hwassist = EM_CHECKSUM_FEATURES;
958                 else
959                         ifp->if_hwassist = 0;
960         }
961
962         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
963         em_clear_hw_cntrs(&adapter->hw);
964         em_enable_intr(adapter);
965
966         /* Don't reset the phy next time init gets called */
967         adapter->hw.phy_reset_disable = TRUE;
968 }
969
970 #ifdef DEVICE_POLLING
971
972 static void
973 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
974 {
975         struct adapter *adapter = ifp->if_softc;
976         uint32_t reg_icr;
977
978         ASSERT_SERIALIZED(ifp->if_serializer);
979
980         switch(cmd) {
981         case POLL_REGISTER:
982                 em_disable_intr(adapter);
983                 break;
984         case POLL_DEREGISTER:
985                 em_enable_intr(adapter);
986                 break;
987         case POLL_AND_CHECK_STATUS:
988                 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
989                 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
990                         callout_stop(&adapter->timer);
991                         adapter->hw.get_link_status = 1;
992                         em_check_for_link(&adapter->hw);
993                         em_print_link_status(adapter);
994                         callout_reset(&adapter->timer, hz, em_local_timer,
995                                       adapter);
996                 }
997                 /* fall through */
998         case POLL_ONLY:
999                 if (ifp->if_flags & IFF_RUNNING) {
1000                         em_process_receive_interrupts(adapter, count);
1001                         em_clean_transmit_interrupts(adapter);
1002                 }
1003                 if (ifp->if_flags & IFF_RUNNING) {
1004                         if (!ifq_is_empty(&ifp->if_snd))
1005                                 em_start(ifp);
1006                 }
1007                 break;
1008         }
1009 }
1010
1011 #endif /* DEVICE_POLLING */
1012
1013 /*********************************************************************
1014  *
1015  *  Interrupt Service routine
1016  *
1017  **********************************************************************/
1018 static void
1019 em_intr(void *arg)
1020 {
1021         uint32_t reg_icr;
1022         struct ifnet *ifp;
1023         struct adapter *adapter = arg;
1024
1025         ifp = &adapter->interface_data.ac_if;  
1026
1027         ASSERT_SERIALIZED(ifp->if_serializer);
1028
1029         reg_icr = E1000_READ_REG(&adapter->hw, ICR);
1030         if (!reg_icr)
1031                 return;
1032
1033         /* Link status change */
1034         if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1035                 callout_stop(&adapter->timer);
1036                 adapter->hw.get_link_status = 1;
1037                 em_check_for_link(&adapter->hw);
1038                 em_print_link_status(adapter);
1039                 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1040         }
1041
1042         /*
1043          * note: do not attempt to improve efficiency by looping.  This 
1044          * only results in unnecessary piecemeal collection of received
1045          * packets and unnecessary piecemeal cleanups of the transmit ring.
1046          */
1047         if (ifp->if_flags & IFF_RUNNING) {
1048                 em_process_receive_interrupts(adapter, -1);
1049                 em_clean_transmit_interrupts(adapter);
1050         }
1051
1052         if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
1053                 em_start(ifp);
1054 }
1055
1056 /*********************************************************************
1057  *
1058  *  Media Ioctl callback
1059  *
1060  *  This routine is called whenever the user queries the status of
1061  *  the interface using ifconfig.
1062  *
1063  **********************************************************************/
1064 static void
1065 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1066 {
1067         struct adapter * adapter = ifp->if_softc;
1068
1069         INIT_DEBUGOUT("em_media_status: begin");
1070
1071         ASSERT_SERIALIZED(ifp->if_serializer);
1072
1073         em_check_for_link(&adapter->hw);
1074         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1075                 if (adapter->link_active == 0) {
1076                         em_get_speed_and_duplex(&adapter->hw, 
1077                                                 &adapter->link_speed, 
1078                                                 &adapter->link_duplex);
1079                         adapter->link_active = 1;
1080                 }
1081         } else {
1082                 if (adapter->link_active == 1) {
1083                         adapter->link_speed = 0;
1084                         adapter->link_duplex = 0;
1085                         adapter->link_active = 0;
1086                 }
1087         }
1088
1089         ifmr->ifm_status = IFM_AVALID;
1090         ifmr->ifm_active = IFM_ETHER;
1091
1092         if (!adapter->link_active)
1093                 return;
1094
1095         ifmr->ifm_status |= IFM_ACTIVE;
1096
1097         if (adapter->hw.media_type == em_media_type_fiber) {
1098                 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1099         } else {
1100                 switch (adapter->link_speed) {
1101                 case 10:
1102                         ifmr->ifm_active |= IFM_10_T;
1103                         break;
1104                 case 100:
1105                         ifmr->ifm_active |= IFM_100_TX;
1106                         break;
1107                 case 1000:
1108                         ifmr->ifm_active |= IFM_1000_T;
1109                         break;
1110                 }
1111                 if (adapter->link_duplex == FULL_DUPLEX)
1112                         ifmr->ifm_active |= IFM_FDX;
1113                 else
1114                         ifmr->ifm_active |= IFM_HDX;
1115         }
1116 }
1117
1118 /*********************************************************************
1119  *
1120  *  Media Ioctl callback
1121  *
1122  *  This routine is called when the user changes speed/duplex using
1123  *  media/mediaopt option with ifconfig.
1124  *
1125  **********************************************************************/
1126 static int
1127 em_media_change(struct ifnet *ifp)
1128 {
1129         struct adapter * adapter = ifp->if_softc;
1130         struct ifmedia  *ifm = &adapter->media;
1131
1132         INIT_DEBUGOUT("em_media_change: begin");
1133
1134         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1135                 return(EINVAL);
1136
1137         ASSERT_SERIALIZED(ifp->if_serializer);
1138
1139         switch (IFM_SUBTYPE(ifm->ifm_media)) {
1140         case IFM_AUTO:
1141                 adapter->hw.autoneg = DO_AUTO_NEG;
1142                 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1143                 break;
1144         case IFM_1000_SX:
1145         case IFM_1000_T:
1146                 adapter->hw.autoneg = DO_AUTO_NEG;
1147                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1148                 break;
1149         case IFM_100_TX:
1150                 adapter->hw.autoneg = FALSE;
1151                 adapter->hw.autoneg_advertised = 0;
1152                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1153                         adapter->hw.forced_speed_duplex = em_100_full;
1154                 else
1155                         adapter->hw.forced_speed_duplex = em_100_half;
1156                 break;
1157         case IFM_10_T:
1158                 adapter->hw.autoneg = FALSE;
1159                 adapter->hw.autoneg_advertised = 0;
1160                 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1161                         adapter->hw.forced_speed_duplex = em_10_full;
1162                 else
1163                         adapter->hw.forced_speed_duplex = em_10_half;
1164                 break;
1165         default:
1166                 if_printf(ifp, "Unsupported media type\n");
1167         }
1168         /*
1169          * As the speed/duplex settings may have changed we need to
1170          * reset the PHY.
1171          */
1172         adapter->hw.phy_reset_disable = FALSE;
1173
1174         em_init(adapter);
1175
1176         return(0);
1177 }
1178
1179 static void
1180 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1181          int error)
1182 {
1183         struct em_q *q = arg;
1184
1185         if (error)
1186                 return;
1187         KASSERT(nsegs <= EM_MAX_SCATTER,
1188                 ("Too many DMA segments returned when mapping tx packet"));
1189         q->nsegs = nsegs;
1190         bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1191 }
1192
1193 /*********************************************************************
1194  *
1195  *  This routine maps the mbufs to tx descriptors.
1196  *
1197  *  return 0 on success, positive on failure
1198  **********************************************************************/
1199 static int
1200 em_encap(struct adapter *adapter, struct mbuf *m_head)
1201 {
1202         uint32_t txd_upper;
1203         uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1204         int i, j, error;
1205         uint64_t address;
1206
1207         /* For 82544 Workaround */
1208         DESC_ARRAY desc_array;
1209         uint32_t array_elements;
1210         uint32_t counter;
1211
1212         struct ifvlan *ifv = NULL;
1213         struct em_q q;
1214         struct em_buffer *tx_buffer = NULL;
1215         struct em_tx_desc *current_tx_desc = NULL;
1216         struct ifnet *ifp = &adapter->interface_data.ac_if;
1217
1218         /*
1219          * Force a cleanup if number of TX descriptors
1220          * available hits the threshold
1221          */
1222         if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1223                 em_clean_transmit_interrupts(adapter);
1224                 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1225                         adapter->no_tx_desc_avail1++;
1226                         return(ENOBUFS);
1227                 }
1228         }
1229         /*
1230          * Map the packet for DMA.
1231          */
1232         if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1233                 adapter->no_tx_map_avail++;
1234                 return(ENOMEM);
1235         }
1236         error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1237                                      &q, BUS_DMA_NOWAIT);
1238         if (error != 0) {
1239                 adapter->no_tx_dma_setup++;
1240                 bus_dmamap_destroy(adapter->txtag, q.map);
1241                 return(error);
1242         }
1243         KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1244
1245         if (q.nsegs > adapter->num_tx_desc_avail) {
1246                 adapter->no_tx_desc_avail2++;
1247                 bus_dmamap_unload(adapter->txtag, q.map);
1248                 bus_dmamap_destroy(adapter->txtag, q.map);
1249                 return(ENOBUFS);
1250         }
1251
1252         if (ifp->if_hwassist > 0) {
1253                 em_transmit_checksum_setup(adapter,  m_head,
1254                                            &txd_upper, &txd_lower);
1255         } else {
1256                 txd_upper = txd_lower = 0;
1257         }
1258
1259         /* Find out if we are in vlan mode */
1260         if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1261             m_head->m_pkthdr.rcvif != NULL &&
1262             m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1263                 ifv = m_head->m_pkthdr.rcvif->if_softc;
1264
1265         i = adapter->next_avail_tx_desc;
1266         if (adapter->pcix_82544) {
1267                 txd_saved = i;
1268                 txd_used = 0;
1269         }
1270         for (j = 0; j < q.nsegs; j++) {
1271                 /* If adapter is 82544 and on PCIX bus */
1272                 if(adapter->pcix_82544) {
1273                         array_elements = 0;
1274                         address = htole64(q.segs[j].ds_addr);
1275                         /* 
1276                          * Check the Address and Length combination and
1277                          * split the data accordingly
1278                          */
1279                         array_elements = em_fill_descriptors(address,
1280                                                              htole32(q.segs[j].ds_len),
1281                                                              &desc_array);
1282                         for (counter = 0; counter < array_elements; counter++) {
1283                                 if (txd_used == adapter->num_tx_desc_avail) {
1284                                         adapter->next_avail_tx_desc = txd_saved;
1285                                         adapter->no_tx_desc_avail2++;
1286                                         bus_dmamap_unload(adapter->txtag, q.map);
1287                                         bus_dmamap_destroy(adapter->txtag, q.map);
1288                                         return(ENOBUFS);
1289                                 }
1290                                 tx_buffer = &adapter->tx_buffer_area[i];
1291                                 current_tx_desc = &adapter->tx_desc_base[i];
1292                                 current_tx_desc->buffer_addr = htole64(
1293                                 desc_array.descriptor[counter].address);
1294                                 current_tx_desc->lower.data = htole32(
1295                                 (adapter->txd_cmd | txd_lower | 
1296                                 (uint16_t)desc_array.descriptor[counter].length));
1297                                 current_tx_desc->upper.data = htole32((txd_upper));
1298                                 if (++i == adapter->num_tx_desc)
1299                                         i = 0;
1300
1301                                 tx_buffer->m_head = NULL;
1302                                 txd_used++;
1303                         }
1304                 } else {
1305                         tx_buffer = &adapter->tx_buffer_area[i];
1306                         current_tx_desc = &adapter->tx_desc_base[i];
1307
1308                         current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1309                         current_tx_desc->lower.data = htole32(
1310                                 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1311                         current_tx_desc->upper.data = htole32(txd_upper);
1312
1313                         if (++i == adapter->num_tx_desc)
1314                                 i = 0;
1315
1316                         tx_buffer->m_head = NULL;
1317                 }
1318         }
1319
1320         adapter->next_avail_tx_desc = i;
1321         if (adapter->pcix_82544)
1322                 adapter->num_tx_desc_avail -= txd_used;
1323         else
1324                 adapter->num_tx_desc_avail -= q.nsegs;
1325
1326         if (ifv != NULL) {
1327                 /* Set the vlan id */
1328                 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1329
1330                 /* Tell hardware to add tag */
1331                 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1332         }
1333
1334         tx_buffer->m_head = m_head;
1335         tx_buffer->map = q.map;
1336         bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1337
1338         /*
1339          * Last Descriptor of Packet needs End Of Packet (EOP)
1340          */
1341         current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
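        /*
         * RS (Report Status) is already part of adapter->txd_cmd, so the
         * hardware will write back the DD bit on this descriptor and
         * em_clean_transmit_interrupts() can later reclaim the mbuf chain.
         */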
1342
1343         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1344                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1345
1346         /* 
1347          * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1348          * that this frame is available to transmit.
1349          */
1350         if (adapter->hw.mac_type == em_82547 &&
1351             adapter->link_duplex == HALF_DUPLEX) {
1352                 em_82547_move_tail_serialized(adapter);
1353         } else {
1354                 E1000_WRITE_REG(&adapter->hw, TDT, i);
1355                 if (adapter->hw.mac_type == em_82547) {
1356                         em_82547_update_fifo_head(adapter,
1357                                                   m_head->m_pkthdr.len);
1358                 }
1359         }
1360
1361         return(0);
1362 }
1363
1364 /*********************************************************************
1365  *
1366  * 82547 workaround to avoid controller hang in half-duplex environment.
1367  * The workaround is to avoid queuing a large packet that would span
1368  * the internal Tx FIFO ring boundary.  When such a packet is pending,
1369  * the FIFO pointers are reset, but only once the FIFO has quiesced.
1370  *
1371  **********************************************************************/
1372 static void
1373 em_82547_move_tail(void *arg)
1374 {
1375         struct adapter *adapter = arg;
1376
1377         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
1378         em_82547_move_tail_serialized(arg);
1379         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
1380 }
1381
1382 static void
1383 em_82547_move_tail_serialized(void *arg)
1384 {
1385         struct adapter *adapter = arg;
1386         uint16_t hw_tdt;
1387         uint16_t sw_tdt;
1388         struct em_tx_desc *tx_desc;
1389         uint16_t length = 0;
1390         boolean_t eop = 0;
1391
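        /*
         * Walk the descriptors between the tail the hardware has been told
         * about (hw_tdt) and the tail software has prepared (sw_tdt).  The
         * hardware tail is only advanced one complete packet (EOP) at a
         * time, and only when the FIFO workaround says it is safe.
         */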
1392         hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1393         sw_tdt = adapter->next_avail_tx_desc;
1394
1395         while (hw_tdt != sw_tdt) {
1396                 tx_desc = &adapter->tx_desc_base[hw_tdt];
1397                 length += tx_desc->lower.flags.length;
1398                 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1399                 if (++hw_tdt == adapter->num_tx_desc)
1400                         hw_tdt = 0;
1401
1402                 if (eop) {
1403                         if (em_82547_fifo_workaround(adapter, length)) {
1404                                 adapter->tx_fifo_wrk_cnt++;
1405                                 callout_reset(&adapter->tx_fifo_timer, 1,
1406                                         em_82547_move_tail, adapter);
1407                                 break;
1408                         }
1409                         E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1410                         em_82547_update_fifo_head(adapter, length);
1411                         length = 0;
1412                 }
1413         }       
1414 }
1415
1416 static int
1417 em_82547_fifo_workaround(struct adapter *adapter, int len)
1418 {       
1419         int fifo_space, fifo_pkt_len;
1420
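        /*
         * Account for the internal header the hardware keeps in front of
         * each frame and round the packet up to the FIFO block size
         * (EM_FIFO_HDR bytes) before doing the space check.
         */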
1421         fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1422
1423         if (adapter->link_duplex == HALF_DUPLEX) {
1424                 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1425
1426                 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1427                         if (em_82547_tx_fifo_reset(adapter))
1428                                 return(0);
1429                         else
1430                                 return(1);
1431                 }
1432         }
1433
1434         return(0);
1435 }
1436
1437 static void
1438 em_82547_update_fifo_head(struct adapter *adapter, int len)
1439 {
1440         int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1441
1442         /* tx_fifo_head is always 16 byte aligned */
1443         adapter->tx_fifo_head += fifo_pkt_len;
1444         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1445                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1446 }
1447
1448 static int
1449 em_82547_tx_fifo_reset(struct adapter *adapter)
1450 {
1451         uint32_t tctl;
1452
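        /*
         * Resetting the FIFO pointers is only safe once both the
         * descriptor ring (TDT == TDH) and the internal Tx FIFO
         * (head/tail and saved head/tail equal, packet count zero)
         * have completely drained.
         */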
1453         if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1454               E1000_READ_REG(&adapter->hw, TDH)) &&
1455              (E1000_READ_REG(&adapter->hw, TDFT) == 
1456               E1000_READ_REG(&adapter->hw, TDFH)) &&
1457              (E1000_READ_REG(&adapter->hw, TDFTS) ==
1458               E1000_READ_REG(&adapter->hw, TDFHS)) &&
1459              (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1460
1461                 /* Disable TX unit */
1462                 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1463                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1464
1465                 /* Reset FIFO pointers */
1466                 E1000_WRITE_REG(&adapter->hw, TDFT,  adapter->tx_head_addr);
1467                 E1000_WRITE_REG(&adapter->hw, TDFH,  adapter->tx_head_addr);
1468                 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1469                 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1470
1471                 /* Re-enable TX unit */
1472                 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1473                 E1000_WRITE_FLUSH(&adapter->hw);
1474
1475                 adapter->tx_fifo_head = 0;
1476                 adapter->tx_fifo_reset_cnt++;
1477
1478                 return(TRUE);
1479         } else {
1480                 return(FALSE);
1481         }
1482 }
1483
1484 static void
1485 em_set_promisc(struct adapter *adapter)
1486 {
1487         uint32_t reg_rctl, ctrl;
1488         struct ifnet *ifp = &adapter->interface_data.ac_if;
1489
1490         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1491         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
1492
1493         if (ifp->if_flags & IFF_PROMISC) {
1494                 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1495                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1496
1497                 /*
1498                  * Disable VLAN stripping in promiscuous mode.
1499                  * This allows vlan-tagged frames to be bridged and
1500                  * their tags to be seen in tcpdump.
1501                  */
1502                 ctrl &= ~E1000_CTRL_VME; 
1503                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
1504         } else if (ifp->if_flags & IFF_ALLMULTI) {
1505                 reg_rctl |= E1000_RCTL_MPE;
1506                 reg_rctl &= ~E1000_RCTL_UPE;
1507                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1508         }
1509 }
1510
1511 static void
1512 em_disable_promisc(struct adapter *adapter)
1513 {
1514         uint32_t reg_rctl;
1515
1516         reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1517
1518         reg_rctl &= (~E1000_RCTL_UPE);
1519         reg_rctl &= (~E1000_RCTL_MPE);
1520         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1521
1522         em_enable_vlans(adapter);
1523 }
1524
1525 /*********************************************************************
1526  *  Multicast Update
1527  *
1528  *  This routine is called whenever the multicast address list is updated.
1529  *
1530  **********************************************************************/
1531
1532 static void
1533 em_set_multi(struct adapter *adapter)
1534 {
1535         uint32_t reg_rctl = 0;
1536         uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1537         struct ifmultiaddr *ifma;
1538         int mcnt = 0;
1539         struct ifnet *ifp = &adapter->interface_data.ac_if;
1540
1541         IOCTL_DEBUGOUT("em_set_multi: begin");
1542
1543         if (adapter->hw.mac_type == em_82542_rev2_0) {
1544                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1545                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1546                         em_pci_clear_mwi(&adapter->hw);
1547                 reg_rctl |= E1000_RCTL_RST;
1548                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1549                 msec_delay(5);
1550         }
1551
1552         LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1553                 if (ifma->ifma_addr->sa_family != AF_LINK)
1554                         continue;
1555
1556                 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1557                         break;
1558
1559                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1560                       &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
1561                 mcnt++;
1562         }
1563
1564         if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1565                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1566                 reg_rctl |= E1000_RCTL_MPE;
1567                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1568         } else {
1569                 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
1570         }
1571
1572         if (adapter->hw.mac_type == em_82542_rev2_0) {
1573                 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1574                 reg_rctl &= ~E1000_RCTL_RST;
1575                 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1576                 msec_delay(5);
1577                 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1578                         em_pci_set_mwi(&adapter->hw);
1579         }
1580 }
1581
1582 /*********************************************************************
1583  *  Timer routine
1584  *
1585  *  This routine checks for link status and updates statistics.
1586  *
1587  **********************************************************************/
1588
1589 static void
1590 em_local_timer(void *arg)
1591 {
1592         struct ifnet *ifp;
1593         struct adapter *adapter = arg;
1594         ifp = &adapter->interface_data.ac_if;
1595
1596         lwkt_serialize_enter(ifp->if_serializer);
1597
1598         em_check_for_link(&adapter->hw);
1599         em_print_link_status(adapter);
1600         em_update_stats_counters(adapter);   
1601         if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
1602                 em_print_hw_stats(adapter);
1603         em_smartspeed(adapter);
1604
1605         callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1606
1607         lwkt_serialize_exit(ifp->if_serializer);
1608 }
1609
1610 static void
1611 em_print_link_status(struct adapter *adapter)
1612 {
1613         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1614                 if (adapter->link_active == 0) {
1615                         em_get_speed_and_duplex(&adapter->hw, 
1616                                                 &adapter->link_speed, 
1617                                                 &adapter->link_duplex);
1618                         device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1619                                adapter->link_speed,
1620                                ((adapter->link_duplex == FULL_DUPLEX) ?
1621                                 "Full Duplex" : "Half Duplex"));
1622                         adapter->link_active = 1;
1623                         adapter->smartspeed = 0;
1624                 }
1625         } else {
1626                 if (adapter->link_active == 1) {
1627                         adapter->link_speed = 0;
1628                         adapter->link_duplex = 0;
1629                         device_printf(adapter->dev, "Link is Down\n");
1630                         adapter->link_active = 0;
1631                 }
1632         }
1633 }
1634
1635 /*********************************************************************
1636  *
1637  *  This routine disables all traffic on the adapter by issuing a
1638  *  global reset on the MAC and deallocates TX/RX buffers. 
1639  *
1640  **********************************************************************/
1641
1642 static void
1643 em_stop(void *arg)
1644 {
1645         struct ifnet   *ifp;
1646         struct adapter * adapter = arg;
1647         ifp = &adapter->interface_data.ac_if;
1648
1649         INIT_DEBUGOUT("em_stop: begin");
1650         em_disable_intr(adapter);
1651         em_reset_hw(&adapter->hw);
1652         callout_stop(&adapter->timer);
1653         callout_stop(&adapter->tx_fifo_timer);
1654         em_free_transmit_structures(adapter);
1655         em_free_receive_structures(adapter);
1656
1657         /* Tell the stack that the interface is no longer active */
1658         ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1659         ifp->if_timer = 0;
1660 }
1661
1662 /*********************************************************************
1663  *
1664  *  Determine hardware revision.
1665  *
1666  **********************************************************************/
1667 static void
1668 em_identify_hardware(struct adapter * adapter)
1669 {
1670         device_t dev = adapter->dev;
1671
1672         /* Make sure our PCI config space has the necessary stuff set */
1673         adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1674         if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1675               (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1676                 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1677                 adapter->hw.pci_cmd_word |= 
1678                 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1679                 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1680         }
1681
1682         /* Save off the information about this board */
1683         adapter->hw.vendor_id = pci_get_vendor(dev);
1684         adapter->hw.device_id = pci_get_device(dev);
1685         adapter->hw.revision_id = pci_get_revid(dev);
1686         adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1687         adapter->hw.subsystem_id = pci_get_subdevice(dev);
1688
1689         /* Identify the MAC */
1690         if (em_set_mac_type(&adapter->hw))
1691                 device_printf(dev, "Unknown MAC Type\n");
1692
1693         if (adapter->hw.mac_type == em_82541 ||
1694             adapter->hw.mac_type == em_82541_rev_2 ||
1695             adapter->hw.mac_type == em_82547 ||
1696             adapter->hw.mac_type == em_82547_rev_2)
1697                 adapter->hw.phy_init_script = TRUE;
1698 }
1699
1700 /*********************************************************************
1701  *
1702  *  Initialize the hardware to a configuration as specified by the
1703  *  adapter structure. The controller is reset, the EEPROM is
1704  *  verified, the MAC address is set, then the shared initialization
1705  *  routines are called.
1706  *
1707  **********************************************************************/
1708 static int
1709 em_hardware_init(struct adapter *adapter)
1710 {
1711         uint16_t        rx_buffer_size;
1712
1713         INIT_DEBUGOUT("em_hardware_init: begin");
1714         /* Issue a global reset */
1715         em_reset_hw(&adapter->hw);
1716
1717         /* When hardware is reset, fifo_head is also reset */
1718         adapter->tx_fifo_head = 0;
1719
1720         /* Make sure we have a good EEPROM before we read from it */
1721         if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1722                 device_printf(adapter->dev,
1723                               "The EEPROM Checksum Is Not Valid\n");
1724                 return(EIO);
1725         }
1726
1727         if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1728                 device_printf(adapter->dev,
1729                               "EEPROM read error while reading part number\n");
1730                 return(EIO);
1731         }
1732
1733         /*
1734          * These parameters control the automatic generation (Tx) and 
1735          * response (Rx) to Ethernet PAUSE frames.
1736          * - High water mark should allow for at least two frames to be
1737          *   received after sending an XOFF.
1738          * - Low water mark works best when it is very near the high water mark.
1739          *   This allows the receiver to restart by sending XON when it has
1740          *   drained a bit.  Here we use an arbitrary value of 1500 which will
1741          *   restart after one full frame is pulled from the buffer.  There
1742          *   could be several smaller frames in the buffer and if so they will
1743          *   not trigger the XON until their total number reduces the buffer
1744          *   by 1500.
1745          * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1746          */
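        /*
         * Illustrative example (not the values of any particular NIC):
         * with 48KB of packet buffer dedicated to receive and a 1518-byte
         * maximum frame, high water = 48*1024 - 2048 = 47104 bytes and
         * low water = 47104 - 1500 = 45604 bytes.
         */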
1747         rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10);
1748
1749         adapter->hw.fc_high_water =
1750             rx_buffer_size - EM_ROUNDUP(1 * adapter->hw.max_frame_size, 1024); 
1751         adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
1752         adapter->hw.fc_pause_time = 1000;
1753         adapter->hw.fc_send_xon = TRUE;
1754         adapter->hw.fc = em_fc_full;
1755
1756         if (em_init_hw(&adapter->hw) < 0) {
1757                 device_printf(adapter->dev, "Hardware Initialization Failed\n");
1758                 return(EIO);
1759         }
1760
1761         em_check_for_link(&adapter->hw);
1762         if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1763                 adapter->link_active = 1;
1764         else
1765                 adapter->link_active = 0;
1766
1767         if (adapter->link_active) {
1768                 em_get_speed_and_duplex(&adapter->hw, 
1769                                         &adapter->link_speed, 
1770                                         &adapter->link_duplex);
1771         } else {
1772                 adapter->link_speed = 0;
1773                 adapter->link_duplex = 0;
1774         }
1775
1776         return(0);
1777 }
1778
1779 /*********************************************************************
1780  *
1781  *  Setup networking device structure and register an interface.
1782  *
1783  **********************************************************************/
1784 static void
1785 em_setup_interface(device_t dev, struct adapter *adapter)
1786 {
1787         struct ifnet   *ifp;
1788         INIT_DEBUGOUT("em_setup_interface: begin");
1789
1790         ifp = &adapter->interface_data.ac_if;
1791         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1792         ifp->if_mtu = ETHERMTU;
1793         ifp->if_baudrate = 1000000000;
1794         ifp->if_init =  em_init;
1795         ifp->if_softc = adapter;
1796         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1797         ifp->if_ioctl = em_ioctl;
1798         ifp->if_start = em_start;
1799 #ifdef DEVICE_POLLING
1800         ifp->if_poll = em_poll;
1801 #endif
1802         ifp->if_watchdog = em_watchdog;
1803         ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
1804         ifq_set_ready(&ifp->if_snd);
1805
1806         if (adapter->hw.mac_type >= em_82543)
1807                 ifp->if_capabilities |= IFCAP_HWCSUM;
1808
1809         ifp->if_capenable = ifp->if_capabilities;
1810
1811         ether_ifattach(ifp, adapter->hw.mac_addr, NULL);
1812
1813         /*
1814          * Tell the upper layer(s) we support long frames.
1815          */
1816         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1817         ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1818
1819         /* 
1820          * Specify the media types supported by this adapter and register
1821          * callbacks to update media and link information
1822          */
1823         ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1824                      em_media_status);
1825         if (adapter->hw.media_type == em_media_type_fiber) {
1826                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 
1827                             0, NULL);
1828                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 
1829                             0, NULL);
1830         } else {
1831                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1832                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 
1833                             0, NULL);
1834                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 
1835                             0, NULL);
1836                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 
1837                             0, NULL);
1838                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 
1839                             0, NULL);
1840                 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1841         }
1842         ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1843         ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1844 }
1845
1846 /*********************************************************************
1847  *
1848  *  Workaround for SmartSpeed on 82541 and 82547 controllers
1849  *
1850  **********************************************************************/        
1851 static void
1852 em_smartspeed(struct adapter *adapter)
1853 {
1854         uint16_t phy_tmp;
1855
1856         if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) || 
1857             !adapter->hw.autoneg ||
1858             !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1859                 return;
1860
1861         if (adapter->smartspeed == 0) {
1862                 /*
1863                  * If Master/Slave config fault is asserted twice,
1864                  * we assume back-to-back.
1865                  */
1866                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1867                 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1868                         return;
1869                 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1870                 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1871                         em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1872                                         &phy_tmp);
1873                         if (phy_tmp & CR_1000T_MS_ENABLE) {
1874                                 phy_tmp &= ~CR_1000T_MS_ENABLE;
1875                                 em_write_phy_reg(&adapter->hw,
1876                                                  PHY_1000T_CTRL, phy_tmp);
1877                                 adapter->smartspeed++;
1878                                 if (adapter->hw.autoneg &&
1879                                     !em_phy_setup_autoneg(&adapter->hw) &&
1880                                     !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1881                                                      &phy_tmp)) {
1882                                         phy_tmp |= (MII_CR_AUTO_NEG_EN |  
1883                                                     MII_CR_RESTART_AUTO_NEG);
1884                                         em_write_phy_reg(&adapter->hw,
1885                                                          PHY_CTRL, phy_tmp);
1886                                 }
1887                         }
1888                 }
1889                 return;
1890         } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1891                 /* If still no link, perhaps using 2/3 pair cable */
1892                 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1893                 phy_tmp |= CR_1000T_MS_ENABLE;
1894                 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1895                 if (adapter->hw.autoneg &&
1896                     !em_phy_setup_autoneg(&adapter->hw) &&
1897                     !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1898                         phy_tmp |= (MII_CR_AUTO_NEG_EN |
1899                                     MII_CR_RESTART_AUTO_NEG);
1900                         em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1901                 }
1902         }
1903         /* Restart process after EM_SMARTSPEED_MAX iterations */
1904         if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1905                 adapter->smartspeed = 0;
1906 }
1907
1908 /*
1909  * Manage DMA'able memory.
1910  */
1911 static void
1912 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1913 {
1914         if (error)
1915                 return;
1916         *(bus_addr_t*) arg = segs->ds_addr;
1917 }
1918
1919 static int
1920 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1921               struct em_dma_alloc *dma, int mapflags)
1922 {
1923         int r;
1924         device_t dev = adapter->dev;
1925
1926         r = bus_dma_tag_create(NULL,                    /* parent */
1927                                PAGE_SIZE, 0,            /* alignment, bounds */
1928                                BUS_SPACE_MAXADDR,       /* lowaddr */
1929                                BUS_SPACE_MAXADDR,       /* highaddr */
1930                                NULL, NULL,              /* filter, filterarg */
1931                                size,                    /* maxsize */
1932                                1,                       /* nsegments */
1933                                size,                    /* maxsegsize */
1934                                BUS_DMA_ALLOCNOW,        /* flags */
1935                                &dma->dma_tag);
1936         if (r != 0) {
1937                 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1938                               "error %u\n", r);
1939                 goto fail_0;
1940         }
1941
1942         r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1943                              BUS_DMA_NOWAIT, &dma->dma_map);
1944         if (r != 0) {
1945                 device_printf(dev, "em_dma_malloc: bus_dmamem_alloc failed; "
1946                               "size %ju, error %d\n", (uintmax_t)size, r);
1947                 goto fail_2;
1948         }
1949
1950         r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1951                             size,
1952                             em_dmamap_cb,
1953                             &dma->dma_paddr,
1954                             mapflags | BUS_DMA_NOWAIT);
1955         if (r != 0) {
1956                 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1957                               "error %u\n", r);
1958                 goto fail_3;
1959         }
1960
1961         dma->dma_size = size;
1962         return(0);
1963
1964 fail_3:
1965         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1966 fail_2:
1967         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1968         bus_dma_tag_destroy(dma->dma_tag);
1969 fail_0:
1970         dma->dma_map = NULL;
1971         dma->dma_tag = NULL;
1972         return(r);
1973 }
1974
1975 static void
1976 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1977 {
1978         bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1979         bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1980         bus_dma_tag_destroy(dma->dma_tag);
1981 }
1982
1983 /*********************************************************************
1984  *
1985  *  Allocate memory for tx_buffer structures. The tx_buffer stores all 
1986  *  the information needed to transmit a packet on the wire. 
1987  *
1988  **********************************************************************/
1989 static int
1990 em_allocate_transmit_structures(struct adapter * adapter)
1991 {
1992         adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1993             adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1994         if (adapter->tx_buffer_area == NULL) {
1995                 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1996                 return(ENOMEM);
1997         }
1998
1999         return(0);
2000 }
2001
2002 /*********************************************************************
2003  *
2004  *  Allocate and initialize transmit structures. 
2005  *
2006  **********************************************************************/
2007 static int
2008 em_setup_transmit_structures(struct adapter * adapter)
2009 {
2010         /*
2011          * Setup DMA descriptor areas.
2012          */
2013         if (bus_dma_tag_create(NULL,                    /* parent */
2014                                1, 0,                    /* alignment, bounds */
2015                                BUS_SPACE_MAXADDR,       /* lowaddr */ 
2016                                BUS_SPACE_MAXADDR,       /* highaddr */
2017                                NULL, NULL,              /* filter, filterarg */
2018                                MCLBYTES * 8,            /* maxsize */
2019                                EM_MAX_SCATTER,          /* nsegments */
2020                                MCLBYTES * 8,            /* maxsegsize */
2021                                BUS_DMA_ALLOCNOW,        /* flags */ 
2022                                &adapter->txtag)) {
2023                 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
2024                 return(ENOMEM);
2025         }
2026
2027         if (em_allocate_transmit_structures(adapter))
2028                 return(ENOMEM);
2029
2030         bzero((void *) adapter->tx_desc_base,
2031               (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
2032
2033         adapter->next_avail_tx_desc = 0;
2034         adapter->oldest_used_tx_desc = 0;
2035
2036         /* Set number of descriptors available */
2037         adapter->num_tx_desc_avail = adapter->num_tx_desc;
2038
2039         /* Set checksum context */
2040         adapter->active_checksum_context = OFFLOAD_NONE;
2041
2042         return(0);
2043 }
2044
2045 /*********************************************************************
2046  *
2047  *  Enable transmit unit.
2048  *
2049  **********************************************************************/
2050 static void
2051 em_initialize_transmit_unit(struct adapter * adapter)
2052 {
2053         uint32_t reg_tctl;
2054         uint32_t reg_tipg = 0;
2055         uint64_t bus_addr;
2056
2057         INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2058
2059         /* Setup the Base and Length of the Tx Descriptor Ring */
2060         bus_addr = adapter->txdma.dma_paddr;
2061         E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
2062         E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
2063         E1000_WRITE_REG(&adapter->hw, TDLEN, 
2064                         adapter->num_tx_desc * sizeof(struct em_tx_desc));
2065
2066         /* Setup the HW Tx Head and Tail descriptor pointers */
2067         E1000_WRITE_REG(&adapter->hw, TDH, 0);
2068         E1000_WRITE_REG(&adapter->hw, TDT, 0);
2069
2070         HW_DEBUGOUT2("Base = %x, Length = %x\n", 
2071                      E1000_READ_REG(&adapter->hw, TDBAL),
2072                      E1000_READ_REG(&adapter->hw, TDLEN));
2073
2074         /* Set the default values for the Tx Inter Packet Gap timer */
2075         switch (adapter->hw.mac_type) {
2076         case em_82542_rev2_0:
2077         case em_82542_rev2_1:
2078                 reg_tipg = DEFAULT_82542_TIPG_IPGT;
2079                 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2080                 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2081                 break;
2082         default:
2083                 if (adapter->hw.media_type == em_media_type_fiber)
2084                         reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2085                 else
2086                         reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2087                 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2088                 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2089         }
2090
2091         E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2092         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2093         if (adapter->hw.mac_type >= em_82540)
2094                 E1000_WRITE_REG(&adapter->hw, TADV,
2095                                 adapter->tx_abs_int_delay.value);
2096
2097         /* Program the Transmit Control Register */
2098         reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2099                    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2100         if (adapter->hw.mac_type >= em_82571)
2101                 reg_tctl |= E1000_TCTL_MULR;
2102         if (adapter->link_duplex == 1)
2103                 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2104         else
2105                 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2106         E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2107
2108         /* Setup Transmit Descriptor Settings for this adapter */   
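        /*
         * IFCS has the hardware append the Ethernet FCS/CRC; RS requests
         * descriptor write-back (DD) so completed packets can be reclaimed
         * by em_clean_transmit_interrupts().
         */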
2109         adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2110
2111         if (adapter->tx_int_delay.value > 0)
2112                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2113 }
2114
2115 /*********************************************************************
2116  *
2117  *  Free all transmit related data structures.
2118  *
2119  **********************************************************************/
2120 static void
2121 em_free_transmit_structures(struct adapter * adapter)
2122 {
2123         struct em_buffer *tx_buffer;
2124         int i;
2125
2126         INIT_DEBUGOUT("free_transmit_structures: begin");
2127
2128         if (adapter->tx_buffer_area != NULL) {
2129                 tx_buffer = adapter->tx_buffer_area;
2130                 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2131                         if (tx_buffer->m_head != NULL) {
2132                                 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2133                                 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2134                                 m_freem(tx_buffer->m_head);
2135                         }
2136                         tx_buffer->m_head = NULL;
2137                 }
2138         }
2139         if (adapter->tx_buffer_area != NULL) {
2140                 free(adapter->tx_buffer_area, M_DEVBUF);
2141                 adapter->tx_buffer_area = NULL;
2142         }
2143         if (adapter->txtag != NULL) {
2144                 bus_dma_tag_destroy(adapter->txtag);
2145                 adapter->txtag = NULL;
2146         }
2147 }
2148
2149 /*********************************************************************
2150  *
2151  *  The offload context needs to be set when we transfer the first
2152  *  packet of a particular protocol (TCP/UDP). We change the
2153  *  context only if the protocol type changes.
2154  *
2155  **********************************************************************/
2156 static void
2157 em_transmit_checksum_setup(struct adapter * adapter,
2158                            struct mbuf *mp,
2159                            uint32_t *txd_upper,
2160                            uint32_t *txd_lower) 
2161 {
2162         struct em_context_desc *TXD;
2163         struct em_buffer *tx_buffer;
2164         int curr_txd;
2165
2166         if (mp->m_pkthdr.csum_flags) {
2167                 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2168                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2169                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2170                         if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2171                                 return;
2172                         else
2173                                 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2174                 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2175                         *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2176                         *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2177                         if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2178                                 return;
2179                         else
2180                                 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2181                 } else {
2182                         *txd_upper = 0;
2183                         *txd_lower = 0;
2184                         return;
2185                 }
2186         } else {
2187                 *txd_upper = 0;
2188                 *txd_lower = 0;
2189                 return;
2190         }
2191
2192         /* If we reach this point, the checksum offload context
2193          * needs to be reset.
2194          */
2195         curr_txd = adapter->next_avail_tx_desc;
2196         tx_buffer = &adapter->tx_buffer_area[curr_txd];
2197         TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
2198
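        /*
         * Program the context descriptor: *css is the byte offset where
         * checksumming starts, *cso is where the computed checksum is
         * inserted, and *cse is where it ends (0 means "to the end of the
         * packet").  The offsets below assume a plain Ethernet + IPv4
         * header with no VLAN tag in the payload.
         */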
2199         TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2200         TXD->lower_setup.ip_fields.ipcso =
2201             ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2202         TXD->lower_setup.ip_fields.ipcse =
2203             htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2204
2205         TXD->upper_setup.tcp_fields.tucss = 
2206             ETHER_HDR_LEN + sizeof(struct ip);
2207         TXD->upper_setup.tcp_fields.tucse = htole16(0);
2208
2209         if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2210                 TXD->upper_setup.tcp_fields.tucso =
2211                     ETHER_HDR_LEN + sizeof(struct ip) +
2212                     offsetof(struct tcphdr, th_sum);
2213         } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2214                 TXD->upper_setup.tcp_fields.tucso =
2215                         ETHER_HDR_LEN + sizeof(struct ip) +
2216                         offsetof(struct udphdr, uh_sum);
2217         }
2218
2219         TXD->tcp_seg_setup.data = htole32(0);
2220         TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2221
2222         tx_buffer->m_head = NULL;
2223
2224         if (++curr_txd == adapter->num_tx_desc)
2225                 curr_txd = 0;
2226
2227         adapter->num_tx_desc_avail--;
2228         adapter->next_avail_tx_desc = curr_txd;
2229 }
2230
2231 /**********************************************************************
2232  *
2233  *  Examine each tx_buffer in the used queue. If the hardware is done
2234  *  processing the packet then free associated resources. The
2235  *  tx_buffer is put back on the free queue.
2236  *
2237  **********************************************************************/
2238
2239 static void
2240 em_clean_transmit_interrupts(struct adapter *adapter)
2241 {
2242         int i, num_avail;
2243         struct em_buffer *tx_buffer;
2244         struct em_tx_desc *tx_desc;
2245         struct ifnet *ifp = &adapter->interface_data.ac_if;
2246
2247         if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2248                 return;
2249
2250         num_avail = adapter->num_tx_desc_avail; 
2251         i = adapter->oldest_used_tx_desc;
2252
2253         tx_buffer = &adapter->tx_buffer_area[i];
2254         tx_desc = &adapter->tx_desc_base[i];
2255
2256         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2257                         BUS_DMASYNC_POSTREAD);
2258
2259         while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2260                 tx_desc->upper.data = 0;
2261                 num_avail++;                        
2262
2263                 if (tx_buffer->m_head) {
2264                         ifp->if_opackets++;
2265                         bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2266                                         BUS_DMASYNC_POSTWRITE);
2267                         bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2268                         bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2269
2270                         m_freem(tx_buffer->m_head);
2271                         tx_buffer->m_head = NULL;
2272                 }
2273
2274                 if (++i == adapter->num_tx_desc)
2275                         i = 0;
2276
2277                 tx_buffer = &adapter->tx_buffer_area[i];
2278                 tx_desc = &adapter->tx_desc_base[i];
2279         }
2280
2281         bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2282                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2283
2284         adapter->oldest_used_tx_desc = i;
2285
2286         /*
2287          * If we have enough room, clear IFF_OACTIVE to tell the stack
2288          * that it is OK to send packets.
2289          * If there are no pending descriptors, clear the timeout. Otherwise,
2290          * if some descriptors have been freed, restart the timeout.
2291          */
2292         if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2293                 ifp->if_flags &= ~IFF_OACTIVE;
2294                 if (num_avail == adapter->num_tx_desc)
2295                         ifp->if_timer = 0;
2296                 else if (num_avail != adapter->num_tx_desc_avail)
2297                         ifp->if_timer = EM_TX_TIMEOUT;
2298         }
2299         adapter->num_tx_desc_avail = num_avail;
2300 }
2301
2302 /*********************************************************************
2303  *
2304  *  Get a buffer from system mbuf buffer pool.
2305  *
2306  **********************************************************************/
2307 static int
2308 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2309 {
2310         struct mbuf *mp = nmp;
2311         struct em_buffer *rx_buffer;
2312         struct ifnet *ifp;
2313         bus_addr_t paddr;
2314         int error;
2315
2316         ifp = &adapter->interface_data.ac_if;
2317
2318         if (mp == NULL) {
2319                 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2320                 if (mp == NULL) {
2321                         adapter->mbuf_cluster_failed++;
2322                         return(ENOBUFS);
2323                 }
2324                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2325         } else {
2326                 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2327                 mp->m_data = mp->m_ext.ext_buf;
2328                 mp->m_next = NULL;
2329         }
2330         if (ifp->if_mtu <= ETHERMTU)
2331                 m_adj(mp, ETHER_ALIGN);
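        /*
         * ETHER_ALIGN (2 bytes) shifts the start of the frame so the IP
         * header lands on a 32-bit boundary for the stack; it is skipped
         * for jumbo MTUs, presumably to keep the full cluster length
         * available to the hardware.
         */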
2332
2333         rx_buffer = &adapter->rx_buffer_area[i];
2334
2335         /*
2336          * Using memory from the mbuf cluster pool, invoke the
2337          * bus_dma machinery to arrange the memory mapping.
2338          */
2339         error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2340                                 mtod(mp, void *), mp->m_len,
2341                                 em_dmamap_cb, &paddr, 0);
2342         if (error) {
2343                 m_free(mp);
2344                 return(error);
2345         }
2346         rx_buffer->m_head = mp;
2347         adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2348         bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2349
2350         return(0);
2351 }
2352
2353 /*********************************************************************
2354  *
2355  *  Allocate memory for rx_buffer structures. Since we use one 
2356  *  rx_buffer per received packet, the maximum number of rx_buffer's 
2357  *  that we'll need is equal to the number of receive descriptors 
2358  *  that we've allocated.
2359  *
2360  **********************************************************************/
2361 static int
2362 em_allocate_receive_structures(struct adapter *adapter)
2363 {
2364         int i, error, size;
2365         struct em_buffer *rx_buffer;
2366
2367         size = adapter->num_rx_desc * sizeof(struct em_buffer);
2368         adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2369
2370         error = bus_dma_tag_create(NULL,                /* parent */
2371                                    1, 0,                /* alignment, bounds */
2372                                    BUS_SPACE_MAXADDR,   /* lowaddr */
2373                                    BUS_SPACE_MAXADDR,   /* highaddr */
2374                                    NULL, NULL,          /* filter, filterarg */
2375                                    MCLBYTES,            /* maxsize */
2376                                    1,                   /* nsegments */
2377                                    MCLBYTES,            /* maxsegsize */
2378                                    BUS_DMA_ALLOCNOW,    /* flags */
2379                                    &adapter->rxtag);
2380         if (error != 0) {
2381                 device_printf(adapter->dev, "em_allocate_receive_structures: "
2382                               "bus_dma_tag_create failed; error %u\n", error);
2383                 goto fail_0;
2384         }
2385  
2386         rx_buffer = adapter->rx_buffer_area;
2387         for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2388                 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2389                                           &rx_buffer->map);
2390                 if (error != 0) {
2391                         device_printf(adapter->dev,
2392                                       "em_allocate_receive_structures: "
2393                                       "bus_dmamap_create failed; error %u\n",
2394                                       error);
2395                         goto fail_1;
2396                 }
2397         }
2398
2399         for (i = 0; i < adapter->num_rx_desc; i++) {
2400                 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2401                 if (error != 0) {
2402                         adapter->rx_buffer_area[i].m_head = NULL;
2403                         adapter->rx_desc_base[i].buffer_addr = 0;
2404                         return(error);
2405                 }
2406         }
2407
2408         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2409                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2410
2411         return(0);
2412
2413 fail_1:
2414         bus_dma_tag_destroy(adapter->rxtag);
2415 fail_0:
2416         adapter->rxtag = NULL;
2417         free(adapter->rx_buffer_area, M_DEVBUF);
2418         adapter->rx_buffer_area = NULL;
2419         return(error);
2420 }
2421
2422 /*********************************************************************
2423  *
2424  *  Allocate and initialize receive structures.
2425  *  
2426  **********************************************************************/
2427 static int
2428 em_setup_receive_structures(struct adapter *adapter)
2429 {
2430         bzero((void *) adapter->rx_desc_base,
2431               (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2432
2433         if (em_allocate_receive_structures(adapter))
2434                 return(ENOMEM);
2435
2436         /* Setup our descriptor pointers */
2437         adapter->next_rx_desc_to_check = 0;
2438         return(0);
2439 }
2440
2441 /*********************************************************************
2442  *
2443  *  Enable receive unit.
2444  *  
2445  **********************************************************************/
2446 static void
2447 em_initialize_receive_unit(struct adapter *adapter)
2448 {
2449         uint32_t reg_rctl;
2450         uint32_t reg_rxcsum;
2451         struct ifnet *ifp;
2452         uint64_t bus_addr;
2453  
2454         INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2455
2456         ifp = &adapter->interface_data.ac_if;
2457
2458         /* Make sure receives are disabled while setting up the descriptor ring */
2459         E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2460
2461         /* Set the Receive Delay Timer Register */
2462         E1000_WRITE_REG(&adapter->hw, RDTR, 
2463                         adapter->rx_int_delay.value | E1000_RDT_FPDB);
2464
2465         if (adapter->hw.mac_type >= em_82540) {
2466                 E1000_WRITE_REG(&adapter->hw, RADV,
2467                                 adapter->rx_abs_int_delay.value);
2468
2469                 /* Set the interrupt throttling rate in 256ns increments */  
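                /*
                 * em_int_throttle_ceil is a maximum interrupts-per-second
                 * value; 1e9 ns / 256 ns / ceiling converts it into the
                 * 256ns interval units the ITR register expects.
                 */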
2470                 if (em_int_throttle_ceil) {
2471                         E1000_WRITE_REG(&adapter->hw, ITR,
2472                                 1000000000 / 256 / em_int_throttle_ceil);
2473                 } else {
2474                         E1000_WRITE_REG(&adapter->hw, ITR, 0);
2475                 }
2476         }
2477
2478         /* Setup the Base and Length of the Rx Descriptor Ring */
2479         bus_addr = adapter->rxdma.dma_paddr;
2480         E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2481         E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2482         E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2483                         sizeof(struct em_rx_desc));
2484
2485         /* Setup the HW Rx Head and Tail Descriptor Pointers */
2486         E1000_WRITE_REG(&adapter->hw, RDH, 0);
2487         E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
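        /*
         * RDH is where the hardware will place the next frame and RDT
         * marks the last descriptor software has made available, so
         * starting with RDT = num_rx_desc - 1 hands all but one
         * descriptor to the hardware.
         */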
2488
2489         /* Setup the Receive Control Register */
2490         reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2491                    E1000_RCTL_RDMTS_HALF |
2492                    (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2493
2494         if (adapter->hw.tbi_compatibility_on == TRUE)
2495                 reg_rctl |= E1000_RCTL_SBP;
2496
2497         switch (adapter->rx_buffer_len) {
2498         default:
2499         case EM_RXBUFFER_2048:
2500                 reg_rctl |= E1000_RCTL_SZ_2048;
2501                 break;
2502         case EM_RXBUFFER_4096:
2503                 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2504                 break;            
2505         case EM_RXBUFFER_8192:
2506                 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2507                 break;
2508         case EM_RXBUFFER_16384:
2509                 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2510                 break;
2511         }
2512
2513         if (ifp->if_mtu > ETHERMTU)
2514                 reg_rctl |= E1000_RCTL_LPE;
2515
2516         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2517         if ((adapter->hw.mac_type >= em_82543) && 
2518             (ifp->if_capenable & IFCAP_RXCSUM)) {
2519                 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2520                 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2521                 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2522         }
2523
2524         /* Enable Receives */
2525         E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2526 }
2527
2528 /*********************************************************************
2529  *
2530  *  Free receive related data structures.
2531  *
2532  **********************************************************************/
2533 static void
2534 em_free_receive_structures(struct adapter *adapter)
2535 {
2536         struct em_buffer *rx_buffer;
2537         int i;
2538
2539         INIT_DEBUGOUT("free_receive_structures: begin");
2540
2541         if (adapter->rx_buffer_area != NULL) {
2542                 rx_buffer = adapter->rx_buffer_area;
2543                 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2544                         if (rx_buffer->map != NULL) {
2545                                 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2546                                 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2547                         }
2548                         if (rx_buffer->m_head != NULL)
2549                                 m_freem(rx_buffer->m_head);
2550                         rx_buffer->m_head = NULL;
2551                 }
2552         }
2553         if (adapter->rx_buffer_area != NULL) {
2554                 free(adapter->rx_buffer_area, M_DEVBUF);
2555                 adapter->rx_buffer_area = NULL;
2556         }
2557         if (adapter->rxtag != NULL) {
2558                 bus_dma_tag_destroy(adapter->rxtag);
2559                 adapter->rxtag = NULL;
2560         }
2561 }
2562
2563 /*********************************************************************
2564  *
2565  *  This routine executes in interrupt context. It replenishes
2566  *  the mbufs in the descriptor and sends data which has been
2567  *  dma'ed into host memory to upper layer.
2568  *
2569  *  We loop at most count times if count is > 0, or until done if
2570  *  count < 0.
2571  *
2572  *********************************************************************/
2573 static void
2574 em_process_receive_interrupts(struct adapter *adapter, int count)
2575 {
2576         struct ifnet *ifp;
2577         struct mbuf *mp;
2578         uint8_t accept_frame = 0;
2579         uint8_t eop = 0;
2580         uint16_t len, desc_len, prev_len_adj;
2581         int i;
2582
2583         /* Pointer to the receive descriptor being examined. */
2584         struct em_rx_desc *current_desc;
2585
2586         ifp = &adapter->interface_data.ac_if;
2587         i = adapter->next_rx_desc_to_check;
2588         current_desc = &adapter->rx_desc_base[i];
2589
2590         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2591                         BUS_DMASYNC_POSTREAD);
2592
2593         if (!((current_desc->status) & E1000_RXD_STAT_DD))
2594                 return;
2595
2596         while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2597                 mp = adapter->rx_buffer_area[i].m_head;
2598                 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2599                                 BUS_DMASYNC_POSTREAD);
2600
2601                 accept_frame = 1;
2602                 prev_len_adj = 0;
2603                 desc_len = le16toh(current_desc->length);
2604                 if (current_desc->status & E1000_RXD_STAT_EOP) {
2605                         count--;
2606                         eop = 1;
2607                         if (desc_len < ETHER_CRC_LEN) {
2608                                 len = 0;
2609                                 prev_len_adj = ETHER_CRC_LEN - desc_len;
2610                         } else {
2611                                 len = desc_len - ETHER_CRC_LEN;
2612                         }
2613                 } else {
2614                         eop = 0;
2615                         len = desc_len;
2616                 }
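                /*
                 * If the Ethernet CRC straddles descriptors, the final
                 * descriptor can hold fewer than ETHER_CRC_LEN bytes;
                 * prev_len_adj records how many CRC bytes must still be
                 * trimmed from the previous mbuf when the fragments are
                 * chained together below.
                 */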
2617
2618                 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2619                         uint8_t last_byte;
2620                         uint32_t pkt_len = desc_len;
2621
2622                         if (adapter->fmp != NULL)
2623                                 pkt_len += adapter->fmp->m_pkthdr.len; 
2624
2625                         last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2626
2627                         if (TBI_ACCEPT(&adapter->hw, current_desc->status, 
2628                                        current_desc->errors, 
2629                                        pkt_len, last_byte)) {
2630                                 em_tbi_adjust_stats(&adapter->hw, 
2631                                                     &adapter->stats, 
2632                                                     pkt_len, 
2633                                                     adapter->hw.mac_addr);
2634                                 if (len > 0)
2635                                         len--;
2636                         } else {
2637                                 accept_frame = 0;
2638                         }
2639                 }
2640
2641                 if (accept_frame) {
2642                         if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2643                                 adapter->dropped_pkts++;
2644                                 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2645                                 if (adapter->fmp != NULL) 
2646                                         m_freem(adapter->fmp);
2647                                 adapter->fmp = NULL;
2648                                 adapter->lmp = NULL;
2649                                 break;
2650                         }
2651
2652                         /* Assign correct length to the current fragment */
2653                         mp->m_len = len;
2654
2655                         if (adapter->fmp == NULL) {
2656                                 mp->m_pkthdr.len = len;
2657                                 adapter->fmp = mp;       /* Store the first mbuf */
2658                                 adapter->lmp = mp;
2659                         } else {
2660                                 /* Chain mbufs together */
2661                                 /* 
2662                                  * Adjust length of previous mbuf in chain if we 
2663                                  * received less than 4 bytes in the last descriptor.
2664                                  */
2665                                 if (prev_len_adj > 0) {
2666                                         adapter->lmp->m_len -= prev_len_adj;
2667                                         adapter->fmp->m_pkthdr.len -= prev_len_adj;
2668                                 }
2669                                 adapter->lmp->m_next = mp;
2670                                 adapter->lmp = adapter->lmp->m_next;
2671                                 adapter->fmp->m_pkthdr.len += len;
2672                         }
2673
2674                         if (eop) {
2675                                 adapter->fmp->m_pkthdr.rcvif = ifp;
2676                                 ifp->if_ipackets++;
2677
2678                                 em_receive_checksum(adapter, current_desc,
2679                                                     adapter->fmp);
2680                                 if (current_desc->status & E1000_RXD_STAT_VP) {
2681                                         VLAN_INPUT_TAG(adapter->fmp,
2682                                                        (current_desc->special & 
2683                                                         E1000_RXD_SPC_VLAN_MASK));
2684                                 } else {
2685                                         ifp->if_input(ifp, adapter->fmp);
2686                                 }
2687                                 adapter->fmp = NULL;
2688                                 adapter->lmp = NULL;
2689                         }
2690                 } else {
2691                         adapter->dropped_pkts++;
2692                         em_get_buf(i, adapter, mp, MB_DONTWAIT);
2693                         if (adapter->fmp != NULL) 
2694                                 m_freem(adapter->fmp);
2695                         adapter->fmp = NULL;
2696                         adapter->lmp = NULL;
2697                 }
2698
2699                 /* Zero out the receive descriptor's status. */
2700                 current_desc->status = 0;
2701
2702                 /* Advance the E1000's Receive Queue #0  "Tail Pointer". */
2703                 E1000_WRITE_REG(&adapter->hw, RDT, i);
2704
2705                 /* Advance our pointers to the next descriptor */
2706                 if (++i == adapter->num_rx_desc) {
2707                         i = 0;
2708                         current_desc = adapter->rx_desc_base;
2709                 } else {
2710                         current_desc++;
2711                 }
2712         }
2713
2714         bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2715                         BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2716
2717         adapter->next_rx_desc_to_check = i;
2718 }
2719
2720 /*********************************************************************
2721  *
2722  *  Verify that the hardware indicated that the checksum is valid.
2723  *  Inform the stack of the checksum status so that the stack does
2724  *  not spend time re-verifying the checksum.
2725  *
2726  *********************************************************************/
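/*
 * A minimal sketch of how an upper-layer consumer typically interprets the
 * flags set below (not code from this driver; names follow the usual BSD
 * mbuf conventions):
 *
 *	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
 *		if ((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)
 *			;	// treat as an IP header checksum failure
 *	}
 *	if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
 *	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
 *		// csum_data holds 0xffff, i.e. the TCP/UDP checksum verified
 *		th->th_sum = m->m_pkthdr.csum_data ^ 0xffff;	// == 0
 *	}
 */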
2727 static void
2728 em_receive_checksum(struct adapter *adapter,
2729                     struct em_rx_desc *rx_desc,
2730                     struct mbuf *mp)
2731 {
2732         /* 82543 or newer only */
2733         if ((adapter->hw.mac_type < em_82543) ||
2734             /* Ignore Checksum bit is set */
2735             (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2736                 mp->m_pkthdr.csum_flags = 0;
2737                 return;
2738         }
2739
2740         if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2741                 /* Did it pass? */
2742                 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2743                         /* IP Checksum Good */
2744                         mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2745                         mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2746                 } else {
2747                         mp->m_pkthdr.csum_flags = 0;
2748                 }
2749         }
2750
2751         if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2752                 /* Did it pass? */        
2753                 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2754                         mp->m_pkthdr.csum_flags |= 
2755                         (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2756                         mp->m_pkthdr.csum_data = htons(0xffff);
2757                 }
2758         }
2759 }
2760
2761
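/*
 * Enable hardware 802.1Q support: program the VLAN ethertype register (VET)
 * and set the VME bit in CTRL so the MAC recognizes VLAN-tagged frames; the
 * tag itself shows up in the receive descriptor's "special" field and is
 * handled in the receive path above.
 */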
2762 static void 
2763 em_enable_vlans(struct adapter *adapter)
2764 {
2765         uint32_t ctrl;
2766
2767         E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2768
2769         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2770         ctrl |= E1000_CTRL_VME; 
2771         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2772 }
2773
2774 /*
2775  * note: we must enable the serialized handler (lwkt_serialize_handler_enable())
2776  * prior to enabling the hardware interrupt, and disable it
2777  * (lwkt_serialize_handler_disable()) only after disabling the hardware
2778  * interrupt, to avoid handler execution races from scheduled interrupt threads.
2779  */
2780 static void
2781 em_enable_intr(struct adapter *adapter)
2782 {
2783         struct ifnet *ifp = &adapter->interface_data.ac_if;
2784         
2785         if ((ifp->if_flags & IFF_POLLING) == 0) {
2786                 lwkt_serialize_handler_enable(ifp->if_serializer);
2787                 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2788         }
2789 }
2790
2791 static void
2792 em_disable_intr(struct adapter *adapter)
2793 {
2794         /*
2795          * The first version of the 82542 had an erratum where, when link
2796          * was forced, it would stay up even if the cable was disconnected.
2797          * Sequence errors were used to detect the disconnect and the
2798          * driver would then unforce the link.  That code lives in the ISR,
2799          * so for it to work correctly the Sequence error interrupt has to
2800          * be enabled all the time.
2801          */
2802         if (adapter->hw.mac_type == em_82542_rev2_0) {
2803                 E1000_WRITE_REG(&adapter->hw, IMC,
2804                                 (0xffffffff & ~E1000_IMC_RXSEQ));
2805         } else {
2806                 E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
2807         }
2808
2809         lwkt_serialize_handler_disable(adapter->interface_data.ac_if.if_serializer);
2810 }
2811
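/*
 * An Ethernet address is accepted here only if it is neither a
 * multicast/broadcast address (low bit of the first octet set) nor
 * all zeroes.
 */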
2812 static int
2813 em_is_valid_ether_addr(uint8_t *addr)
2814 {
2815         char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2816
2817         if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2818                 return(FALSE);
2819         else
2820                 return(TRUE);
2821 }
2822
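/*
 * The helpers below are the OS glue that the shared Intel e1000 support code
 * presumably calls back into for PCI configuration space and MWI handling;
 * they simply forward to the generic PCI configuration accessors using the
 * device_t stashed in the em_osdep back-pointer.
 */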
2823 void 
2824 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2825 {
2826         pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2827 }
2828
2829 void 
2830 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2831 {
2832         *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2833 }
2834
2835 void
2836 em_pci_set_mwi(struct em_hw *hw)
2837 {
2838         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2839                          (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2840 }
2841
2842 void
2843 em_pci_clear_mwi(struct em_hw *hw)
2844 {
2845         pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2846                          (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2847 }
2848
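/*
 * Registers can also be reached through the I/O-mapped BAR: the first dword
 * of that window acts as an address register and the dword at offset 4 as
 * the data register, which is why both helpers below write the register
 * offset first and then access offset 4.
 */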
2849 uint32_t
2850 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2851 {
2852         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2853         return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2854 }
2855
2856 void
2857 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2858 {
2859         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2860         bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2861 }
2862
2863 /*********************************************************************
2864  * 82544 Coexistence issue workaround.
2865  *    There are two issues.
2866  *      1. Transmit Hang issue.
2867  *    To detect this issue the following equation can be used:
2868  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2869  *          If SUM[3:0] is between 1 and 4, we will have this issue.
2870  *
2871  *      2. DAC issue.
2872  *    To detect this issue the following equation can be used:
2873  *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2874  *          If SUM[3:0] is between 9 and 0xC, we will have this issue.
2875  *
2876  *
2877  *    WORKAROUND:
2878  *          Make sure the ending address (SUM[3:0]) is not 1, 2, 3 or 4
2879  *          (Hang) and not 9, 0xA, 0xB or 0xC (DAC).
2880  *
2881  *********************************************************************/
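/*
 * Worked examples of the check implemented below
 * (safe_terminator = ((ADDR & 0x7) + (LENGTH & 0xF)) & 0xF):
 *
 *	ADDR & 7 = 4, LENGTH & 0xF = 0xA  ->  (4 + 10) & 0xF = 0xE
 *		outside 1-4 and 9-0xC, so a single descriptor is used.
 *	ADDR & 7 = 1, LENGTH & 0xF = 0x2  ->  (1 + 2) & 0xF = 0x3
 *		inside the 1-4 (Hang) range, so the buffer is split into a
 *		(LENGTH - 4)-byte descriptor plus a trailing 4-byte one.
 */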
2882 static uint32_t
2883 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2884 {
2885         /* The issue is sensitive to both the length and the address, */
2886         /* so handle the trivially short case, then check the alignment. */
2887         uint32_t safe_terminator;
2888         if (length <= 4) {
2889                 desc_array->descriptor[0].address = address;
2890                 desc_array->descriptor[0].length = length;
2891                 desc_array->elements = 1;
2892                 return(desc_array->elements);
2893         }
2894         safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2895         /* If it does not fall in 0x1-0x4 or 0x9-0xC, one descriptor is safe. */
2896         if (safe_terminator == 0 ||
2897             (safe_terminator > 4 && safe_terminator < 9) || 
2898             (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2899                 desc_array->descriptor[0].address = address;
2900                 desc_array->descriptor[0].length = length;
2901                 desc_array->elements = 1;
2902                 return(desc_array->elements);
2903         }
2904
2905         desc_array->descriptor[0].address = address;
2906         desc_array->descriptor[0].length = length - 4;
2907         desc_array->descriptor[1].address = address + (length - 4);
2908         desc_array->descriptor[1].length = 4;
2909         desc_array->elements = 2;
2910         return(desc_array->elements);
2911 }
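/*
 * A minimal usage sketch (hypothetical caller, not taken from this file; it
 * assumes DESC_ARRAY is the value type behind PDESC_ARRAY and queue_tx()
 * stands in for whatever the transmit path does with each piece): a segment
 * hitting the erratum is expanded through this helper before being queued:
 *
 *	DESC_ARRAY desc_array;
 *	uint32_t n, j;
 *
 *	n = em_fill_descriptors(seg_addr, seg_len, &desc_array);
 *	for (j = 0; j < n; j++)
 *		queue_tx(desc_array.descriptor[j].address,
 *			 desc_array.descriptor[j].length);
 */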
2912
2913 /**********************************************************************
2914  *
2915  *  Update the board statistics counters. 
2916  *
2917  **********************************************************************/
2918 static void
2919 em_update_stats_counters(struct adapter *adapter)
2920 {
2921         struct ifnet   *ifp;
2922
2923         if (adapter->hw.media_type == em_media_type_copper ||
2924             (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2925                 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2926                 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2927         }
2928         adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2929         adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2930         adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2931         adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2932
2933         adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2934         adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2935         adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2936         adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2937         adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2938         adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2939         adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2940         adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2941         adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2942         adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2943         adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2944         adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2945         adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2946         adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2947         adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2948         adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2949         adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2950         adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2951         adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2952         adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2953
2954         /* For the 64-bit byte counters the low dword must be read first; */
2955         /* both halves clear on the read of the high dword. */
2956
2957         adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); 
2958         adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2959         adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2960         adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2961
2962         adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2963         adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2964         adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2965         adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2966         adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2967
2968         adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2969         adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2970         adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2971         adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2972
2973         adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2974         adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2975         adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2976         adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2977         adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2978         adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2979         adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2980         adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2981         adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2982         adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
2983
2984         if (adapter->hw.mac_type >= em_82543) {
2985                 adapter->stats.algnerrc += 
2986                     E1000_READ_REG(&adapter->hw, ALGNERRC);
2987                 adapter->stats.rxerrc += 
2988                     E1000_READ_REG(&adapter->hw, RXERRC);
2989                 adapter->stats.tncrs += 
2990                     E1000_READ_REG(&adapter->hw, TNCRS);
2991                 adapter->stats.cexterr += 
2992                     E1000_READ_REG(&adapter->hw, CEXTERR);
2993                 adapter->stats.tsctc += 
2994                     E1000_READ_REG(&adapter->hw, TSCTC);
2995                 adapter->stats.tsctfc += 
2996                     E1000_READ_REG(&adapter->hw, TSCTFC);
2997         }
2998         ifp = &adapter->interface_data.ac_if;
2999
3000         /* Fill out the OS statistics structure */
3001         ifp->if_ibytes = adapter->stats.gorcl;
3002         ifp->if_obytes = adapter->stats.gotcl;
3003         ifp->if_imcasts = adapter->stats.mprc;
3004         ifp->if_collisions = adapter->stats.colc;
3005
3006         /* Rx Errors */
3007         ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
3008             adapter->stats.crcerrs + adapter->stats.algnerrc +
3009             adapter->stats.rlec + adapter->stats.mpc + adapter->stats.cexterr;
3010
3011         /* Tx Errors */
3012         ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
3013 }
3014
3015
3016 /**********************************************************************
3017  *
3018  *  This routine is called only when em_display_debug_stats is enabled
3019  *  and provides a way to examine important statistics maintained by
3020  *  the driver and the hardware.
3021  *
3022  **********************************************************************/
3023 static void
3024 em_print_debug_info(struct adapter *adapter)
3025 {
3026         device_t dev = adapter->dev;
3027         uint8_t *hw_addr = adapter->hw.hw_addr;
3028
3029         device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3030         device_printf(dev, "CTRL  = 0x%x\n",
3031                       E1000_READ_REG(&adapter->hw, CTRL)); 
3032         device_printf(dev, "RCTL  = 0x%x PS=(0x8402)\n",
3033                       E1000_READ_REG(&adapter->hw, RCTL)); 
3034         device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk\n",
3035                       ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
3036                       (E1000_READ_REG(&adapter->hw, PBA) & 0xffff));
3037         device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3038                       adapter->hw.fc_high_water, adapter->hw.fc_low_water);
3039         device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3040                       E1000_READ_REG(&adapter->hw, TIDV),
3041                       E1000_READ_REG(&adapter->hw, TADV));
3042         device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3043                       E1000_READ_REG(&adapter->hw, RDTR),
3044                       E1000_READ_REG(&adapter->hw, RADV));
3045         device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
3046                       (long long)adapter->tx_fifo_wrk_cnt,
3047                       (long long)adapter->tx_fifo_reset_cnt);
3048         device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3049                       E1000_READ_REG(&adapter->hw, TDH),
3050                       E1000_READ_REG(&adapter->hw, TDT));
3051         device_printf(dev, "Num Tx descriptors avail = %d\n",
3052                       adapter->num_tx_desc_avail);
3053         device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
3054                       adapter->no_tx_desc_avail1);
3055         device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
3056                       adapter->no_tx_desc_avail2);
3057         device_printf(dev, "Std mbuf failed = %ld\n",
3058                       adapter->mbuf_alloc_failed);
3059         device_printf(dev, "Std mbuf cluster failed = %ld\n",
3060                       adapter->mbuf_cluster_failed);
3061         device_printf(dev, "Driver dropped packets = %ld\n",
3062                       adapter->dropped_pkts);
3063 }
3064
3065 static void
3066 em_print_hw_stats(struct adapter *adapter)
3067 {
3068         device_t dev = adapter->dev;
3069
3070         device_printf(dev, "Adapter: %p\n", adapter);
3071
3072         device_printf(dev, "Excessive collisions = %lld\n",
3073                       (long long)adapter->stats.ecol);
3074         device_printf(dev, "Symbol errors = %lld\n",
3075                       (long long)adapter->stats.symerrs);
3076         device_printf(dev, "Sequence errors = %lld\n",
3077                       (long long)adapter->stats.sec);
3078         device_printf(dev, "Defer count = %lld\n",
3079                       (long long)adapter->stats.dc);
3080
3081         device_printf(dev, "Missed Packets = %lld\n",
3082                       (long long)adapter->stats.mpc);
3083         device_printf(dev, "Receive No Buffers = %lld\n",
3084                       (long long)adapter->stats.rnbc);
3085         device_printf(dev, "Receive length errors = %lld\n",
3086                       (long long)adapter->stats.rlec);
3087         device_printf(dev, "Receive errors = %lld\n",
3088                       (long long)adapter->stats.rxerrc);
3089         device_printf(dev, "Crc errors = %lld\n",
3090                       (long long)adapter->stats.crcerrs);
3091         device_printf(dev, "Alignment errors = %lld\n",
3092                       (long long)adapter->stats.algnerrc);
3093         device_printf(dev, "Carrier extension errors = %lld\n",
3094                       (long long)adapter->stats.cexterr);
3095
3096         device_printf(dev, "XON Rcvd = %lld\n",
3097                       (long long)adapter->stats.xonrxc);
3098         device_printf(dev, "XON Xmtd = %lld\n",
3099                       (long long)adapter->stats.xontxc);
3100         device_printf(dev, "XOFF Rcvd = %lld\n",
3101                       (long long)adapter->stats.xoffrxc);
3102         device_printf(dev, "XOFF Xmtd = %lld\n",
3103                       (long long)adapter->stats.xofftxc);
3104
3105         device_printf(dev, "Good Packets Rcvd = %lld\n",
3106                       (long long)adapter->stats.gprc);
3107         device_printf(dev, "Good Packets Xmtd = %lld\n",
3108                       (long long)adapter->stats.gptc);
3109 }
3110
3111 static int
3112 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3113 {
3114         int error;
3115         int result;
3116         struct adapter *adapter;
3117
3118         result = -1;
3119         error = sysctl_handle_int(oidp, &result, 0, req);
3120
3121         if (error || !req->newptr)
3122                 return(error);
3123
3124         if (result == 1) {
3125                 adapter = (struct adapter *)arg1;
3126                 em_print_debug_info(adapter);
3127         }
3128
3129         return(error);
3130 }
3131
3132 static int
3133 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3134 {
3135         int error;
3136         int result;
3137         struct adapter *adapter;
3138
3139         result = -1;
3140         error = sysctl_handle_int(oidp, &result, 0, req);
3141
3142         if (error || !req->newptr)
3143                 return(error);
3144
3145         if (result == 1) {
3146                 adapter = (struct adapter *)arg1;
3147                 em_print_hw_stats(adapter);
3148         }
3149
3150         return(error);
3151 }
3152
3153 static int
3154 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3155 {
3156         struct em_int_delay_info *info;
3157         struct adapter *adapter;
3158         uint32_t regval;
3159         int error;
3160         int usecs;
3161         int ticks;
3162
3163         info = (struct em_int_delay_info *)arg1;
3164         adapter = info->adapter;
3165         usecs = info->value;
3166         error = sysctl_handle_int(oidp, &usecs, 0, req);
3167         if (error != 0 || req->newptr == NULL)
3168                 return(error);
3169         if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3170                 return(EINVAL);
3171         info->value = usecs;
3172         ticks = E1000_USECS_TO_TICKS(usecs);
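        /*
         * The hardware delay timers count in units of roughly 1.024 us;
         * E1000_USECS_TO_TICKS()/E1000_TICKS_TO_USECS() convert between the
         * sysctl's microsecond view and the 16-bit tick value masked into
         * the register below.
         */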
3173
3174         lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3175         regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3176         regval = (regval & ~0xffff) | (ticks & 0xffff);
3177         /* Handle a few special cases. */
3178         switch (info->offset) {
3179         case E1000_RDTR:
3180         case E1000_82542_RDTR:
3181                 regval |= E1000_RDT_FPDB;
3182                 break;
3183         case E1000_TIDV:
3184         case E1000_82542_TIDV:
3185                 if (ticks == 0) {
3186                         adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3187                         /* Don't write 0 into the TIDV register. */
3188                         regval++;
3189                 } else
3190                         adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3191                 break;
3192         }
3193         E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3194         lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3195         return(0);
3196 }
3197
3198 static void
3199 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3200                         const char *description, struct em_int_delay_info *info,
3201                         int offset, int value)
3202 {
3203         info->adapter = adapter;
3204         info->offset = offset;
3205         info->value = value;
3206         SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3207                         SYSCTL_CHILDREN(adapter->sysctl_tree),
3208                         OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3209                         info, 0, em_sysctl_int_delay, "I", description);
3210 }
3211
3212 static int
3213 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3214 {
3215         struct adapter *adapter = (void *)arg1;
3216         int error;
3217         int throttle;
3218
3219         throttle = em_int_throttle_ceil;
3220         error = sysctl_handle_int(oidp, &throttle, 0, req);
3221         if (error || req->newptr == NULL)
3222                 return error;
3223         if (throttle < 0 || throttle > 1000000000 / 256)
3224                 return EINVAL;
3225         if (throttle) {
3226                 /*
3227                  * Set the interrupt throttling rate in 256ns increments, then
3228                  * recalculate the sysctl value to reflect the frequency actually used.
3229                  */
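                /*
                 * For example, asking for 8000 interrupts/sec programs
                 * ITR = 1000000000 / 256 / 8000 = 488, and the ceiling
                 * reported back is 1000000000 / 256 / 488 = 8004/sec.
                 */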
3230                 throttle = 1000000000 / 256 / throttle;
3231                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3232                 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3233                 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3234                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3235         } else {
3236                 lwkt_serialize_enter(adapter->interface_data.ac_if.if_serializer);
3237                 em_int_throttle_ceil = 0;
3238                 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3239                 lwkt_serialize_exit(adapter->interface_data.ac_if.if_serializer);
3240         }
3241         device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n", 
3242                         em_int_throttle_ceil);
3243         return 0;
3244 }
3245