1 /**************************************************************************
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
5 Copyright (c) 2001-2005, Intel Corporation
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
34 ***************************************************************************/
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.42 2005/11/08 12:48:18 sephe Exp $*/
39 #include "opt_polling.h"
41 #include <dev/netif/em/if_em.h>
42 #include <net/ifq_var.h>
44 /*********************************************************************
45 * Set this to one to display debug statistics
46 *********************************************************************/
/* Runtime debug toggle read by the stats-printing paths (see em_sysctl_stats). */
47 int em_display_debug_stats = 0;
49 /*********************************************************************
51 *********************************************************************/
/* Driver version string; reported in the probe description (see em_probe). */
53 char em_driver_version[] = "3.2.15";
56 /*********************************************************************
59 * Used by probe to select devices to load on
60 * Last field stores an index into em_strings
61 * Last entry must be all 0s
63 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
64 *********************************************************************/
/*
 * PCI ID match table walked by em_probe(); vendor 0x8086 is Intel.
 * PCI_ANY_ID in the subvendor/subdevice slots is a wildcard.
 * NOTE(review): the opening brace and the all-zero terminator entry are
 * not visible in this listing — confirm against the full file.
 */
66 static em_vendor_info_t em_vendor_info_array[] =
68 /* Intel(R) PRO/1000 Network Connection */
69 { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0},
70 { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
71 { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0},
72 { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
73 { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0},
75 { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0},
76 { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0},
77 { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
78 { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0},
79 { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0},
80 { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0},
82 { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0},
84 { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
85 { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
87 { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
88 { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
89 { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
90 { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0},
92 { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
93 { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
94 { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
95 { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
96 { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
98 { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
99 { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
100 { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
101 { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
102 { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
103 { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
104 { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0},
/* 82546GB quad-port support is gated behind a project-specific build option. */
105 #ifdef KINGSPORT_PROJECT
106 { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
107 #endif /* KINGSPORT_PROJECT */
109 { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0},
110 { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0},
112 { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
113 { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
114 { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
116 { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
117 { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
118 { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
120 { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0},
121 { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0},
122 { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0},
/* Raw device IDs with no E1000_DEV_ID_* symbol — presumably newer parts; verify. */
124 { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
125 { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
126 /* required last entry */
130 /*********************************************************************
131 * Table of branding strings for all supported NICs.
132 *********************************************************************/
/* Indexed by the last field of em_vendor_info_array entries (all 0 today). */
134 static const char *em_strings[] = {
135 "Intel(R) PRO/1000 Network Connection"
138 /*********************************************************************
139 * Function prototypes
140 *********************************************************************/
/* Device interface entry points (newbus). */
141 static int em_probe(device_t);
142 static int em_attach(device_t);
143 static int em_detach(device_t);
144 static int em_shutdown(device_t);
/* ifnet entry points and their serializer-held variants. */
145 static void em_intr(void *);
146 static void em_start(struct ifnet *);
147 static void em_start_serialized(struct ifnet *);
148 static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
149 static void em_watchdog(struct ifnet *);
150 static void em_init(void *);
151 static void em_init_serialized(void *);
152 static void em_stop(void *);
153 static void em_media_status(struct ifnet *, struct ifmediareq *);
154 static int em_media_change(struct ifnet *);
/* Hardware bring-up, ring management, and interrupt servicing helpers. */
155 static void em_identify_hardware(struct adapter *);
156 static void em_local_timer(void *);
157 static int em_hardware_init(struct adapter *);
158 static void em_setup_interface(device_t, struct adapter *);
159 static int em_setup_transmit_structures(struct adapter *);
160 static void em_initialize_transmit_unit(struct adapter *);
161 static int em_setup_receive_structures(struct adapter *);
162 static void em_initialize_receive_unit(struct adapter *);
163 static void em_enable_intr(struct adapter *);
164 static void em_disable_intr(struct adapter *);
165 static void em_free_transmit_structures(struct adapter *);
166 static void em_free_receive_structures(struct adapter *);
167 static void em_update_stats_counters(struct adapter *);
168 static void em_clean_transmit_interrupts(struct adapter *);
169 static int em_allocate_receive_structures(struct adapter *);
170 static int em_allocate_transmit_structures(struct adapter *);
171 static void em_process_receive_interrupts(struct adapter *, int);
172 static void em_receive_checksum(struct adapter *, struct em_rx_desc *,
174 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
175 uint32_t *, uint32_t *);
176 static void em_set_promisc(struct adapter *);
177 static void em_disable_promisc(struct adapter *);
178 static void em_set_multi(struct adapter *);
179 static void em_print_hw_stats(struct adapter *);
180 static void em_print_link_status(struct adapter *);
181 static int em_get_buf(int i, struct adapter *, struct mbuf *, int how);
182 static void em_enable_vlans(struct adapter *);
183 static int em_encap(struct adapter *, struct mbuf *);
184 static void em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers (see em_init_serialized PBA setup). */
185 static int em_82547_fifo_workaround(struct adapter *, int);
186 static void em_82547_update_fifo_head(struct adapter *, int);
187 static int em_82547_tx_fifo_reset(struct adapter *);
188 static void em_82547_move_tail(void *arg);
189 static void em_82547_move_tail_serialized(void *arg);
/* DMA memory helpers and sysctl handlers. */
190 static int em_dma_malloc(struct adapter *, bus_size_t,
191 struct em_dma_alloc *, int);
192 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
193 static void em_print_debug_info(struct adapter *);
194 static int em_is_valid_ether_addr(uint8_t *);
195 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
196 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
197 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length,
198 PDESC_ARRAY desc_array);
199 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
200 static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
201 static void em_add_int_delay_sysctl(struct adapter *, const char *,
203 struct em_int_delay_info *, int, int);
205 /*********************************************************************
206 * FreeBSD Device Interface Entry Points
207 *********************************************************************/
/*
 * newbus glue: method table, driver declaration, and module registration.
 * NOTE(review): the DEVMETHOD_END/terminator and closing braces of the
 * method table and driver struct are not visible in this listing.
 */
209 static device_method_t em_methods[] = {
210 /* Device interface */
211 DEVMETHOD(device_probe, em_probe),
212 DEVMETHOD(device_attach, em_attach),
213 DEVMETHOD(device_detach, em_detach),
214 DEVMETHOD(device_shutdown, em_shutdown),
218 static driver_t em_driver = {
219 "em", em_methods, sizeof(struct adapter),
222 static devclass_t em_devclass;
224 DECLARE_DUMMY_MODULE(if_em);
/* Attach the "em" driver to the PCI bus. */
225 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
227 /*********************************************************************
228 * Tunable default values.
229 *********************************************************************/
/*
 * Hardware interrupt-delay registers count in 1.024us ticks; these macros
 * convert between ticks and microseconds with rounding (+500/+512).
 */
231 #define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
232 #define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
/* Defaults in usecs, derived from the EM_* tick constants; overridable below. */
234 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
235 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
236 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
237 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
238 static int em_int_throttle_ceil = 10000;
/* Loader tunables under hw.em.* feeding the defaults above. */
240 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
241 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
242 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
243 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
244 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
246 /*********************************************************************
247 * Device identification routine
249 * em_probe determines if the driver should be loaded on
250 * adapter based on PCI vendor/device id of the adapter.
252 * return 0 on success, positive on failure
253 *********************************************************************/
/* NOTE(review): storage class, braces, and return statements of this
 * function are not visible in this listing — compare with the full file. */
256 em_probe(device_t dev)
258 em_vendor_info_t *ent;
260 uint16_t pci_vendor_id = 0;
261 uint16_t pci_device_id = 0;
262 uint16_t pci_subvendor_id = 0;
263 uint16_t pci_subdevice_id = 0;
264 char adapter_name[60];
266 INIT_DEBUGOUT("em_probe: begin");
/* Fast reject: only Intel (EM_VENDOR_ID) devices can match. */
268 pci_vendor_id = pci_get_vendor(dev);
269 if (pci_vendor_id != EM_VENDOR_ID)
272 pci_device_id = pci_get_device(dev);
273 pci_subvendor_id = pci_get_subvendor(dev);
274 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the zero-terminated match table; PCI_ANY_ID wildcards sub IDs. */
276 ent = em_vendor_info_array;
277 while (ent->vendor_id != 0) {
278 if ((pci_vendor_id == ent->vendor_id) &&
279 (pci_device_id == ent->device_id) &&
281 ((pci_subvendor_id == ent->subvendor_id) ||
282 (ent->subvendor_id == PCI_ANY_ID)) &&
284 ((pci_subdevice_id == ent->subdevice_id) ||
285 (ent->subdevice_id == PCI_ANY_ID))) {
/* Build "<branding string>, Version - <driver version>" as the description. */
286 snprintf(adapter_name, sizeof(adapter_name),
287 "%s, Version - %s", em_strings[ent->index],
289 device_set_desc_copy(dev, adapter_name);
298 /*********************************************************************
299 * Device initialization routine
301 * The attach entry point is called when the driver is being loaded.
302 * This routine identifies the type of hardware, allocates all resources
303 * and initializes the hardware.
305 * return 0 on success, positive on failure
306 *********************************************************************/
/* NOTE(review): storage class, braces, several declarations, and the error
 * unwind paths of this function are not visible in this listing. */
309 em_attach(device_t dev)
311 struct adapter *adapter;
316 INIT_DEBUGOUT("em_attach: begin");
318 adapter = device_get_softc(dev);
/* Serializer guards all ifnet/interrupt entry points for this adapter. */
320 lwkt_serialize_init(&adapter->serializer);
322 callout_init(&adapter->timer);
323 callout_init(&adapter->tx_fifo_timer);
326 adapter->osdep.dev = dev;
/* Per-device sysctl tree: hw.<nameunit>.* */
329 sysctl_ctx_init(&adapter->sysctl_ctx);
330 adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
331 SYSCTL_STATIC_CHILDREN(_hw),
333 device_get_nameunit(dev),
337 if (adapter->sysctl_tree == NULL) {
342 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
343 SYSCTL_CHILDREN(adapter->sysctl_tree),
344 OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
346 em_sysctl_debug_info, "I", "Debug Information");
348 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
349 SYSCTL_CHILDREN(adapter->sysctl_tree),
350 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
352 em_sysctl_stats, "I", "Statistics");
354 /* Determine hardware revision */
355 em_identify_hardware(adapter);
357 /* Set up some sysctls for the tunable interrupt delays */
358 em_add_int_delay_sysctl(adapter, "rx_int_delay",
359 "receive interrupt delay in usecs",
360 &adapter->rx_int_delay,
361 E1000_REG_OFFSET(&adapter->hw, RDTR),
362 em_rx_int_delay_dflt);
363 em_add_int_delay_sysctl(adapter, "tx_int_delay",
364 "transmit interrupt delay in usecs",
365 &adapter->tx_int_delay,
366 E1000_REG_OFFSET(&adapter->hw, TIDV),
367 em_tx_int_delay_dflt);
/* Absolute-delay registers (RADV/TADV) exist only on 82540 and later. */
368 if (adapter->hw.mac_type >= em_82540) {
369 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
370 "receive interrupt delay limit in usecs",
371 &adapter->rx_abs_int_delay,
372 E1000_REG_OFFSET(&adapter->hw, RADV),
373 em_rx_abs_int_delay_dflt);
374 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
375 "transmit interrupt delay limit in usecs",
376 &adapter->tx_abs_int_delay,
377 E1000_REG_OFFSET(&adapter->hw, TADV),
378 em_tx_abs_int_delay_dflt);
379 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
380 SYSCTL_CHILDREN(adapter->sysctl_tree),
381 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
382 adapter, 0, em_sysctl_int_throttle, "I", NULL);
385 /* Parameters (to be read from user) */
386 adapter->num_tx_desc = EM_MAX_TXD;
387 adapter->num_rx_desc = EM_MAX_RXD;
388 adapter->hw.autoneg = DO_AUTO_NEG;
389 adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
390 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
391 adapter->hw.tbi_compatibility_en = TRUE;
392 adapter->rx_buffer_len = EM_RXBUFFER_2048;
394 adapter->hw.phy_init_script = 1;
395 adapter->hw.phy_reset_disable = FALSE;
/* Master/slave PHY role: hardware default unless overridden at build time. */
397 #ifndef EM_MASTER_SLAVE
398 adapter->hw.master_slave = em_ms_hw_default;
400 adapter->hw.master_slave = EM_MASTER_SLAVE;
404 * Set the max frame size assuming standard ethernet
407 adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
409 adapter->hw.min_frame_size =
410 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
413 * This controls when hardware reports transmit completion
416 adapter->hw.report_tx_early = 1;
/* Map the memory BAR; register access goes through the bus-space handle. */
419 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
421 if (!(adapter->res_memory)) {
422 device_printf(dev, "Unable to allocate bus resource: memory\n");
426 adapter->osdep.mem_bus_space_tag =
427 rman_get_bustag(adapter->res_memory);
428 adapter->osdep.mem_bus_space_handle =
429 rman_get_bushandle(adapter->res_memory);
430 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
/* Post-82543 parts also need an I/O BAR; scan config space for it. */
432 if (adapter->hw.mac_type > em_82543) {
433 /* Figure our where our IO BAR is ? */
435 for (i = 0; i < 5; i++) {
436 val = pci_read_config(dev, rid, 4);
/* Bit 0 set in a BAR marks it as I/O space. */
437 if (val & 0x00000001) {
438 adapter->io_rid = rid;
444 adapter->res_ioport = bus_alloc_resource_any(dev,
445 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
446 if (!(adapter->res_ioport)) {
447 device_printf(dev, "Unable to allocate bus resource: ioport\n");
452 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
453 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
/* IRQ is allocated here but the handler is hooked up at the end of attach. */
457 adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
458 &rid, RF_SHAREABLE | RF_ACTIVE);
459 if (!(adapter->res_interrupt)) {
460 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
465 adapter->hw.back = &adapter->osdep;
467 em_init_eeprom_params(&adapter->hw);
/* Descriptor rings are rounded up to a 4K multiple for the DMA allocator. */
469 tsize = EM_ROUNDUP(adapter->num_tx_desc *
470 sizeof(struct em_tx_desc), 4096);
472 /* Allocate Transmit Descriptor ring */
473 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
474 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
478 adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
480 rsize = EM_ROUNDUP(adapter->num_rx_desc *
481 sizeof(struct em_rx_desc), 4096);
483 /* Allocate Receive Descriptor ring */
484 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
485 device_printf(dev, "Unable to allocate rx_desc memory\n");
489 adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
491 /* Initialize the hardware */
492 if (em_hardware_init(adapter)) {
493 device_printf(dev, "Unable to initialize the hardware\n");
498 /* Copy the permanent MAC address out of the EEPROM */
499 if (em_read_mac_addr(&adapter->hw) < 0) {
500 device_printf(dev, "EEPROM read error while reading mac address\n");
505 if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
506 device_printf(dev, "Invalid mac address\n");
511 /* Setup OS specific network interface */
512 em_setup_interface(dev, adapter);
514 /* Initialize statistics */
515 em_clear_hw_cntrs(&adapter->hw);
516 em_update_stats_counters(adapter);
517 adapter->hw.get_link_status = 1;
518 em_check_for_link(&adapter->hw);
520 /* Print the link status */
521 if (adapter->link_active == 1) {
522 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
523 &adapter->link_duplex);
524 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
526 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
528 device_printf(dev, "Speed: N/A, Duplex:N/A\n");
530 /* Identify 82544 on PCIX */
531 em_get_bus_info(&adapter->hw);
532 if (adapter->hw.bus_type == em_bus_type_pcix &&
533 adapter->hw.mac_type == em_82544)
534 adapter->pcix_82544 = TRUE;
536 adapter->pcix_82544 = FALSE;
/* Hook the interrupt last; handler runs under the adapter serializer. */
538 error = bus_setup_intr(dev, adapter->res_interrupt, 0,
539 (void (*)(void *)) em_intr, adapter,
540 &adapter->int_handler_tag, &adapter->serializer);
542 device_printf(dev, "Error registering interrupt handler!\n");
543 ether_ifdetach(&adapter->interface_data.ac_if);
547 INIT_DEBUGOUT("em_attach: end");
555 /*********************************************************************
556 * Device removal routine
558 * The detach entry point is called when the driver is being removed.
559 * This routine stops the adapter and deallocates all the resources
560 * that were allocated for driver operation.
562 * return 0 on success, positive on failure
563 *********************************************************************/
/* NOTE(review): storage class, braces, and return statements are not
 * visible in this listing. */
566 em_detach(device_t dev)
568 struct adapter * adapter = device_get_softc(dev);
570 INIT_DEBUGOUT("em_detach: begin");
/* Block concurrent ifnet/interrupt entry while tearing down. */
572 lwkt_serialize_enter(&adapter->serializer);
573 adapter->in_detach = 1;
575 if (device_is_attached(dev)) {
577 em_phy_hw_reset(&adapter->hw);
578 ether_ifdetach(&adapter->interface_data.ac_if);
580 bus_generic_detach(dev);
/* Release resources in reverse order of acquisition in em_attach(). */
582 if (adapter->int_handler_tag != NULL) {
583 bus_teardown_intr(dev, adapter->res_interrupt,
584 adapter->int_handler_tag);
586 if (adapter->res_interrupt != NULL) {
587 bus_release_resource(dev, SYS_RES_IRQ, 0,
588 adapter->res_interrupt);
590 if (adapter->res_memory != NULL) {
591 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
592 adapter->res_memory);
595 if (adapter->res_ioport != NULL) {
596 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
597 adapter->res_ioport);
600 /* Free Transmit Descriptor ring */
601 if (adapter->tx_desc_base != NULL) {
602 em_dma_free(adapter, &adapter->txdma);
603 adapter->tx_desc_base = NULL;
606 /* Free Receive Descriptor ring */
607 if (adapter->rx_desc_base != NULL) {
608 em_dma_free(adapter, &adapter->rxdma);
609 adapter->rx_desc_base = NULL;
612 adapter->sysctl_tree = NULL;
613 sysctl_ctx_free(&adapter->sysctl_ctx);
615 lwkt_serialize_exit(&adapter->serializer);
619 /*********************************************************************
621 * Shutdown entry point
623 **********************************************************************/
/* NOTE(review): body (presumably an em_stop call and return) is not
 * visible in this listing. */
626 em_shutdown(device_t dev)
628 struct adapter *adapter = device_get_softc(dev);
633 /*********************************************************************
634 * Transmit entry point
636 * em_start is called by the stack to initiate a transmit.
637 * The driver will remain in this routine as long as there are
638 * packets to transmit and transmit resources are available.
639 * In case resources are not available stack is notified and
640 * the packet is requeued.
641 **********************************************************************/
/* Thin wrapper: take the adapter serializer around the real work. */
644 em_start(struct ifnet *ifp)
646 struct adapter *adapter = ifp->if_softc;
648 lwkt_serialize_enter(&adapter->serializer);
649 em_start_serialized(ifp);
650 lwkt_serialize_exit(&adapter->serializer);
/* Drain the interface send queue; caller must hold adapter->serializer.
 * NOTE(review): braces and some statements are not visible in this listing. */
654 em_start_serialized(struct ifnet *ifp)
657 struct adapter *adapter = ifp->if_softc;
659 if (!adapter->link_active)
661 while (!ifq_is_empty(&ifp->if_snd)) {
/* Peek first: on encap failure the mbuf stays queued and we set OACTIVE. */
662 m_head = ifq_poll(&ifp->if_snd);
667 if (em_encap(adapter, m_head)) {
668 ifp->if_flags |= IFF_OACTIVE;
/* Success: now actually remove the packet from the queue. */
671 m_head = ifq_dequeue(&ifp->if_snd);
673 /* Send a copy of the frame to the BPF listener */
674 BPF_MTAP(ifp, m_head);
676 /* Set timeout in case hardware has problems transmitting */
677 ifp->if_timer = EM_TX_TIMEOUT;
681 /*********************************************************************
684 * em_ioctl is called when the user wants to configure the
687 * return 0 on success, positive on failure
688 **********************************************************************/
/* NOTE(review): the switch statement, case labels, and break statements
 * are not visible in this listing — the cases below are inferred from
 * the IOCTL_DEBUGOUT strings. */
691 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
693 int max_frame_size, mask, error = 0;
694 struct ifreq *ifr = (struct ifreq *) data;
695 struct adapter *adapter = ifp->if_softc;
697 lwkt_serialize_enter(&adapter->serializer);
699 if (adapter->in_detach)
705 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
/* ether_ioctl may call back into the driver; drop the serializer around it. */
706 lwkt_serialize_exit(&adapter->serializer);
707 ether_ioctl(ifp, command, data);
708 lwkt_serialize_enter(&adapter->serializer);
711 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
/* Per-MAC jumbo frame limits. */
712 switch (adapter->hw.mac_type) {
715 max_frame_size = 10500;
718 /* 82573 does not support jumbo frames */
719 max_frame_size = ETHER_MAX_LEN;
722 max_frame_size = MAX_JUMBO_FRAME_SIZE;
726 max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) {
729 ifp->if_mtu = ifr->ifr_mtu;
730 adapter->hw.max_frame_size =
731 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
/* Re-init to apply the new frame size. */
732 em_init_serialized(adapter);
736 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
737 if (ifp->if_flags & IFF_UP) {
738 if (!(ifp->if_flags & IFF_RUNNING))
739 em_init_serialized(adapter);
740 em_disable_promisc(adapter);
741 em_set_promisc(adapter);
743 if (ifp->if_flags & IFF_RUNNING)
749 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
750 if (ifp->if_flags & IFF_RUNNING) {
/* 82542 rev2.0 needs the RX unit reprogrammed after a multicast change. */
751 em_disable_intr(adapter);
752 em_set_multi(adapter);
753 if (adapter->hw.mac_type == em_82542_rev2_0)
754 em_initialize_receive_unit(adapter);
755 em_enable_intr(adapter);
760 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
761 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
764 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
/* Toggle hardware checksum offload and re-init if running. */
765 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
766 if (mask & IFCAP_HWCSUM) {
767 if (IFCAP_HWCSUM & ifp->if_capenable)
768 ifp->if_capenable &= ~IFCAP_HWCSUM;
770 ifp->if_capenable |= IFCAP_HWCSUM;
771 if (ifp->if_flags & IFF_RUNNING)
772 em_init_serialized(adapter);
776 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command);
781 lwkt_serialize_exit(&adapter->serializer);
785 /*********************************************************************
786 * Watchdog entry point
788 * This routine is called whenever hardware quits transmitting.
790 **********************************************************************/
/* NOTE(review): braces/returns and the reset tail of this function are
 * not visible in this listing. */
793 em_watchdog(struct ifnet *ifp)
795 struct adapter * adapter;
796 adapter = ifp->if_softc;
798 /* If we are in this routine because of pause frames, then
799 * don't reset the hardware.
801 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
/* Flow-control pause in effect: just rearm the timer and return. */
802 ifp->if_timer = EM_TX_TIMEOUT;
806 if (em_check_for_link(&adapter->hw))
807 if_printf(ifp, "watchdog timeout -- resetting\n");
/* Mark the interface down; presumably followed by a re-init (not visible). */
809 ifp->if_flags &= ~IFF_RUNNING;
816 /*********************************************************************
819 * This routine is used in two ways. It is used by the stack as
820 * init entry point in network interface structure. It is also used
821 * by the driver as a hw/sw initialization routine to get to a
824 * return 0 on success, positive on failure
825 **********************************************************************/
/* em_init: serializer-taking wrapper around em_init_serialized().
 * NOTE(review): the function signature line itself is not visible here. */
830 struct adapter *adapter = arg;
832 lwkt_serialize_enter(&adapter->serializer);
833 em_init_serialized(arg);
834 lwkt_serialize_exit(&adapter->serializer);
/* Full software/hardware (re)initialization; caller must hold the
 * adapter serializer.
 * NOTE(review): braces, some case labels, and error-path statements are
 * not visible in this listing. */
838 em_init_serialized(void *arg)
840 struct adapter *adapter = arg;
842 struct ifnet *ifp = &adapter->interface_data.ac_if;
844 INIT_DEBUGOUT("em_init: begin");
849 * Packet Buffer Allocation (PBA)
850 * Writing PBA sets the receive portion of the buffer
851 * the remainder is used for the transmit buffer.
853 switch (adapter->hw.mac_type) {
855 case em_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
856 if (adapter->hw.max_frame_size > EM_RXBUFFER_8192)
857 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
859 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
/* 82547 TX FIFO workaround bookkeeping derived from the PBA split. */
861 adapter->tx_fifo_head = 0;
862 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
863 adapter->tx_fifo_size =
864 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
866 case em_82571: /* 82571: Total Packet Buffer is 48K */
867 case em_82572: /* 82572: Total Packet Buffer is 48K */
868 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
870 case em_82573: /* 82573: Total Packet Buffer is 32K */
871 /* Jumbo frames not supported */
872 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
875 /* Devices before 82547 had a Packet Buffer of 64K. */
876 if(adapter->hw.max_frame_size > EM_RXBUFFER_8192)
877 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
879 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
882 INIT_DEBUGOUT1("em_init: pba=%dK",pba);
883 E1000_WRITE_REG(&adapter->hw, PBA, pba);
885 /* Get the latest mac address, User can use a LAA */
886 bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
889 /* Initialize the hardware */
890 if (em_hardware_init(adapter)) {
891 if_printf(ifp, "Unable to initialize the hardware\n");
895 em_enable_vlans(adapter);
897 /* Prepare transmit descriptors and buffers */
898 if (em_setup_transmit_structures(adapter)) {
899 if_printf(ifp, "Could not setup transmit structures\n");
903 em_initialize_transmit_unit(adapter);
905 /* Setup Multicast table */
906 em_set_multi(adapter);
908 /* Prepare receive descriptors and buffers */
909 if (em_setup_receive_structures(adapter)) {
910 if_printf(ifp, "Could not setup receive structures\n");
914 em_initialize_receive_unit(adapter);
916 /* Don't loose promiscuous settings */
917 em_set_promisc(adapter);
919 ifp->if_flags |= IFF_RUNNING;
920 ifp->if_flags &= ~IFF_OACTIVE;
/* TX checksum offload is only available on 82543 and newer. */
922 if (adapter->hw.mac_type >= em_82543) {
923 if (ifp->if_capenable & IFCAP_TXCSUM)
924 ifp->if_hwassist = EM_CHECKSUM_FEATURES;
926 ifp->if_hwassist = 0;
/* Start the once-per-second link/stats timer and unmask interrupts. */
929 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
930 em_clear_hw_cntrs(&adapter->hw);
931 em_enable_intr(adapter);
933 /* Don't reset the phy next time init gets called */
934 adapter->hw.phy_reset_disable = TRUE;
937 #ifdef DEVICE_POLLING
/* Polling entry point (DEVICE_POLLING): services RX/TX without interrupts.
 * NOTE(review): braces and some case labels are not visible in this listing. */
940 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
942 struct adapter *adapter = ifp->if_softc;
945 lwkt_serialize_enter(&adapter->serializer);
/* POLL_REGISTER (label not visible): mask interrupts while polling. */
948 em_disable_intr(adapter);
950 case POLL_DEREGISTER:
951 em_enable_intr(adapter);
953 case POLL_AND_CHECK_STATUS:
/* Reading ICR clears it; check for link-state events while here. */
954 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
955 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
956 callout_stop(&adapter->timer);
957 adapter->hw.get_link_status = 1;
958 em_check_for_link(&adapter->hw);
959 em_print_link_status(adapter);
960 callout_reset(&adapter->timer, hz, em_local_timer,
965 if (ifp->if_flags & IFF_RUNNING) {
/* count bounds how many RX packets one poll pass may process. */
966 em_process_receive_interrupts(adapter, count);
967 em_clean_transmit_interrupts(adapter);
969 if (ifp->if_flags & IFF_RUNNING) {
970 if (!ifq_is_empty(&ifp->if_snd))
971 em_start_serialized(ifp);
975 lwkt_serialize_exit(&adapter->serializer);
978 #endif /* DEVICE_POLLING */
980 /*********************************************************************
982 * Interrupt Service routine
984 **********************************************************************/
/* em_intr: runs with the adapter serializer held (hooked that way in
 * em_attach). NOTE(review): the signature line and some declarations are
 * not visible in this listing. */
990 struct adapter *adapter = arg;
992 ifp = &adapter->interface_data.ac_if;
/* Reading ICR acknowledges/clears the pending interrupt causes. */
994 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
998 /* Link status change */
999 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1000 callout_stop(&adapter->timer);
1001 adapter->hw.get_link_status = 1;
1002 em_check_for_link(&adapter->hw);
1003 em_print_link_status(adapter);
1004 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1008 * note: do not attempt to improve efficiency by looping. This
1009 * only results in unnecessary piecemeal collection of received
1010 * packets and unnecessary piecemeal cleanups of the transmit ring.
1012 if (ifp->if_flags & IFF_RUNNING) {
/* -1 = no RX packet-count limit (contrast with em_poll's count). */
1013 em_process_receive_interrupts(adapter, -1);
1014 em_clean_transmit_interrupts(adapter);
/* Kick TX if the cleanup above freed descriptors and work is queued. */
1017 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
1018 em_start_serialized(ifp);
1021 /*********************************************************************
1023 * Media Ioctl callback
1025 * This routine is called whenever the user queries the status of
1026 * the interface using ifconfig.
1028 **********************************************************************/
/* NOTE(review): braces and some case labels are not visible in this listing. */
1030 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1032 struct adapter * adapter = ifp->if_softc;
1034 INIT_DEBUGOUT("em_media_status: begin");
/* Refresh cached link state from the hardware STATUS register. */
1036 em_check_for_link(&adapter->hw);
1037 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1038 if (adapter->link_active == 0) {
1039 em_get_speed_and_duplex(&adapter->hw,
1040 &adapter->link_speed,
1041 &adapter->link_duplex);
1042 adapter->link_active = 1;
1045 if (adapter->link_active == 1) {
1046 adapter->link_speed = 0;
1047 adapter->link_duplex = 0;
1048 adapter->link_active = 0;
1052 ifmr->ifm_status = IFM_AVALID;
1053 ifmr->ifm_active = IFM_ETHER;
1055 if (!adapter->link_active)
1058 ifmr->ifm_status |= IFM_ACTIVE;
/* Fiber reports fixed 1000SX/FDX; copper maps link_speed to a media subtype. */
1060 if (adapter->hw.media_type == em_media_type_fiber) {
1061 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1063 switch (adapter->link_speed) {
1065 ifmr->ifm_active |= IFM_10_T;
1068 ifmr->ifm_active |= IFM_100_TX;
1071 ifmr->ifm_active |= IFM_1000_T;
1074 if (adapter->link_duplex == FULL_DUPLEX)
1075 ifmr->ifm_active |= IFM_FDX;
1077 ifmr->ifm_active |= IFM_HDX;
1081 /*********************************************************************
1083 * Media Ioctl callback
1085 * This routine is called when the user changes speed/duplex using
1086 media/mediaopt option with ifconfig.
1088 **********************************************************************/
/*
 * em_media_change - ifmedia "change" callback.
 *
 * Translates the ifmedia word selected by the user into the hw struct's
 * autoneg / forced speed-duplex settings, then re-initializes the
 * adapter so the PHY picks up the new configuration.  Runs under the
 * adapter serializer.
 *
 * NOTE(review): extraction dropped the `case` labels and `break`s of the
 * switch; code text below is kept byte-identical.
 */
1090 em_media_change(struct ifnet *ifp)
1092 struct adapter * adapter = ifp->if_softc;
1093 struct ifmedia *ifm = &adapter->media;
1095 INIT_DEBUGOUT("em_media_change: begin");
/* Reject anything that is not an Ethernet media word. */
1097 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1100 lwkt_serialize_enter(&adapter->serializer);
1102 switch (IFM_SUBTYPE(ifm->ifm_media)) {
/* Autoselect: advertise everything. */
1104 adapter->hw.autoneg = DO_AUTO_NEG;
1105 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
/* 1000baseT: gigabit requires autoneg, advertise 1000FDX only. */
1109 adapter->hw.autoneg = DO_AUTO_NEG;
1110 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
/* 100baseTX: force the speed, honour the FDX/HDX option bit. */
1113 adapter->hw.autoneg = FALSE;
1114 adapter->hw.autoneg_advertised = 0;
1115 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1116 adapter->hw.forced_speed_duplex = em_100_full;
1118 adapter->hw.forced_speed_duplex = em_100_half;
/* 10baseT: force the speed, honour the FDX/HDX option bit. */
1121 adapter->hw.autoneg = FALSE;
1122 adapter->hw.autoneg_advertised = 0;
1123 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1124 adapter->hw.forced_speed_duplex = em_10_full;
1126 adapter->hw.forced_speed_duplex = em_10_half;
1129 if_printf(ifp, "Unsupported media type\n");
1132 * As the speed/duplex settings may have changed we need to
/* Allow the PHY to be reset during re-init so changes take effect. */
1135 adapter->hw.phy_reset_disable = FALSE;
1137 em_init_serialized(adapter);
1139 lwkt_serialize_exit(&adapter->serializer);
/*
 * em_tx_cb - bus_dmamap_load_mbuf() callback for transmit mappings.
 *
 * Copies the scatter/gather segment list produced by busdma into the
 * caller's struct em_q (arg) so em_encap() can walk it afterwards.
 * Asserts the segment count fits the EM_MAX_SCATTER-sized array.
 */
1144 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1147 struct em_q *q = arg;
1151 KASSERT(nsegs <= EM_MAX_SCATTER,
1152 ("Too many DMA segments returned when mapping tx packet"));
1154 bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
1157 /*********************************************************************
1159 * This routine maps the mbufs to tx descriptors.
1161 * return 0 on success, positive on failure
1162 **********************************************************************/
/*
 * em_encap - map an mbuf chain onto transmit descriptors.
 *
 * Cleans the ring if descriptors are low, DMA-maps the packet,
 * optionally programs a checksum-offload context and a VLAN tag, fills
 * one descriptor per DMA segment (with the 82544-on-PCIX address/length
 * split workaround when pcix_82544 is set), marks the last descriptor
 * EOP, and advances TDT (via the 82547 FIFO workaround path on that
 * chip in half duplex).  Returns 0 on success, positive on failure
 * per the header comment above.
 *
 * NOTE(review): extraction dropped interior lines (error `return`s,
 * braces, some declarations such as `struct em_q q;`); code text below
 * is kept byte-identical to what survived.
 */
1164 em_encap(struct adapter *adapter, struct mbuf *m_head)
1167 uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1171 /* For 82544 Workaround */
1172 DESC_ARRAY desc_array;
1173 uint32_t array_elements;
1176 struct ifvlan *ifv = NULL;
1178 struct em_buffer *tx_buffer = NULL;
1179 struct em_tx_desc *current_tx_desc = NULL;
1180 struct ifnet *ifp = &adapter->interface_data.ac_if;
1183 * Force a cleanup if number of TX descriptors
1184 * available hits the threshold
1186 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1187 em_clean_transmit_interrupts(adapter);
/* Still below threshold after cleanup: count the failure and bail. */
1188 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1189 adapter->no_tx_desc_avail1++;
1194 * Map the packet for DMA.
1196 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1197 adapter->no_tx_map_avail++;
1200 error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1201 &q, BUS_DMA_NOWAIT);
1203 adapter->no_tx_dma_setup++;
1204 bus_dmamap_destroy(adapter->txtag, q.map);
1207 KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
/* Not enough free descriptors for this many segments: undo the map. */
1209 if (q.nsegs > adapter->num_tx_desc_avail) {
1210 adapter->no_tx_desc_avail2++;
1211 bus_dmamap_unload(adapter->txtag, q.map);
1212 bus_dmamap_destroy(adapter->txtag, q.map);
/* Program a checksum-offload context only when the stack asked for it. */
1216 if (ifp->if_hwassist > 0) {
1217 em_transmit_checksum_setup(adapter, m_head,
1218 &txd_upper, &txd_lower);
1220 txd_upper = txd_lower = 0;
1223 /* Find out if we are in vlan mode */
1224 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1225 m_head->m_pkthdr.rcvif != NULL &&
1226 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1227 ifv = m_head->m_pkthdr.rcvif->if_softc;
1229 i = adapter->next_avail_tx_desc;
1230 if (adapter->pcix_82544) {
1234 for (j = 0; j < q.nsegs; j++) {
1235 /* If adapter is 82544 and on PCIX bus */
1236 if(adapter->pcix_82544) {
1238 address = htole64(q.segs[j].ds_addr);
1240 * Check the Address and Length combination and
1241 * split the data accordingly
1243 array_elements = em_fill_descriptors(address,
1244 htole32(q.segs[j].ds_len),
1246 for (counter = 0; counter < array_elements; counter++) {
/* Ring exhausted mid-packet: roll back to the saved head. */
1247 if (txd_used == adapter->num_tx_desc_avail) {
1248 adapter->next_avail_tx_desc = txd_saved;
1249 adapter->no_tx_desc_avail2++;
1250 bus_dmamap_unload(adapter->txtag, q.map);
1251 bus_dmamap_destroy(adapter->txtag, q.map);
1254 tx_buffer = &adapter->tx_buffer_area[i];
1255 current_tx_desc = &adapter->tx_desc_base[i];
1256 current_tx_desc->buffer_addr = htole64(
1257 desc_array.descriptor[counter].address);
1258 current_tx_desc->lower.data = htole32(
1259 (adapter->txd_cmd | txd_lower |
1260 (uint16_t)desc_array.descriptor[counter].length));
1261 current_tx_desc->upper.data = htole32((txd_upper));
/* Wrap the ring index. */
1262 if (++i == adapter->num_tx_desc)
1265 tx_buffer->m_head = NULL;
/* Normal (non-82544-workaround) path: one descriptor per segment. */
1269 tx_buffer = &adapter->tx_buffer_area[i];
1270 current_tx_desc = &adapter->tx_desc_base[i];
1272 current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1273 current_tx_desc->lower.data = htole32(
1274 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1275 current_tx_desc->upper.data = htole32(txd_upper);
1277 if (++i == adapter->num_tx_desc)
1280 tx_buffer->m_head = NULL;
1284 adapter->next_avail_tx_desc = i;
/* The workaround may have consumed more descriptors than segments. */
1285 if (adapter->pcix_82544)
1286 adapter->num_tx_desc_avail -= txd_used;
1288 adapter->num_tx_desc_avail -= q.nsegs;
1291 /* Set the vlan id */
1292 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1294 /* Tell hardware to add tag */
1295 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Only the last buffer of the chain owns the mbuf and the DMA map. */
1298 tx_buffer->m_head = m_head;
1299 tx_buffer->map = q.map;
1300 bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1303 * Last Descriptor of Packet needs End Of Packet (EOP)
1305 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1307 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
1308 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1311 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1312 * that this frame is available to transmit.
1314 if (adapter->hw.mac_type == em_82547 &&
1315 adapter->link_duplex == HALF_DUPLEX) {
/* 82547 half duplex: defer the tail write to the FIFO workaround. */
1316 em_82547_move_tail_serialized(adapter);
1318 E1000_WRITE_REG(&adapter->hw, TDT, i);
1319 if (adapter->hw.mac_type == em_82547) {
1320 em_82547_update_fifo_head(adapter,
1321 m_head->m_pkthdr.len);
1328 /*********************************************************************
1330 * 82547 workaround to avoid controller hang in half-duplex environment.
1331 * The workaround is to avoid queuing a large packet that would span
1332 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1333 in this case. We do that only when FIFO is quiesced.
1335 **********************************************************************/
/*
 * em_82547_move_tail - callout wrapper for the 82547 tail-move workaround.
 *
 * Acquires the adapter serializer and delegates to
 * em_82547_move_tail_serialized().  Registered as the tx_fifo_timer
 * callout handler (see em_82547_move_tail_serialized below).
 */
1337 em_82547_move_tail(void *arg)
1339 struct adapter *adapter = arg;
1341 lwkt_serialize_enter(&adapter->serializer);
1342 em_82547_move_tail_serialized(arg);
1343 lwkt_serialize_exit(&adapter->serializer);
/*
 * em_82547_move_tail_serialized - 82547 Tx FIFO hang workaround.
 *
 * Walks the descriptors between the hardware tail (TDT) and the
 * software tail, summing packet lengths.  If pushing that much data
 * would let a packet span the internal Tx FIFO wrap boundary
 * (em_82547_fifo_workaround), the tail write is retried later via the
 * tx_fifo_timer callout; otherwise TDT is advanced and the FIFO head
 * accounting is updated.  Caller must hold the adapter serializer.
 *
 * NOTE(review): extraction dropped interior lines (declarations of
 * hw_tdt/sw_tdt/eop, braces); code text is kept byte-identical.
 */
1347 em_82547_move_tail_serialized(void *arg)
1349 struct adapter *adapter = arg;
1352 struct em_tx_desc *tx_desc;
1353 uint16_t length = 0;
1356 hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1357 sw_tdt = adapter->next_avail_tx_desc;
1359 while (hw_tdt != sw_tdt) {
1360 tx_desc = &adapter->tx_desc_base[hw_tdt];
1361 length += tx_desc->lower.flags.length;
1362 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
/* Wrap hw_tdt at the end of the ring. */
1363 if(++hw_tdt == adapter->num_tx_desc)
/* Packet would straddle the FIFO boundary: retry on next tick. */
1367 if (em_82547_fifo_workaround(adapter, length)) {
1368 adapter->tx_fifo_wrk_cnt++;
1369 callout_reset(&adapter->tx_fifo_timer, 1,
1370 em_82547_move_tail, adapter);
1373 E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1374 em_82547_update_fifo_head(adapter, length);
/*
 * em_82547_fifo_workaround - decide whether a tail advance is safe.
 *
 * In half duplex, computes the space left before the Tx FIFO wraps and
 * checks whether the (header-rounded) packet length would cross the
 * boundary; attempts a FIFO reset when it would.  Return value
 * semantics: non-zero appears to mean "defer the tail write" (caller
 * retries via callout) — the `return` lines were lost in extraction,
 * so this is inferred from the caller; confirm against the original.
 */
1381 em_82547_fifo_workaround(struct adapter *adapter, int len)
1383 int fifo_space, fifo_pkt_len;
/* Round up to the FIFO header granularity, including header overhead. */
1385 fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1387 if (adapter->link_duplex == HALF_DUPLEX) {
1388 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1390 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1391 if (em_82547_tx_fifo_reset(adapter))
/*
 * em_82547_update_fifo_head - account queued bytes in the Tx FIFO model.
 *
 * Advances the software copy of the Tx FIFO head by the header-rounded
 * packet length, wrapping modulo the FIFO size.  Used by the 82547
 * workaround to predict when a packet would span the FIFO boundary.
 */
1402 em_82547_update_fifo_head(struct adapter *adapter, int len)
1404 int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1406 /* tx_fifo_head is always 16 byte aligned */
1407 adapter->tx_fifo_head += fifo_pkt_len;
1408 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1409 adapter->tx_fifo_head -= adapter->tx_fifo_size;
/*
 * em_82547_tx_fifo_reset - reset the 82547 internal Tx FIFO pointers.
 *
 * Only acts when the transmitter is fully idle: descriptor head==tail,
 * FIFO head==tail, saved FIFO head==tail, and the FIFO packet count is
 * zero.  Then the TX unit is briefly disabled, all four FIFO pointer
 * registers are rewound to tx_head_addr, TX is re-enabled, and the
 * software FIFO model is reset.  The `return` lines and the tctl
 * declaration were lost in extraction; code text is byte-identical.
 */
1413 em_82547_tx_fifo_reset(struct adapter *adapter)
1417 if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1418 E1000_READ_REG(&adapter->hw, TDH)) &&
1419 (E1000_READ_REG(&adapter->hw, TDFT) ==
1420 E1000_READ_REG(&adapter->hw, TDFH)) &&
1421 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1422 E1000_READ_REG(&adapter->hw, TDFHS)) &&
1423 (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1425 /* Disable TX unit */
1426 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1427 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1429 /* Reset FIFO pointers */
1430 E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
1431 E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
1432 E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
1433 E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);
1435 /* Re-enable TX unit */
1436 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1437 E1000_WRITE_FLUSH(&adapter->hw);
1439 adapter->tx_fifo_head = 0;
1440 adapter->tx_fifo_reset_cnt++;
/*
 * em_set_promisc - program receive filtering from interface flags.
 *
 * IFF_PROMISC: enable unicast+multicast promiscuous reception (RCTL
 * UPE|MPE) and turn off hardware VLAN stripping (CTRL VME) so tagged
 * frames are visible/bridgeable.  IFF_ALLMULTI: enable only multicast
 * promiscuous mode.
 */
1449 em_set_promisc(struct adapter *adapter)
1451 uint32_t reg_rctl, ctrl;
1452 struct ifnet *ifp = &adapter->interface_data.ac_if;
1454 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1455 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
1457 if (ifp->if_flags & IFF_PROMISC) {
1458 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1459 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1462 * Disable VLAN stripping in promiscuous mode.
1463 * This enables bridging of vlan tagged frames to occur
1464 * and also allows vlan tags to be seen in tcpdump.
1466 ctrl &= ~E1000_CTRL_VME;
1467 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
1468 } else if (ifp->if_flags & IFF_ALLMULTI) {
1469 reg_rctl |= E1000_RCTL_MPE;
1470 reg_rctl &= ~E1000_RCTL_UPE;
1471 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/*
 * em_disable_promisc - leave promiscuous mode.
 *
 * Clears both the unicast (UPE) and multicast (MPE) promiscuous bits in
 * RCTL and re-enables hardware VLAN handling via em_enable_vlans().
 */
1476 em_disable_promisc(struct adapter *adapter)
1480 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1482 reg_rctl &= (~E1000_RCTL_UPE);
1483 reg_rctl &= (~E1000_RCTL_MPE);
1484 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1486 em_enable_vlans(adapter);
1489 /*********************************************************************
1492 * This routine is called whenever multicast address list is updated.
1494 **********************************************************************/
/*
 * em_set_multi - push the interface multicast list into the hardware.
 *
 * Collects up to MAX_NUM_MULTICAST_ADDRESSES link-layer addresses from
 * the interface's multicast list into a flat byte array and hands it to
 * em_mc_addr_list_update(); overflows fall back to multicast
 * promiscuous mode (MPE).  The 82542 rev2.0 requires the receiver to be
 * held in reset (RCTL_RST) with MWI disabled around the update.
 */
1497 em_set_multi(struct adapter *adapter)
1499 uint32_t reg_rctl = 0;
1500 uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1501 struct ifmultiaddr *ifma;
1503 struct ifnet *ifp = &adapter->interface_data.ac_if;
1505 IOCTL_DEBUGOUT("em_set_multi: begin");
/* 82542 rev2.0 errata: reset the receiver around filter updates. */
1507 if (adapter->hw.mac_type == em_82542_rev2_0) {
1508 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1509 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1510 em_pci_clear_mwi(&adapter->hw);
1511 reg_rctl |= E1000_RCTL_RST;
1512 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/* Gather AF_LINK multicast addresses, bounded by the table size. */
1516 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1517 if (ifma->ifma_addr->sa_family != AF_LINK)
1520 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1523 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1524 &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
/* Table overflow: accept all multicast instead of filtering. */
1528 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1529 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1530 reg_rctl |= E1000_RCTL_MPE;
1531 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1533 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
/* Take the 82542 rev2.0 receiver back out of reset. */
1536 if (adapter->hw.mac_type == em_82542_rev2_0) {
1537 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1538 reg_rctl &= ~E1000_RCTL_RST;
1539 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1541 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1542 em_pci_set_mwi(&adapter->hw);
1546 /*********************************************************************
1549 * This routine checks for link status and updates statistics.
1551 **********************************************************************/
/*
 * em_local_timer - once-per-second housekeeping callout.
 *
 * Under the adapter serializer: re-checks link state, logs link
 * transitions, refreshes statistics counters (optionally dumping them
 * when the debug sysctl is set), runs the SmartSpeed workaround, and
 * re-arms itself for one second (hz) later.
 */
1554 em_local_timer(void *arg)
1557 struct adapter *adapter = arg;
1558 ifp = &adapter->interface_data.ac_if;
1560 lwkt_serialize_enter(&adapter->serializer);
1562 em_check_for_link(&adapter->hw);
1563 em_print_link_status(adapter);
1564 em_update_stats_counters(adapter);
1565 if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
1566 em_print_hw_stats(adapter);
1567 em_smartspeed(adapter);
1569 callout_reset(&adapter->timer, hz, em_local_timer, adapter);
1571 lwkt_serialize_exit(&adapter->serializer);
/*
 * em_print_link_status - log link up/down transitions.
 *
 * Compares the STATUS.LU bit against the cached link_active flag and
 * prints a message only on a state change, caching speed/duplex on an
 * up-transition and clearing them (and the smartspeed counter) on the
 * appropriate edge.
 */
1575 em_print_link_status(struct adapter *adapter)
1577 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
/* Transition down -> up: latch speed/duplex and announce it once. */
1578 if (adapter->link_active == 0) {
1579 em_get_speed_and_duplex(&adapter->hw,
1580 &adapter->link_speed,
1581 &adapter->link_duplex);
1582 device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1583 adapter->link_speed,
1584 ((adapter->link_duplex == FULL_DUPLEX) ?
1585 "Full Duplex" : "Half Duplex"));
1586 adapter->link_active = 1;
1587 adapter->smartspeed = 0;
/* Transition up -> down: clear cached state and announce it once. */
1590 if (adapter->link_active == 1) {
1591 adapter->link_speed = 0;
1592 adapter->link_duplex = 0;
1593 device_printf(adapter->dev, "Link is Down\n");
1594 adapter->link_active = 0;
1599 /*********************************************************************
1601 * This routine disables all traffic on the adapter by issuing a
1602 * global reset on the MAC and deallocates TX/RX buffers.
1604 **********************************************************************/
/*
 * em_stop (body) - halt the adapter and release Tx/Rx resources.
 *
 * Masks interrupts, issues a global MAC reset, cancels both callouts,
 * frees the transmit and receive structures, and clears
 * IFF_RUNNING|IFF_OACTIVE so the stack stops handing us packets.
 * NOTE(review): the function signature line was lost in extraction;
 * only the body survives here, kept byte-identical.
 */
1610 struct adapter * adapter = arg;
1611 ifp = &adapter->interface_data.ac_if;
1613 INIT_DEBUGOUT("em_stop: begin");
1614 em_disable_intr(adapter);
1615 em_reset_hw(&adapter->hw);
1616 callout_stop(&adapter->timer);
1617 callout_stop(&adapter->tx_fifo_timer);
1618 em_free_transmit_structures(adapter);
1619 em_free_receive_structures(adapter);
1621 /* Tell the stack that the interface is no longer active */
1622 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1626 /*********************************************************************
1628 * Determine hardware revision.
1630 **********************************************************************/
/*
 * em_identify_hardware - read PCI config space and classify the MAC.
 *
 * Ensures bus-mastering and memory-space access are enabled in the PCI
 * command register (re-writing it if not), records the PCI IDs in the
 * hw struct, resolves the MAC type via em_set_mac_type(), and flags the
 * 82541/82547 families as needing the PHY init script.
 */
1632 em_identify_hardware(struct adapter * adapter)
1634 device_t dev = adapter->dev;
1636 /* Make sure our PCI config space has the necessary stuff set */
1637 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1638 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1639 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1640 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1641 adapter->hw.pci_cmd_word |=
1642 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1643 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1646 /* Save off the information about this board */
1647 adapter->hw.vendor_id = pci_get_vendor(dev);
1648 adapter->hw.device_id = pci_get_device(dev);
1649 adapter->hw.revision_id = pci_get_revid(dev);
1650 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1651 adapter->hw.subsystem_id = pci_get_subdevice(dev);
1653 /* Identify the MAC */
1654 if (em_set_mac_type(&adapter->hw))
1655 device_printf(dev, "Unknown MAC Type\n");
/* These MACs require the shared code to run the PHY init script. */
1657 if (adapter->hw.mac_type == em_82541 ||
1658 adapter->hw.mac_type == em_82541_rev_2 ||
1659 adapter->hw.mac_type == em_82547 ||
1660 adapter->hw.mac_type == em_82547_rev_2)
1661 adapter->hw.phy_init_script = TRUE;
1664 /*********************************************************************
1666 * Initialize the hardware to a configuration as specified by the
1667 * adapter structure. The controller is reset, the EEPROM is
1668 * verified, the MAC address is set, then the shared initialization
1669 * routines are called.
1671 **********************************************************************/
/*
 * em_hardware_init - reset and bring up the MAC.
 *
 * Globally resets the chip, validates the EEPROM checksum, reads the
 * part number, derives flow-control watermarks from the receive packet
 * buffer size (PBA), runs the shared em_init_hw(), and seeds the cached
 * link state from STATUS.LU.  Error `return` lines were lost in
 * extraction; code text is kept byte-identical.
 */
1673 em_hardware_init(struct adapter *adapter)
1675 uint16_t rx_buffer_size;
1677 INIT_DEBUGOUT("em_hardware_init: begin");
1678 /* Issue a global reset */
1679 em_reset_hw(&adapter->hw);
1681 /* When hardware is reset, fifo_head is also reset */
1682 adapter->tx_fifo_head = 0;
1684 /* Make sure we have a good EEPROM before we read from it */
1685 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1686 device_printf(adapter->dev,
1687 "The EEPROM Checksum Is Not Valid\n");
1691 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1692 device_printf(adapter->dev,
1693 "EEPROM read error while reading part number\n");
1698 * These parameters control the automatic generation (Tx) and
1699 * response(Rx) to Ethernet PAUSE frames.
1700 * - High water mark should allow for at least two frames to be
1701 * received after sending an XOFF.
1702 * - Low water mark works best when it is very near the high water mark.
1703 * This allows the receiver to restart by sending XON when it has
1704 * drained a bit. Here we use an arbitrary value of 1500 which will
1705 * restart after one full frame is pulled from the buffer. There
1706 * could be several smaller frames in the buffer and if so they will
1707 * not trigger the XON until their total number reduces the buffer
1709 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
/* PBA low 16 bits are the Rx packet buffer size in KB; convert to bytes. */
1711 rx_buffer_size = ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff) << 10);
1713 adapter->hw.fc_high_water =
1714 rx_buffer_size - EM_ROUNDUP(1 * adapter->hw.max_frame_size, 1024);
1715 adapter->hw.fc_low_water = adapter->hw.fc_high_water - 1500;
1716 adapter->hw.fc_pause_time = 1000;
1717 adapter->hw.fc_send_xon = TRUE;
1718 adapter->hw.fc = em_fc_full;
1720 if (em_init_hw(&adapter->hw) < 0) {
1721 device_printf(adapter->dev, "Hardware Initialization Failed");
/* Seed cached link state from the hardware link-up bit. */
1725 em_check_for_link(&adapter->hw);
1726 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1727 adapter->link_active = 1;
1729 adapter->link_active = 0;
1731 if (adapter->link_active) {
1732 em_get_speed_and_duplex(&adapter->hw,
1733 &adapter->link_speed,
1734 &adapter->link_duplex);
1736 adapter->link_speed = 0;
1737 adapter->link_duplex = 0;
1743 /*********************************************************************
1745 * Setup networking device structure and register an interface.
1747 **********************************************************************/
/*
 * em_setup_interface - populate the ifnet and attach to the stack.
 *
 * Fills in the ifnet callbacks (init/ioctl/start/watchdog, and poll
 * under DEVICE_POLLING), advertises hardware checksum capability on
 * 82543 and newer, attaches via ether_ifattach(), declares long-frame
 * and VLAN capabilities, and registers the supported ifmedia types
 * (fiber vs. copper) defaulting to autoselect.
 */
1749 em_setup_interface(device_t dev, struct adapter *adapter)
1752 INIT_DEBUGOUT("em_setup_interface: begin");
1754 ifp = &adapter->interface_data.ac_if;
1755 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1756 ifp->if_mtu = ETHERMTU;
1757 ifp->if_baudrate = 1000000000;
1758 ifp->if_init = em_init;
1759 ifp->if_softc = adapter;
1760 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1761 ifp->if_ioctl = em_ioctl;
1762 ifp->if_start = em_start;
1763 #ifdef DEVICE_POLLING
1764 ifp->if_poll = em_poll;
1766 ifp->if_watchdog = em_watchdog;
/* Leave one descriptor of slack in the send queue. */
1767 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
1768 ifq_set_ready(&ifp->if_snd);
/* 82543 and later can do hardware TCP/UDP checksumming. */
1770 if (adapter->hw.mac_type >= em_82543)
1771 ifp->if_capabilities |= IFCAP_HWCSUM;
1773 ifp->if_capenable = ifp->if_capabilities;
1775 ether_ifattach(ifp, adapter->hw.mac_addr);
1778 * Tell the upper layer(s) we support long frames.
1780 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1781 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1784 * Specify the media types supported by this adapter and register
1785 * callbacks to update media and link information
1787 ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1789 if (adapter->hw.media_type == em_media_type_fiber) {
1790 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1792 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
/* Copper: full 10/100/1000 matrix. */
1795 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1796 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1798 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1800 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1802 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1804 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1806 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1807 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1810 /*********************************************************************
1812 * Workaround for SmartSpeed on 82541 and 82547 controllers
1814 **********************************************************************/
/*
 * em_smartspeed - SmartSpeed workaround for 82541/82547 (IGP PHY).
 *
 * When gigabit autonegotiation repeatedly fails with a Master/Slave
 * configuration fault, temporarily clears the manual MS_ENABLE bit and
 * restarts autoneg; after EM_SMARTSPEED_DOWNSHIFT unsuccessful ticks it
 * re-asserts MS_ENABLE (suspecting a 2/3-pair cable), and the counter
 * wraps at EM_SMARTSPEED_MAX.  No-op when link is up, the PHY is not
 * IGP, autoneg is off, or 1000FDX is not advertised.  Called once per
 * second from em_local_timer().
 */
1816 em_smartspeed(struct adapter *adapter)
1820 if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1821 !adapter->hw.autoneg ||
1822 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1825 if (adapter->smartspeed == 0) {
1827 * If Master/Slave config fault is asserted twice,
1828 * we assume back-to-back.
1830 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1831 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1833 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1834 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1835 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
/* Drop manual master/slave config and renegotiate. */
1837 if (phy_tmp & CR_1000T_MS_ENABLE) {
1838 phy_tmp &= ~CR_1000T_MS_ENABLE;
1839 em_write_phy_reg(&adapter->hw,
1840 PHY_1000T_CTRL, phy_tmp);
1841 adapter->smartspeed++;
1842 if (adapter->hw.autoneg &&
1843 !em_phy_setup_autoneg(&adapter->hw) &&
1844 !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1846 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1847 MII_CR_RESTART_AUTO_NEG);
1848 em_write_phy_reg(&adapter->hw,
1854 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1855 /* If still no link, perhaps using 2/3 pair cable */
1856 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1857 phy_tmp |= CR_1000T_MS_ENABLE;
1858 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1859 if (adapter->hw.autoneg &&
1860 !em_phy_setup_autoneg(&adapter->hw) &&
1861 !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1862 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1863 MII_CR_RESTART_AUTO_NEG);
1864 em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1867 /* Restart process after EM_SMARTSPEED_MAX iterations */
1868 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1869 adapter->smartspeed = 0;
1873 * Manage DMA'able memory.
/*
 * em_dmamap_cb - bus_dmamap_load() callback for single-segment loads.
 *
 * Stores the first (and only expected) segment's bus address into the
 * bus_addr_t pointed to by arg.
 */
1876 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1880 *(bus_addr_t*) arg = segs->ds_addr;
/*
 * em_dma_malloc - allocate a contiguous DMA-able region.
 *
 * Creates a page-aligned busdma tag, allocates and maps `size` bytes,
 * and loads the map (em_dmamap_cb captures the physical address).  On
 * any failure it tears down whatever was created, NULLs the handles in
 * *dma, and returns the error; the unwinding goto labels were lost in
 * extraction, code text is byte-identical.
 *
 * NOTE(review): the error printf pairs "%llu" with a (uintmax_t) cast —
 * the portable specifier for uintmax_t is "%ju"; also "bus_dmammem_alloc"
 * in the message is a typo for bus_dmamem_alloc.  Both are in the
 * message text, so fixing them is a code change left for a real commit.
 */
1884 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1885 struct em_dma_alloc *dma, int mapflags)
1888 device_t dev = adapter->dev;
1890 r = bus_dma_tag_create(NULL, /* parent */
1891 PAGE_SIZE, 0, /* alignment, bounds */
1892 BUS_SPACE_MAXADDR, /* lowaddr */
1893 BUS_SPACE_MAXADDR, /* highaddr */
1894 NULL, NULL, /* filter, filterarg */
1897 size, /* maxsegsize */
1898 BUS_DMA_ALLOCNOW, /* flags */
1901 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1906 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1907 BUS_DMA_NOWAIT, &dma->dma_map);
1909 device_printf(dev, "em_dma_malloc: bus_dmammem_alloc failed; "
1910 "size %llu, error %d\n", (uintmax_t)size, r);
1914 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1918 mapflags | BUS_DMA_NOWAIT);
1920 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1925 dma->dma_size = size;
/* Failure unwinding: release in reverse order of acquisition. */
1929 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1931 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1932 bus_dma_tag_destroy(dma->dma_tag);
1934 dma->dma_map = NULL;
1935 dma->dma_tag = NULL;
/*
 * em_dma_free - release a region allocated by em_dma_malloc().
 *
 * Unloads the map, frees the memory, and destroys the tag, in reverse
 * order of acquisition.
 */
1940 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1942 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1943 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1944 bus_dma_tag_destroy(dma->dma_tag);
1947 /*********************************************************************
1949 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1950 * the information needed to transmit a packet on the wire.
1952 **********************************************************************/
/*
 * em_allocate_transmit_structures - allocate the tx_buffer array.
 *
 * Zeroed M_NOWAIT allocation of one struct em_buffer per transmit
 * descriptor; logs and fails if memory is unavailable (the return
 * statements were lost in extraction).
 */
1954 em_allocate_transmit_structures(struct adapter * adapter)
1956 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1957 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1958 if (adapter->tx_buffer_area == NULL) {
1959 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1966 /*********************************************************************
1968 * Allocate and initialize transmit structures.
1970 **********************************************************************/
/*
 * em_setup_transmit_structures - initialize the transmit ring state.
 *
 * Creates the per-packet transmit busdma tag (up to EM_MAX_SCATTER
 * segments, 8 clusters max per packet), allocates the tx_buffer array,
 * zeroes the descriptor ring, and resets the ring indices, available
 * count, and checksum-offload context.
 */
1972 em_setup_transmit_structures(struct adapter * adapter)
1975 * Setup DMA descriptor areas.
1977 if (bus_dma_tag_create(NULL, /* parent */
1978 1, 0, /* alignment, bounds */
1979 BUS_SPACE_MAXADDR, /* lowaddr */
1980 BUS_SPACE_MAXADDR, /* highaddr */
1981 NULL, NULL, /* filter, filterarg */
1982 MCLBYTES * 8, /* maxsize */
1983 EM_MAX_SCATTER, /* nsegments */
1984 MCLBYTES * 8, /* maxsegsize */
1985 BUS_DMA_ALLOCNOW, /* flags */
1987 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1991 if (em_allocate_transmit_structures(adapter))
1994 bzero((void *) adapter->tx_desc_base,
1995 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1997 adapter->next_avail_tx_desc = 0;
1998 adapter->oldest_used_tx_desc = 0;
2000 /* Set number of descriptors available */
2001 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2003 /* Set checksum context */
2004 adapter->active_checksum_context = OFFLOAD_NONE;
2009 /*********************************************************************
2011 * Enable transmit unit.
2013 **********************************************************************/
/*
 * em_initialize_transmit_unit - program the MAC's transmit registers.
 *
 * Writes the descriptor ring base/length (TDBAL/TDBAH/TDLEN), zeroes
 * the head/tail pointers, selects inter-packet-gap timing per MAC
 * generation, programs the interrupt delay registers (TIDV, and TADV on
 * 82540+), builds TCTL (with multiple-request support on 82571+ and a
 * duplex-dependent collision distance), and records the base descriptor
 * command flags (IFCS|RS, plus IDE when a Tx interrupt delay is set).
 */
2015 em_initialize_transmit_unit(struct adapter * adapter)
2018 uint32_t reg_tipg = 0;
2021 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
2023 /* Setup the Base and Length of the Tx Descriptor Ring */
2024 bus_addr = adapter->txdma.dma_paddr;
2025 E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
2026 E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
2027 E1000_WRITE_REG(&adapter->hw, TDLEN,
2028 adapter->num_tx_desc * sizeof(struct em_tx_desc));
2030 /* Setup the HW Tx Head and Tail descriptor pointers */
2031 E1000_WRITE_REG(&adapter->hw, TDH, 0);
2032 E1000_WRITE_REG(&adapter->hw, TDT, 0);
2034 HW_DEBUGOUT2("Base = %x, Length = %x\n",
2035 E1000_READ_REG(&adapter->hw, TDBAL),
2036 E1000_READ_REG(&adapter->hw, TDLEN));
2038 /* Set the default values for the Tx Inter Packet Gap timer */
2039 switch (adapter->hw.mac_type) {
2040 case em_82542_rev2_0:
2041 case em_82542_rev2_1:
2042 reg_tipg = DEFAULT_82542_TIPG_IPGT;
2043 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2044 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
/* 82543+ (default case): fiber and copper use different IPGT values. */
2047 if (adapter->hw.media_type == em_media_type_fiber)
2048 reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2050 reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2051 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2052 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2055 E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
2056 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
2057 if (adapter->hw.mac_type >= em_82540)
2058 E1000_WRITE_REG(&adapter->hw, TADV,
2059 adapter->tx_abs_int_delay.value);
2061 /* Program the Transmit Control Register */
2062 reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
2063 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2064 if (adapter->hw.mac_type >= em_82571)
2065 reg_tctl |= E1000_TCTL_MULR;
2066 if (adapter->link_duplex == 1)
2067 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2069 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
2070 E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
2072 /* Setup Transmit Descriptor Settings for this adapter */
2073 adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
2075 if (adapter->tx_int_delay.value > 0)
2076 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2079 /*********************************************************************
2081 * Free all transmit related data structures.
2083 **********************************************************************/
/*
 * em_free_transmit_structures - release all transmit-side resources.
 *
 * For every tx_buffer still holding an mbuf: unload and destroy its DMA
 * map and free the mbuf chain.  Then frees the tx_buffer array itself
 * and destroys the transmit busdma tag, NULLing the pointers so a
 * second call is harmless.
 */
2085 em_free_transmit_structures(struct adapter * adapter)
2087 struct em_buffer *tx_buffer;
2090 INIT_DEBUGOUT("free_transmit_structures: begin");
2092 if (adapter->tx_buffer_area != NULL) {
2093 tx_buffer = adapter->tx_buffer_area;
2094 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
2095 if (tx_buffer->m_head != NULL) {
2096 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2097 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2098 m_freem(tx_buffer->m_head);
2100 tx_buffer->m_head = NULL;
2103 if (adapter->tx_buffer_area != NULL) {
2104 free(adapter->tx_buffer_area, M_DEVBUF);
2105 adapter->tx_buffer_area = NULL;
2107 if (adapter->txtag != NULL) {
2108 bus_dma_tag_destroy(adapter->txtag);
2109 adapter->txtag = NULL;
2113 /*********************************************************************
2115 * The offload context needs to be set when we transfer the first
2116 * packet of a particular protocol (TCP/UDP). We change the
2117 * context only if the protocol type changes.
2119 **********************************************************************/
/*
 * em_transmit_checksum_setup - program a Tx checksum-offload context.
 *
 * Per the header comment above: inspects the mbuf's csum_flags, sets
 * *txd_upper/*txd_lower for data descriptors, and — only when the
 * protocol (TCP vs UDP) differs from the currently active context —
 * consumes one ring slot for a context descriptor describing the IP and
 * TCP/UDP checksum field offsets.  Assumes a plain Ethernet + IPv4
 * header layout (fixed ETHER_HDR_LEN / sizeof(struct ip) offsets).
 *
 * NOTE(review): the mbuf parameter declaration line and early-return
 * lines were lost in extraction; code text is byte-identical.
 */
2121 em_transmit_checksum_setup(struct adapter * adapter,
2123 uint32_t *txd_upper,
2124 uint32_t *txd_lower)
2126 struct em_context_desc *TXD;
2127 struct em_buffer *tx_buffer;
2130 if (mp->m_pkthdr.csum_flags) {
2131 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2132 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2133 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
/* Context already programmed for TCP: nothing more to do. */
2134 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2137 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2138 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2139 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2140 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2141 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2144 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2156 /* If we reach this point, the checksum offload context
2157 * needs to be reset.
2159 curr_txd = adapter->next_avail_tx_desc;
2160 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2161 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
/* IP checksum: starts after the Ethernet header, ends at the IP header. */
2163 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2164 TXD->lower_setup.ip_fields.ipcso =
2165 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2166 TXD->lower_setup.ip_fields.ipcse =
2167 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
/* L4 checksum spans from the end of the IP header to end of packet. */
2169 TXD->upper_setup.tcp_fields.tucss =
2170 ETHER_HDR_LEN + sizeof(struct ip);
2171 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2173 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2174 TXD->upper_setup.tcp_fields.tucso =
2175 ETHER_HDR_LEN + sizeof(struct ip) +
2176 offsetof(struct tcphdr, th_sum);
2177 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2178 TXD->upper_setup.tcp_fields.tucso =
2179 ETHER_HDR_LEN + sizeof(struct ip) +
2180 offsetof(struct udphdr, uh_sum);
2183 TXD->tcp_seg_setup.data = htole32(0);
2184 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
/* The context descriptor slot carries no mbuf. */
2186 tx_buffer->m_head = NULL;
2188 if (++curr_txd == adapter->num_tx_desc)
2191 adapter->num_tx_desc_avail--;
2192 adapter->next_avail_tx_desc = curr_txd;
2195 /**********************************************************************
2197 * Examine each tx_buffer in the used queue. If the hardware is done
2198 * processing the packet then free associated resources. The
2199 * tx_buffer is put back on the free queue.
2201 **********************************************************************/
/*
 * em_clean_transmit_interrupts - reclaim completed Tx descriptors.
 *
 * Walks the ring from oldest_used_tx_desc while the DD (descriptor
 * done) status bit is set, unloading/destroying DMA maps and freeing
 * mbufs as it goes, then updates the available count, clears
 * IFF_OACTIVE once enough room exists, and manages the watchdog timer
 * per the comment at L1310-L1313.
 */
2204 em_clean_transmit_interrupts(struct adapter *adapter)
2207 struct em_buffer *tx_buffer;
2208 struct em_tx_desc *tx_desc;
2209 struct ifnet *ifp = &adapter->interface_data.ac_if;
/* Ring already fully reclaimed: nothing to do. */
2211 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2214 num_avail = adapter->num_tx_desc_avail;
2215 i = adapter->oldest_used_tx_desc;
2217 tx_buffer = &adapter->tx_buffer_area[i];
2218 tx_desc = &adapter->tx_desc_base[i];
/* Make the hardware's status writebacks visible before reading DD. */
2220 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2221 BUS_DMASYNC_POSTREAD);
2223 while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2224 tx_desc->upper.data = 0;
/* Only EOP slots own an mbuf/map (see em_encap). */
2227 if (tx_buffer->m_head) {
2229 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2230 BUS_DMASYNC_POSTWRITE);
2231 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2232 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2234 m_freem(tx_buffer->m_head);
2235 tx_buffer->m_head = NULL;
2238 if (++i == adapter->num_tx_desc)
2241 tx_buffer = &adapter->tx_buffer_area[i];
2242 tx_desc = &adapter->tx_desc_base[i];
2245 bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
2246 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2248 adapter->oldest_used_tx_desc = i;
2251 * If we have enough room, clear IFF_OACTIVE to tell the stack
2252 * that it is OK to send packets.
2253 * If there are no pending descriptors, clear the timeout. Otherwise,
2254 * if some descriptors have been freed, restart the timeout.
2256 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2257 ifp->if_flags &= ~IFF_OACTIVE;
2258 if (num_avail == adapter->num_tx_desc)
2260 else if (num_avail == adapter->num_tx_desc_avail)
2261 ifp->if_timer = EM_TX_TIMEOUT;
2263 adapter->num_tx_desc_avail = num_avail;
2266 /*********************************************************************
2268 * Get a buffer from system mbuf buffer pool.
2270 **********************************************************************/
/*
 * Attach a receive mbuf cluster to rx descriptor slot i. If nmp is NULL a
 * fresh cluster is allocated with m_getcl(); otherwise the caller-supplied
 * mbuf is recycled. The cluster is DMA-loaded via em_dmamap_cb and its bus
 * address written into the rx descriptor.
 */
2272 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2274 struct mbuf *mp = nmp;
2275 struct em_buffer *rx_buffer;
2280 ifp = &adapter->interface_data.ac_if;
2283 mp = m_getcl(how, MT_DATA, M_PKTHDR);
/* Allocation failure statistic; error return presumably follows. */
2285 adapter->mbuf_cluster_failed++;
2288 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
/* NOTE(review): duplicated m_len assignment — likely the alloc/reuse
 * branches of a missing if/else; confirm against full source. */
2290 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2291 mp->m_data = mp->m_ext.ext_buf;
/* For standard MTU, align the IP header (ETHER_ALIGN payload shift). */
2294 if (ifp->if_mtu <= ETHERMTU)
2295 m_adj(mp, ETHER_ALIGN);
2297 rx_buffer = &adapter->rx_buffer_area[i];
2300 * Using memory from the mbuf cluster pool, invoke the
2301 * bus_dma machinery to arrange the memory mapping.
2303 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2304 mtod(mp, void *), mp->m_len,
2305 em_dmamap_cb, &paddr, 0);
2310 rx_buffer->m_head = mp;
2311 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2312 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2317 /*********************************************************************
2319 * Allocate memory for rx_buffer structures. Since we use one
2320 * rx_buffer per received packet, the maximum number of rx_buffer's
2321 * that we'll need is equal to the number of receive descriptors
2322 * that we've allocated.
2324 **********************************************************************/
/*
 * Allocate the rx_buffer array (one em_buffer per receive descriptor),
 * create the receive DMA tag and a DMA map per buffer, then populate every
 * descriptor slot with a cluster via em_get_buf(). On failure the tag and
 * buffer area are torn down (cleanup tail at the bottom).
 */
2326 em_allocate_receive_structures(struct adapter *adapter)
2329 struct em_buffer *rx_buffer;
2331 size = adapter->num_rx_desc * sizeof(struct em_buffer);
2332 adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2334 error = bus_dma_tag_create(NULL, /* parent */
2335 1, 0, /* alignment, bounds */
2336 BUS_SPACE_MAXADDR, /* lowaddr */
2337 BUS_SPACE_MAXADDR, /* highaddr */
2338 NULL, NULL, /* filter, filterarg */
2339 MCLBYTES, /* maxsize */
2341 MCLBYTES, /* maxsegsize */
2342 BUS_DMA_ALLOCNOW, /* flags */
2345 device_printf(adapter->dev, "em_allocate_receive_structures: "
2346 "bus_dma_tag_create failed; error %u\n", error);
/* One DMA map per receive buffer slot. */
2350 rx_buffer = adapter->rx_buffer_area;
2351 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2352 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2355 device_printf(adapter->dev,
2356 "em_allocate_receive_structures: "
2357 "bus_dmamap_create failed; error %u\n",
/* Fill every descriptor with an mbuf cluster (may sleep: MB_WAIT). */
2363 for (i = 0; i < adapter->num_rx_desc; i++) {
2364 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2366 adapter->rx_buffer_area[i].m_head = NULL;
2367 adapter->rx_desc_base[i].buffer_addr = 0;
2372 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2373 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Error-path cleanup: destroy the tag and release the buffer array. */
2378 bus_dma_tag_destroy(adapter->rxtag);
2380 adapter->rxtag = NULL;
2381 free(adapter->rx_buffer_area, M_DEVBUF);
2382 adapter->rx_buffer_area = NULL;
2386 /*********************************************************************
2388 * Allocate and initialize receive structures.
2390 **********************************************************************/
/*
 * Initialize receive state: zero the descriptor ring, allocate buffers via
 * em_allocate_receive_structures(), and reset the software ring pointer.
 */
2392 em_setup_receive_structures(struct adapter *adapter)
2394 bzero((void *) adapter->rx_desc_base,
2395 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2397 if (em_allocate_receive_structures(adapter))
2400 /* Setup our descriptor pointers */
2401 adapter->next_rx_desc_to_check = 0;
2405 /*********************************************************************
2407 * Enable receive unit.
2409 **********************************************************************/
/*
 * Program the hardware receive unit: delay timers, interrupt throttling
 * (82540+), ring base/length, head/tail pointers, RCTL buffer-size and
 * long-packet bits, and (82543+) RX checksum offload. Receives are enabled
 * last via the final RCTL write.
 */
2411 em_initialize_receive_unit(struct adapter *adapter)
2414 uint32_t reg_rxcsum;
2418 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2420 ifp = &adapter->interface_data.ac_if;
2422 /* Make sure receives are disabled while setting up the descriptor ring */
2423 E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2425 /* Set the Receive Delay Timer Register */
2426 E1000_WRITE_REG(&adapter->hw, RDTR,
2427 adapter->rx_int_delay.value | E1000_RDT_FPDB);
2429 if(adapter->hw.mac_type >= em_82540) {
2430 E1000_WRITE_REG(&adapter->hw, RADV,
2431 adapter->rx_abs_int_delay.value);
2433 /* Set the interrupt throttling rate in 256ns increments */
2434 if (em_int_throttle_ceil) {
2435 E1000_WRITE_REG(&adapter->hw, ITR,
2436 1000000000 / 256 / em_int_throttle_ceil);
/* Throttling disabled: ITR of 0 means no moderation. */
2438 E1000_WRITE_REG(&adapter->hw, ITR, 0);
2442 /* Setup the Base and Length of the Rx Descriptor Ring */
2443 bus_addr = adapter->rxdma.dma_paddr;
2444 E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2445 E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2446 E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2447 sizeof(struct em_rx_desc));
2449 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2450 E1000_WRITE_REG(&adapter->hw, RDH, 0);
2451 E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2453 /* Setup the Receive Control Register */
2454 reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2455 E1000_RCTL_RDMTS_HALF |
2456 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* TBI compatibility requires accepting bad packets (SBP). */
2458 if (adapter->hw.tbi_compatibility_on == TRUE)
2459 reg_rctl |= E1000_RCTL_SBP;
/* Select RCTL buffer-size encoding from the configured rx buffer length;
 * sizes above 2048 need the BSEX extension bit. */
2461 switch (adapter->rx_buffer_len) {
2463 case EM_RXBUFFER_2048:
2464 reg_rctl |= E1000_RCTL_SZ_2048;
2466 case EM_RXBUFFER_4096:
2467 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2469 case EM_RXBUFFER_8192:
2470 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2472 case EM_RXBUFFER_16384:
2473 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Jumbo MTU also needs long-packet enable regardless of buffer size. */
2477 if (ifp->if_mtu > ETHERMTU)
2478 reg_rctl |= E1000_RCTL_LPE;
2480 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2481 if ((adapter->hw.mac_type >= em_82543) &&
2482 (ifp->if_capenable & IFCAP_RXCSUM)) {
2483 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2484 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2485 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2488 /* Enable Receives */
2489 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2492 /*********************************************************************
2494 * Free receive related data structures.
2496 **********************************************************************/
/*
 * Release all receive-side resources: per-buffer DMA maps and mbufs, the
 * rx_buffer array itself, and finally the receive DMA tag. Safe to call
 * when allocation only partially succeeded (NULL checks throughout).
 */
2498 em_free_receive_structures(struct adapter *adapter)
2500 struct em_buffer *rx_buffer;
2503 INIT_DEBUGOUT("free_receive_structures: begin");
2505 if (adapter->rx_buffer_area != NULL) {
2506 rx_buffer = adapter->rx_buffer_area;
2507 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2508 if (rx_buffer->map != NULL) {
2509 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2510 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2512 if (rx_buffer->m_head != NULL)
2513 m_freem(rx_buffer->m_head);
2514 rx_buffer->m_head = NULL;
2517 if (adapter->rx_buffer_area != NULL) {
2518 free(adapter->rx_buffer_area, M_DEVBUF);
2519 adapter->rx_buffer_area = NULL;
2521 if (adapter->rxtag != NULL) {
2522 bus_dma_tag_destroy(adapter->rxtag);
2523 adapter->rxtag = NULL;
2527 /*********************************************************************
2529 * This routine executes in interrupt context. It replenishes
2530 * the mbufs in the descriptor and sends data which has been
2531 * dma'ed into host memory to upper layer.
2533 * We loop at most count times if count is > 0, or until done if
2536 *********************************************************************/
/*
 * Receive-side service routine (runs in interrupt context). Walks the rx
 * ring while descriptors have DD set, chains multi-descriptor frames via
 * adapter->fmp/lmp, strips the trailing CRC, handles TBI workaround frames,
 * replenishes each slot with a fresh cluster, and hands completed frames to
 * the stack through ifp->if_input. Processes at most 'count' descriptors
 * when count > 0.
 * NOTE(review): listing appears truncated — accept_frame logic, several
 * braces and 'else' arms are missing; confirm against full source.
 */
2538 em_process_receive_interrupts(struct adapter *adapter, int count)
2542 uint8_t accept_frame = 0;
2544 uint16_t len, desc_len, prev_len_adj;
2547 /* Pointer to the receive descriptor being examined. */
2548 struct em_rx_desc *current_desc;
2550 ifp = &adapter->interface_data.ac_if;
2551 i = adapter->next_rx_desc_to_check;
2552 current_desc = &adapter->rx_desc_base[i];
/* Sync the ring before reading descriptor status written by the NIC. */
2554 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2555 BUS_DMASYNC_POSTREAD);
/* Nothing completed yet: bail out early. */
2557 if (!((current_desc->status) & E1000_RXD_STAT_DD))
2560 while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2561 mp = adapter->rx_buffer_area[i].m_head;
2562 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2563 BUS_DMASYNC_POSTREAD);
2567 desc_len = le16toh(current_desc->length);
2568 if (current_desc->status & E1000_RXD_STAT_EOP) {
/* Last fragment: strip Ethernet CRC; if this fragment is shorter
 * than the CRC, the remainder must come off the previous mbuf. */
2571 if (desc_len < ETHER_CRC_LEN) {
2573 prev_len_adj = ETHER_CRC_LEN - desc_len;
2575 len = desc_len - ETHER_CRC_LEN;
/* Hardware flagged a receive error: apply the TBI acceptance
 * workaround before deciding to drop. */
2582 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2584 uint32_t pkt_len = desc_len;
2586 if (adapter->fmp != NULL)
2587 pkt_len += adapter->fmp->m_pkthdr.len;
2589 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2591 if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2592 current_desc->errors,
2593 pkt_len, last_byte)) {
2594 em_tbi_adjust_stats(&adapter->hw,
2597 adapter->hw.mac_addr);
/* Replenish the slot; on ENOBUFS recycle the old mbuf and drop
 * the partially assembled chain. */
2606 if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2607 adapter->dropped_pkts++;
2608 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2609 if (adapter->fmp != NULL)
2610 m_freem(adapter->fmp);
2611 adapter->fmp = NULL;
2612 adapter->lmp = NULL;
2616 /* Assign correct length to the current fragment */
2619 if (adapter->fmp == NULL) {
2620 mp->m_pkthdr.len = len;
2621 adapter->fmp = mp; /* Store the first mbuf */
2624 /* Chain mbuf's together */
2626 * Adjust length of previous mbuf in chain if we
2627 * received less than 4 bytes in the last descriptor.
2629 if (prev_len_adj > 0) {
2630 adapter->lmp->m_len -= prev_len_adj;
2631 adapter->fmp->m_pkthdr.len -= prev_len_adj;
2633 adapter->lmp->m_next = mp;
2634 adapter->lmp = adapter->lmp->m_next;
2635 adapter->fmp->m_pkthdr.len += len;
/* Frame complete: tag receiving ifnet, record checksum status,
 * strip/record VLAN tag, and pass the chain up the stack. */
2639 adapter->fmp->m_pkthdr.rcvif = ifp;
2642 em_receive_checksum(adapter, current_desc,
2644 if (current_desc->status & E1000_RXD_STAT_VP) {
2645 VLAN_INPUT_TAG(adapter->fmp,
2646 (current_desc->special &
2647 E1000_RXD_SPC_VLAN_MASK));
2649 /* lwkt_serialize_exit() */
2650 (*ifp->if_input)(ifp, adapter->fmp);
2651 /* lwkt_serialize_enter() */
2653 adapter->fmp = NULL;
2654 adapter->lmp = NULL;
/* Rejected frame: recycle mbuf and discard any partial chain. */
2657 adapter->dropped_pkts++;
2658 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2659 if (adapter->fmp != NULL)
2660 m_freem(adapter->fmp);
2661 adapter->fmp = NULL;
2662 adapter->lmp = NULL;
2665 /* Zero out the receive descriptors status */
2666 current_desc->status = 0;
2668 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
2669 E1000_WRITE_REG(&adapter->hw, RDT, i);
2671 /* Advance our pointers to the next descriptor */
2672 if (++i == adapter->num_rx_desc) {
2674 current_desc = adapter->rx_desc_base;
2680 bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
2681 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2683 adapter->next_rx_desc_to_check = i;
2686 /*********************************************************************
2688 * Verify that the hardware indicated that the checksum is valid.
2689 * Inform the stack about the status of checksum so that stack
2690 * doesn't spend time verifying the checksum.
2692 *********************************************************************/
/*
 * Translate hardware RX checksum status bits into mbuf csum_flags so the
 * stack can skip software verification. Pre-82543 parts and descriptors
 * with the "ignore checksum" bit get no flags at all.
 */
2694 em_receive_checksum(struct adapter *adapter,
2695 struct em_rx_desc *rx_desc,
2698 /* 82543 or newer only */
2699 if ((adapter->hw.mac_type < em_82543) ||
2700 /* Ignore Checksum bit is set */
2701 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2702 mp->m_pkthdr.csum_flags = 0;
/* Hardware verified the IP header checksum. */
2706 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2708 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2709 /* IP Checksum Good */
2710 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2711 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2713 mp->m_pkthdr.csum_flags = 0;
/* Hardware verified the TCP/UDP checksum (pseudo-header included). */
2717 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2719 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2720 mp->m_pkthdr.csum_flags |=
2721 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2722 mp->m_pkthdr.csum_data = htons(0xffff);
/*
 * Enable hardware VLAN tag handling: program the VLAN ethertype register
 * and set the VLAN Mode Enable bit in CTRL.
 */
2729 em_enable_vlans(struct adapter *adapter)
2733 E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2735 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2736 ctrl |= E1000_CTRL_VME;
2737 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2741 * note: we must call bus_enable_intr() prior to enabling the hardware
2742 * interrupt and bus_disable_intr() after disabling the hardware interrupt
2743 * in order to avoid handler execution races from scheduled interrupt
/*
 * Enable hardware interrupts (IMS mask) and re-enable the serialized
 * interrupt handler — skipped entirely while the interface is polling.
 */
2747 em_enable_intr(struct adapter *adapter)
2749 struct ifnet *ifp = &adapter->interface_data.ac_if;
2751 if ((ifp->if_flags & IFF_POLLING) == 0) {
2752 lwkt_serialize_handler_enable(&adapter->serializer);
2753 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
/*
 * Mask hardware interrupts via IMC and disable the serialized handler.
 * The 82542 rev 2.0 keeps RXSEQ enabled for its link-detect errata.
 */
2758 em_disable_intr(struct adapter *adapter)
2761 * The first version of 82542 had an errata where when link was
2762 * forced it would stay up even if the cable was disconnected.
2763 * Sequence errors were used to detect the disconnect and then the
2764 * driver would unforce the link. This code is in the ISR. For
2765 * this to work correctly the Sequence error interrupt had to be
2766 * enabled all the time.
2768 if (adapter->hw.mac_type == em_82542_rev2_0) {
2769 E1000_WRITE_REG(&adapter->hw, IMC,
2770 (0xffffffff & ~E1000_IMC_RXSEQ));
/* All other MACs: mask every interrupt cause. */
2772 E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
2775 lwkt_serialize_handler_disable(&adapter->serializer);
/*
 * Reject multicast/broadcast addresses (low bit of first octet set) and
 * the all-zeros address as invalid station addresses.
 */
2779 em_is_valid_ether_addr(uint8_t *addr)
2781 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2783 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
/* Shared-code callback: write a 16-bit PCI config-space register. */
2790 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2792 pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
/* Shared-code callback: read a 16-bit PCI config-space register. */
2796 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2798 *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
/* Set the Memory-Write-Invalidate bit in the PCI command register. */
2802 em_pci_set_mwi(struct em_hw *hw)
2804 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2805 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
/* Clear the Memory-Write-Invalidate bit in the PCI command register. */
2809 em_pci_clear_mwi(struct em_hw *hw)
2811 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2812 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
/*
 * Read a device register through the I/O-mapped window: write the register
 * offset to IOADDR (offset 0), then read the value from IODATA (offset 4).
 */
2816 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2818 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2819 return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
/*
 * Write a device register through the I/O-mapped window: offset goes to
 * IOADDR (offset 0), value to IODATA (offset 4).
 */
2823 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2825 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2826 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2829 /*********************************************************************
2830 * 82544 Coexistence issue workaround.
2831 * There are 2 issues.
2832 * 1. Transmit Hang issue.
2833 * To detect this issue, following equation can be used...
2834 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2835 * If SUM[3:0] is in between 1 to 4, we will have this issue.
2838 * To detect this issue, following equation can be used...
2839 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2840 * If SUM[3:0] is in between 9 to c, we will have this issue.
2844 * Make sure we do not have ending address as 1,2,3,4(Hang) or
2847 *************************************************************************/
/*
 * 82544 coexistence workaround: compute the low-nibble "terminator" of
 * (address + length); if it lands in the hang/issue ranges (0x1-0x4 or
 * 0x9-0xC), split the buffer into two descriptors so the last one is a
 * 4-byte tail. Returns the number of descriptor entries filled (1 or 2).
 */
2849 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2851 /* Since issue is sensitive to length and address.*/
2852 /* Let us first check the address...*/
2853 uint32_t safe_terminator;
/* NOTE(review): this single-descriptor early return appears to have lost
 * its guarding condition in this listing — confirm against full source. */
2855 desc_array->descriptor[0].address = address;
2856 desc_array->descriptor[0].length = length;
2857 desc_array->elements = 1;
2858 return(desc_array->elements);
2860 safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2861 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
2862 if (safe_terminator == 0 ||
2863 (safe_terminator > 4 && safe_terminator < 9) ||
2864 (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2865 desc_array->descriptor[0].address = address;
2866 desc_array->descriptor[0].length = length;
2867 desc_array->elements = 1;
2868 return(desc_array->elements);
/* Problematic terminator: split off the final 4 bytes into descriptor 1. */
2871 desc_array->descriptor[0].address = address;
2872 desc_array->descriptor[0].length = length - 4;
2873 desc_array->descriptor[1].address = address + (length - 4);
2874 desc_array->descriptor[1].length = 4;
2875 desc_array->elements = 2;
2876 return(desc_array->elements);
2879 /**********************************************************************
2881 * Update the board statistics counters.
2883 **********************************************************************/
/*
 * Harvest the hardware statistics registers (clear-on-read) into the
 * adapter's accumulators, then populate the ifnet statistics fields the
 * OS reports (bytes, multicasts, collisions, rx/tx error totals).
 */
2885 em_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence error counters are only meaningful with link (or on
 * copper media). */
2889 if (adapter->hw.media_type == em_media_type_copper ||
2890 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2891 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2892 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2894 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2895 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2896 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2897 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2899 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2900 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2901 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2902 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2903 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2904 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2905 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2906 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2907 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2908 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2909 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2910 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2911 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2912 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2913 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2914 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2915 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2916 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2917 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2918 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2920 /* For the 64-bit byte counters the low dword must be read first. */
2921 /* Both registers clear on the read of the high dword */
2923 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
2924 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2925 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2926 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2928 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2929 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2930 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2931 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2932 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2934 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2935 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2936 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2937 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2939 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2940 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2941 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2942 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2943 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2944 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2945 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2946 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2947 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2948 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
/* Counters only present on 82543 and newer MACs. */
2950 if (adapter->hw.mac_type >= em_82543) {
2951 adapter->stats.algnerrc +=
2952 E1000_READ_REG(&adapter->hw, ALGNERRC);
2953 adapter->stats.rxerrc +=
2954 E1000_READ_REG(&adapter->hw, RXERRC);
2955 adapter->stats.tncrs +=
2956 E1000_READ_REG(&adapter->hw, TNCRS);
2957 adapter->stats.cexterr +=
2958 E1000_READ_REG(&adapter->hw, CEXTERR);
2959 adapter->stats.tsctc +=
2960 E1000_READ_REG(&adapter->hw, TSCTC);
2961 adapter->stats.tsctfc +=
2962 E1000_READ_REG(&adapter->hw, TSCTFC);
2964 ifp = &adapter->interface_data.ac_if;
2966 /* Fill out the OS statistics structure */
2967 ifp->if_ibytes = adapter->stats.gorcl;
2968 ifp->if_obytes = adapter->stats.gotcl;
2969 ifp->if_imcasts = adapter->stats.mprc;
2970 ifp->if_collisions = adapter->stats.colc;
/* Aggregate receive error count reported to the stack. */
2973 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2974 adapter->stats.crcerrs + adapter->stats.algnerrc +
2975 adapter->stats.rlec + adapter->stats.mpc + adapter->stats.cexterr;
/* Aggregate transmit error count reported to the stack. */
2978 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2982 /**********************************************************************
2984 * This routine is called only when em_display_debug_stats is enabled.
2985 * This routine provides a way to take a look at important statistics
2986 * maintained by the driver and hardware.
2988 **********************************************************************/
/*
 * Dump driver/hardware debug state to the console: key registers, packet
 * buffer split, flow-control watermarks, interrupt delay values, tx ring
 * head/tail, and the driver's software failure counters. Triggered via the
 * debug_info sysctl.
 */
2990 em_print_debug_info(struct adapter *adapter)
2992 device_t dev= adapter->dev;
2993 uint8_t *hw_addr = adapter->hw.hw_addr;
2995 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
2996 device_printf(dev, "CTRL = 0x%x\n",
2997 E1000_READ_REG(&adapter->hw, CTRL));
2998 device_printf(dev, "RCTL = 0x%x PS=(0x8402)\n",
2999 E1000_READ_REG(&adapter->hw, RCTL));
3000 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk\n",
3001 ((E1000_READ_REG(&adapter->hw, PBA) & 0xffff0000) >> 16),
3002 (E1000_READ_REG(&adapter->hw, PBA) & 0xffff));
3003 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3004 adapter->hw.fc_high_water, adapter->hw.fc_low_water);
3005 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3006 E1000_READ_REG(&adapter->hw, TIDV),
3007 E1000_READ_REG(&adapter->hw, TADV));
3008 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3009 E1000_READ_REG(&adapter->hw, RDTR),
3010 E1000_READ_REG(&adapter->hw, RADV));
3011 device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
3012 (long long)adapter->tx_fifo_wrk_cnt,
3013 (long long)adapter->tx_fifo_reset_cnt);
3014 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3015 E1000_READ_REG(&adapter->hw, TDH),
3016 E1000_READ_REG(&adapter->hw, TDT));
3017 device_printf(dev, "Num Tx descriptors avail = %d\n",
3018 adapter->num_tx_desc_avail);
3019 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
3020 adapter->no_tx_desc_avail1);
3021 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
3022 adapter->no_tx_desc_avail2);
3023 device_printf(dev, "Std mbuf failed = %ld\n",
3024 adapter->mbuf_alloc_failed);
3025 device_printf(dev, "Std mbuf cluster failed = %ld\n",
3026 adapter->mbuf_cluster_failed);
3027 device_printf(dev, "Driver dropped packets = %ld\n",
3028 adapter->dropped_pkts);
/*
 * Print the accumulated hardware statistics counters (collisions, error
 * classes, flow-control frames, good packet counts) to the console.
 * Triggered via the stats sysctl.
 */
3032 em_print_hw_stats(struct adapter *adapter)
3034 device_t dev= adapter->dev;
3036 device_printf(dev, "Adapter: %p\n", adapter);
3038 device_printf(dev, "Excessive collisions = %lld\n",
3039 (long long)adapter->stats.ecol);
3040 device_printf(dev, "Symbol errors = %lld\n",
3041 (long long)adapter->stats.symerrs);
3042 device_printf(dev, "Sequence errors = %lld\n",
3043 (long long)adapter->stats.sec);
3044 device_printf(dev, "Defer count = %lld\n",
3045 (long long)adapter->stats.dc);
3047 device_printf(dev, "Missed Packets = %lld\n",
3048 (long long)adapter->stats.mpc);
3049 device_printf(dev, "Receive No Buffers = %lld\n",
3050 (long long)adapter->stats.rnbc);
3051 device_printf(dev, "Receive length errors = %lld\n",
3052 (long long)adapter->stats.rlec);
3053 device_printf(dev, "Receive errors = %lld\n",
3054 (long long)adapter->stats.rxerrc);
3055 device_printf(dev, "Crc errors = %lld\n",
3056 (long long)adapter->stats.crcerrs);
3057 device_printf(dev, "Alignment errors = %lld\n",
3058 (long long)adapter->stats.algnerrc);
3059 device_printf(dev, "Carrier extension errors = %lld\n",
3060 (long long)adapter->stats.cexterr);
3062 device_printf(dev, "XON Rcvd = %lld\n",
3063 (long long)adapter->stats.xonrxc);
3064 device_printf(dev, "XON Xmtd = %lld\n",
3065 (long long)adapter->stats.xontxc);
3066 device_printf(dev, "XOFF Rcvd = %lld\n",
3067 (long long)adapter->stats.xoffrxc);
3068 device_printf(dev, "XOFF Xmtd = %lld\n",
3069 (long long)adapter->stats.xofftxc);
3071 device_printf(dev, "Good Packets Rcvd = %lld\n",
3072 (long long)adapter->stats.gprc);
3073 device_printf(dev, "Good Packets Xmtd = %lld\n",
3074 (long long)adapter->stats.gptc);
/*
 * Sysctl handler: any write to the node triggers em_print_debug_info()
 * for the adapter passed via arg1. Reads just return the current value.
 */
3078 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3082 struct adapter *adapter;
3085 error = sysctl_handle_int(oidp, &result, 0, req);
/* No new value supplied (read-only access or error): nothing to do. */
3087 if (error || !req->newptr)
3091 adapter = (struct adapter *)arg1;
3092 em_print_debug_info(adapter);
/*
 * Sysctl handler: any write to the node triggers em_print_hw_stats()
 * for the adapter passed via arg1. Reads just return the current value.
 */
3099 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
3103 struct adapter *adapter;
3106 error = sysctl_handle_int(oidp, &result, 0, req);
/* No new value supplied (read-only access or error): nothing to do. */
3108 if (error || !req->newptr)
3112 adapter = (struct adapter *)arg1;
3113 em_print_hw_stats(adapter);
/*
 * Sysctl handler for the interrupt-delay tunables. Validates the new value
 * in microseconds, converts it to hardware ticks, and rewrites the low 16
 * bits of the associated register under the adapter serializer. Special
 * cases: RDTR needs the FPDB bit preserved; TIDV toggles the per-descriptor
 * IDE bit and must never be written as 0.
 */
3120 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
3122 struct em_int_delay_info *info;
3123 struct adapter *adapter;
3129 info = (struct em_int_delay_info *)arg1;
3130 adapter = info->adapter;
3131 usecs = info->value;
3132 error = sysctl_handle_int(oidp, &usecs, 0, req);
3133 if (error != 0 || req->newptr == NULL)
/* Reject values outside what 16 bits of ticks can represent. */
3135 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3137 info->value = usecs;
3138 ticks = E1000_USECS_TO_TICKS(usecs);
/* Read-modify-write the register while holding the adapter serializer. */
3140 lwkt_serialize_enter(&adapter->serializer);
3141 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3142 regval = (regval & ~0xffff) | (ticks & 0xffff);
3143 /* Handle a few special cases. */
3144 switch (info->offset) {
3146 case E1000_82542_RDTR:
3147 regval |= E1000_RDT_FPDB;
3150 case E1000_82542_TIDV:
3152 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3153 /* Don't write 0 into the TIDV register. */
3156 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3159 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3160 lwkt_serialize_exit(&adapter->serializer);
/*
 * Register one interrupt-delay tunable: record the adapter, register
 * offset, and initial value in 'info', then attach a read-write integer
 * sysctl node backed by em_sysctl_int_delay.
 */
3165 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3166 const char *description, struct em_int_delay_info *info,
3167 int offset, int value)
3169 info->adapter = adapter;
3170 info->offset = offset;
3171 info->value = value;
3172 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3173 SYSCTL_CHILDREN(adapter->sysctl_tree),
3174 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3175 info, 0, em_sysctl_int_delay, "I", description);
3179 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3181 struct adapter *adapter = (void *)arg1;
3185 throttle = em_int_throttle_ceil;
3186 error = sysctl_handle_int(oidp, &throttle, 0, req);
3187 if (error || req->newptr == NULL)
3189 if (throttle < 0 || throttle > 1000000000 / 256)
3193 * Set the interrupt throttling rate in 256ns increments,
3194 * recalculate sysctl value assignment to get exact frequency.
3196 throttle = 1000000000 / 256 / throttle;
3197 lwkt_serialize_enter(&adapter->serializer);
3198 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3199 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3200 lwkt_serialize_exit(&adapter->serializer);
3202 lwkt_serialize_enter(&adapter->serializer);
3203 em_int_throttle_ceil = 0;
3204 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3205 lwkt_serialize_exit(&adapter->serializer);
3207 device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n",
3208 em_int_throttle_ceil);