1 /**************************************************************************
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
5 Copyright (c) 2001-2003, Intel Corporation
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
34 ***************************************************************************/
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.36 2005/08/29 10:19:52 sephe Exp $*/
40 #include <net/ifq_var.h>
42 /*********************************************************************
43 * Set this to one to display debug statistics
44 *********************************************************************/
/* Global runtime toggle; read by the debug/stats paths (see em_print_debug_info). */
45 int em_display_debug_stats = 0;
47 /*********************************************************************
49 *********************************************************************/
/* Version string appended to the device description in em_probe(). */
51 char em_driver_version[] = "1.7.25";
54 /*********************************************************************
57 * Used by probe to select devices to load on
58 * Last field stores an index into em_strings
59 * Last entry must be all 0s
61 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
62 *********************************************************************/
/* PCI match table walked by em_probe(); every entry is vendor 0x8086 (Intel)
 * and matches any subvendor/subdevice.  All entries share string index 0. */
64 static em_vendor_info_t em_vendor_info_array[] =
66 /* Intel(R) PRO/1000 Network Connection */
67 { 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
68 { 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
69 { 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
70 { 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
71 { 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
72 { 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
73 { 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
74 { 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
75 { 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
76 { 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
77 { 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
78 { 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
79 { 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
80 { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
81 { 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
82 { 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
83 { 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
84 { 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
85 { 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
86 { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
87 { 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
88 { 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
89 { 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
90 { 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
91 { 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
92 { 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
93 { 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
94 { 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
95 { 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
96 { 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
97 { 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
98 { 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
/* NOTE(review): the all-zero terminating entry is not visible in this extract;
 * em_probe() stops its walk on vendor_id == 0, so confirm it is present. */
99 /* required last entry */
103 /*********************************************************************
104 * Table of branding strings for all supported NICs.
105 *********************************************************************/
/* Indexed by the final (String Index) field of em_vendor_info_array entries;
 * em_probe() uses em_strings[ent->index] to build the device description. */
107 static const char *em_strings[] = {
108 "Intel(R) PRO/1000 Network Connection"
111 /*********************************************************************
112 * Function prototypes
113 *********************************************************************/
/* newbus device interface entry points (wired up in em_methods below) */
114 static int em_probe(device_t);
115 static int em_attach(device_t);
116 static int em_detach(device_t);
117 static int em_shutdown(device_t);
/* interrupt handler and ifnet entry points */
118 static void em_intr(void *);
119 static void em_start(struct ifnet *);
120 static void em_start_serialized(struct ifnet *);
121 static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
122 static void em_watchdog(struct ifnet *);
123 static void em_init(void *);
124 static void em_init_serialized(void *);
125 static void em_stop(void *);
126 static void em_media_status(struct ifnet *, struct ifmediareq *);
127 static int em_media_change(struct ifnet *);
/* hardware bring-up and ring management helpers */
128 static void em_identify_hardware(struct adapter *);
129 static void em_local_timer(void *);
130 static int em_hardware_init(struct adapter *);
131 static void em_setup_interface(device_t, struct adapter *);
132 static int em_setup_transmit_structures(struct adapter *);
133 static void em_initialize_transmit_unit(struct adapter *);
134 static int em_setup_receive_structures(struct adapter *);
135 static void em_initialize_receive_unit(struct adapter *);
136 static void em_enable_intr(struct adapter *);
137 static void em_disable_intr(struct adapter *);
138 static void em_free_transmit_structures(struct adapter *);
139 static void em_free_receive_structures(struct adapter *);
140 static void em_update_stats_counters(struct adapter *);
141 static void em_clean_transmit_interrupts(struct adapter *);
142 static int em_allocate_receive_structures(struct adapter *);
143 static int em_allocate_transmit_structures(struct adapter *);
144 static void em_process_receive_interrupts(struct adapter *, int);
145 static void em_receive_checksum(struct adapter *, struct em_rx_desc *,
147 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
148 uint32_t *, uint32_t *);
/* filter/state helpers and diagnostics */
149 static void em_set_promisc(struct adapter *);
150 static void em_disable_promisc(struct adapter *);
151 static void em_set_multi(struct adapter *);
152 static void em_print_hw_stats(struct adapter *);
153 static void em_print_link_status(struct adapter *);
154 static int em_get_buf(int i, struct adapter *, struct mbuf *, int how);
155 static void em_enable_vlans(struct adapter *);
156 static int em_encap(struct adapter *, struct mbuf *);
157 static void em_smartspeed(struct adapter *);
/* 82547 TX FIFO hang workaround (see EM_82547_* constants further down) */
158 static int em_82547_fifo_workaround(struct adapter *, int);
159 static void em_82547_update_fifo_head(struct adapter *, int);
160 static int em_82547_tx_fifo_reset(struct adapter *);
161 static void em_82547_move_tail(void *arg);
162 static void em_82547_move_tail_serialized(void *arg);
/* DMA memory and sysctl plumbing */
163 static int em_dma_malloc(struct adapter *, bus_size_t,
164 struct em_dma_alloc *, int);
165 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
166 static void em_print_debug_info(struct adapter *);
167 static int em_is_valid_ether_addr(uint8_t *);
168 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
169 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
170 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length,
171 PDESC_ARRAY desc_array);
172 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
173 static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
174 static void em_add_int_delay_sysctl(struct adapter *, const char *,
176 struct em_int_delay_info *, int, int);
178 /*********************************************************************
179 * FreeBSD Device Interface Entry Points
180 *********************************************************************/
/* newbus method table: maps generic device_* operations to this driver. */
182 static device_method_t em_methods[] = {
183 /* Device interface */
184 DEVMETHOD(device_probe, em_probe),
185 DEVMETHOD(device_attach, em_attach),
186 DEVMETHOD(device_detach, em_detach),
187 DEVMETHOD(device_shutdown, em_shutdown),
/* softc is struct adapter; allocated per device by the bus framework. */
191 static driver_t em_driver = {
192 "em", em_methods, sizeof(struct adapter),
195 static devclass_t em_devclass;
197 DECLARE_DUMMY_MODULE(if_em);
/* Register the driver on the pci bus. */
198 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
200 /*********************************************************************
201 * Tunable default values.
202 *********************************************************************/
/*
 * The 8254x interrupt-delay registers (RDTR/RADV/TIDV/TADV) count in
 * units of 1.024 us (1024 ns).  Convert between register ticks and
 * microseconds, rounding to the nearest whole unit (+500 / +512 are
 * the half-divisor rounding biases for the /1000 and /1024 divides).
 */
#define E1000_TICKS_TO_USECS(ticks)	(((ticks) * 1024 + 500) / 1000)
#define E1000_USECS_TO_TICKS(usecs)	(((usecs) * 1000 + 512) / 1024)
/* Interrupt-delay defaults, expressed in microseconds (converted from the
 * EM_* register-tick constants) so the hw.em.* tunables below take usecs. */
207 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
208 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
209 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
210 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
/* Ceiling for interrupt throttling (see em_sysctl_int_throttle). */
211 static int em_int_throttle_ceil = 10000;
/* Loader tunables; per-device sysctls are created in em_attach(). */
213 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
214 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
215 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
216 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
217 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
219 /*********************************************************************
220 * Device identification routine
222 * em_probe determines if the driver should be loaded on
223 * adapter based on PCI vendor/device id of the adapter.
225 * return 0 on success, positive on failure
226 *********************************************************************/
229 em_probe(device_t dev)
231 em_vendor_info_t *ent;
233 uint16_t pci_vendor_id = 0;
234 uint16_t pci_device_id = 0;
235 uint16_t pci_subvendor_id = 0;
236 uint16_t pci_subdevice_id = 0;
237 char adapter_name[60];
239 INIT_DEBUGOUT("em_probe: begin");
/* Reject anything that is not an Intel device up front. */
241 pci_vendor_id = pci_get_vendor(dev);
242 if (pci_vendor_id != EM_VENDOR_ID)
245 pci_device_id = pci_get_device(dev);
246 pci_subvendor_id = pci_get_subvendor(dev);
247 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the match table; an all-zero vendor_id entry terminates it.
 * Subvendor/subdevice match either exactly or via the PCI_ANY_ID wildcard. */
249 ent = em_vendor_info_array;
250 while (ent->vendor_id != 0) {
251 if ((pci_vendor_id == ent->vendor_id) &&
252 (pci_device_id == ent->device_id) &&
254 ((pci_subvendor_id == ent->subvendor_id) ||
255 (ent->subvendor_id == PCI_ANY_ID)) &&
257 ((pci_subdevice_id == ent->subdevice_id) ||
258 (ent->subdevice_id == PCI_ANY_ID))) {
/* Matched: advertise "<branding string>, Version - <driver version>". */
259 snprintf(adapter_name, sizeof(adapter_name),
260 "%s, Version - %s", em_strings[ent->index],
262 device_set_desc_copy(dev, adapter_name);
271 /*********************************************************************
272 * Device initialization routine
274 * The attach entry point is called when the driver is being loaded.
275 * This routine identifies the type of hardware, allocates all resources
276 * and initializes the hardware.
278 * return 0 on success, positive on failure
279 *********************************************************************/
282 em_attach(device_t dev)
284 struct adapter *adapter;
289 INIT_DEBUGOUT("em_attach: begin");
291 adapter = device_get_softc(dev);
/* Serializer protects all driver paths (intr, ioctl, start, timers). */
293 lwkt_serialize_init(&adapter->serializer);
295 callout_init(&adapter->timer);
296 callout_init(&adapter->tx_fifo_timer);
299 adapter->osdep.dev = dev;
/* Per-device sysctl tree under hw.<nameunit> with debug/stats handlers. */
302 sysctl_ctx_init(&adapter->sysctl_ctx);
303 adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
304 SYSCTL_STATIC_CHILDREN(_hw),
306 device_get_nameunit(dev),
310 if (adapter->sysctl_tree == NULL) {
315 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
316 SYSCTL_CHILDREN(adapter->sysctl_tree),
317 OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
319 em_sysctl_debug_info, "I", "Debug Information");
321 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
322 SYSCTL_CHILDREN(adapter->sysctl_tree),
323 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
325 em_sysctl_stats, "I", "Statistics");
327 /* Determine hardware revision */
328 em_identify_hardware(adapter);
330 /* Set up some sysctls for the tunable interrupt delays */
331 em_add_int_delay_sysctl(adapter, "rx_int_delay",
332 "receive interrupt delay in usecs",
333 &adapter->rx_int_delay,
334 E1000_REG_OFFSET(&adapter->hw, RDTR),
335 em_rx_int_delay_dflt);
336 em_add_int_delay_sysctl(adapter, "tx_int_delay",
337 "transmit interrupt delay in usecs",
338 &adapter->tx_int_delay,
339 E1000_REG_OFFSET(&adapter->hw, TIDV),
340 em_tx_int_delay_dflt);
/* Absolute-delay (RADV/TADV) registers exist only on 82540 and newer. */
341 if (adapter->hw.mac_type >= em_82540) {
342 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
343 "receive interrupt delay limit in usecs",
344 &adapter->rx_abs_int_delay,
345 E1000_REG_OFFSET(&adapter->hw, RADV),
346 em_rx_abs_int_delay_dflt);
347 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
348 "transmit interrupt delay limit in usecs",
349 &adapter->tx_abs_int_delay,
350 E1000_REG_OFFSET(&adapter->hw, TADV),
351 em_tx_abs_int_delay_dflt);
352 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
353 SYSCTL_CHILDREN(adapter->sysctl_tree),
354 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
355 adapter, 0, em_sysctl_int_throttle, "I", NULL);
358 /* Parameters (to be read from user) */
359 adapter->num_tx_desc = EM_MAX_TXD;
360 adapter->num_rx_desc = EM_MAX_RXD;
361 adapter->hw.autoneg = DO_AUTO_NEG;
362 adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
363 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
364 adapter->hw.tbi_compatibility_en = TRUE;
365 adapter->rx_buffer_len = EM_RXBUFFER_2048;
368 * These parameters control the automatic generation(Tx) and
369 * response(Rx) to Ethernet PAUSE frames.
371 adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
372 adapter->hw.fc_low_water = FC_DEFAULT_LO_THRESH;
373 adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
374 adapter->hw.fc_send_xon = TRUE;
375 adapter->hw.fc = em_fc_full;
377 adapter->hw.phy_init_script = 1;
378 adapter->hw.phy_reset_disable = FALSE;
/* Master/slave role can be pinned at build time via EM_MASTER_SLAVE. */
380 #ifndef EM_MASTER_SLAVE
381 adapter->hw.master_slave = em_ms_hw_default;
383 adapter->hw.master_slave = EM_MASTER_SLAVE;
387 * Set the max frame size assuming standard ethernet
390 adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
392 adapter->hw.min_frame_size =
393 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
396 * This controls when hardware reports transmit completion
399 adapter->hw.report_tx_early = 1;
/* Map the memory BAR; register access goes through the bus-space handle. */
402 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
404 if (!(adapter->res_memory)) {
405 device_printf(dev, "Unable to allocate bus resource: memory\n");
409 adapter->osdep.mem_bus_space_tag =
410 rman_get_bustag(adapter->res_memory);
411 adapter->osdep.mem_bus_space_handle =
412 rman_get_bushandle(adapter->res_memory);
413 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
/* Post-82543 parts also need an I/O BAR: scan the BARs for one with the
 * I/O-space indicator bit (bit 0) set. */
415 if (adapter->hw.mac_type > em_82543) {
416 /* Figure our where our IO BAR is ? */
418 for (i = 0; i < 5; i++) {
419 val = pci_read_config(dev, rid, 4);
420 if (val & 0x00000001) {
421 adapter->io_rid = rid;
427 adapter->res_ioport = bus_alloc_resource_any(dev,
428 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
429 if (!(adapter->res_ioport)) {
430 device_printf(dev, "Unable to allocate bus resource: ioport\n");
435 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
436 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
/* Allocate the (shareable) interrupt line; handler is wired up below. */
440 adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
441 &rid, RF_SHAREABLE | RF_ACTIVE);
442 if (!(adapter->res_interrupt)) {
443 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
448 adapter->hw.back = &adapter->osdep;
450 /* Initialize eeprom parameters */
451 em_init_eeprom_params(&adapter->hw);
/* Allocate the TX and RX descriptor rings out of DMA-able memory. */
453 tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
455 /* Allocate Transmit Descriptor ring */
456 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
457 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
461 adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
463 rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
465 /* Allocate Receive Descriptor ring */
466 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
467 device_printf(dev, "Unable to allocate rx_desc memory\n");
471 adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
473 /* Initialize the hardware */
474 if (em_hardware_init(adapter)) {
475 device_printf(dev, "Unable to initialize the hardware\n");
480 /* Copy the permanent MAC address out of the EEPROM */
481 if (em_read_mac_addr(&adapter->hw) < 0) {
482 device_printf(dev, "EEPROM read error while reading mac address\n");
487 if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
488 device_printf(dev, "Invalid mac address\n");
493 /* Setup OS specific network interface */
494 em_setup_interface(dev, adapter);
496 /* Initialize statistics */
497 em_clear_hw_cntrs(&adapter->hw);
498 em_update_stats_counters(adapter);
499 adapter->hw.get_link_status = 1;
500 em_check_for_link(&adapter->hw);
502 /* Print the link status */
503 if (adapter->link_active == 1) {
504 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
505 &adapter->link_duplex);
506 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
508 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
510 device_printf(dev, "Speed: N/A, Duplex:N/A\n");
512 /* Identify 82544 on PCIX */
513 em_get_bus_info(&adapter->hw);
514 if (adapter->hw.bus_type == em_bus_type_pcix &&
515 adapter->hw.mac_type == em_82544)
516 adapter->pcix_82544 = TRUE;
518 adapter->pcix_82544 = FALSE;
/* Hook the interrupt handler under the per-adapter serializer; on failure
 * the just-attached ifnet is detached again. */
520 error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_MISC,
521 (void (*)(void *)) em_intr, adapter,
522 &adapter->int_handler_tag, &adapter->serializer);
524 device_printf(dev, "Error registering interrupt handler!\n");
525 ether_ifdetach(&adapter->interface_data.ac_if);
529 INIT_DEBUGOUT("em_attach: end");
537 /*********************************************************************
538 * Device removal routine
540 * The detach entry point is called when the driver is being removed.
541 * This routine stops the adapter and deallocates all the resources
542 * that were allocated for driver operation.
544 * return 0 on success, positive on failure
545 *********************************************************************/
548 em_detach(device_t dev)
550 struct adapter * adapter = device_get_softc(dev);
552 INIT_DEBUGOUT("em_detach: begin");
/* Mark detach-in-progress under the serializer so concurrent ioctls
 * (which check adapter->in_detach) back off. */
554 lwkt_serialize_enter(&adapter->serializer);
555 adapter->in_detach = 1;
557 if (device_is_attached(dev)) {
559 em_phy_hw_reset(&adapter->hw);
560 ether_ifdetach(&adapter->interface_data.ac_if);
562 bus_generic_detach(dev);
/* Release bus resources in reverse order of em_attach() allocation. */
564 if (adapter->res_interrupt != NULL) {
565 bus_teardown_intr(dev, adapter->res_interrupt,
566 adapter->int_handler_tag);
567 bus_release_resource(dev, SYS_RES_IRQ, 0,
568 adapter->res_interrupt);
570 if (adapter->res_memory != NULL) {
571 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
572 adapter->res_memory);
575 if (adapter->res_ioport != NULL) {
576 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
577 adapter->res_ioport);
580 /* Free Transmit Descriptor ring */
581 if (adapter->tx_desc_base != NULL) {
582 em_dma_free(adapter, &adapter->txdma);
583 adapter->tx_desc_base = NULL;
586 /* Free Receive Descriptor ring */
587 if (adapter->rx_desc_base != NULL) {
588 em_dma_free(adapter, &adapter->rxdma);
589 adapter->rx_desc_base = NULL;
/* Tear down the sysctl tree created in em_attach(). */
592 adapter->sysctl_tree = NULL;
593 sysctl_ctx_free(&adapter->sysctl_ctx);
595 lwkt_serialize_exit(&adapter->serializer);
599 /*********************************************************************
601 * Shutdown entry point
603 **********************************************************************/
/* Called at system shutdown; quiesces the adapter (softc lookup only
 * visible here -- the stop call is in lines elided from this extract). */
606 em_shutdown(device_t dev)
608 struct adapter *adapter = device_get_softc(dev);
613 /*********************************************************************
614 * Transmit entry point
616 * em_start is called by the stack to initiate a transmit.
617 * The driver will remain in this routine as long as there are
618 * packets to transmit and transmit resources are available.
619 * In case resources are not available stack is notified and
620 * the packet is requeued.
621 **********************************************************************/
/* Thin wrapper: takes the adapter serializer and defers the real work to
 * em_start_serialized(), which assumes the serializer is already held. */
624 em_start(struct ifnet *ifp)
626 struct adapter *adapter = ifp->if_softc;
628 lwkt_serialize_enter(&adapter->serializer);
629 em_start_serialized(ifp);
630 lwkt_serialize_exit(&adapter->serializer);
/* Transmit loop body; caller must hold adapter->serializer (see em_start,
 * em_intr and em_poll, which all call this with the serializer held). */
634 em_start_serialized(struct ifnet *ifp)
637 struct adapter *adapter = ifp->if_softc;
/* No link: leave packets queued. */
639 if (!adapter->link_active)
641 while (!ifq_is_empty(&ifp->if_snd)) {
/* Peek first; the packet is only dequeued after em_encap() accepts it,
 * so a full TX ring leaves it on the queue and sets OACTIVE. */
642 m_head = ifq_poll(&ifp->if_snd);
647 if (em_encap(adapter, m_head)) {
648 ifp->if_flags |= IFF_OACTIVE;
651 m_head = ifq_dequeue(&ifp->if_snd);
653 /* Send a copy of the frame to the BPF listener */
654 BPF_MTAP(ifp, m_head);
656 /* Set timeout in case hardware has problems transmitting */
657 ifp->if_timer = EM_TX_TIMEOUT;
661 /*********************************************************************
664 * em_ioctl is called when the user wants to configure the
667 * return 0 on success, positive on failure
668 **********************************************************************/
/* ifnet ioctl handler; all cases run under the adapter serializer except
 * the ether_ioctl() call, around which it is dropped and retaken. */
671 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
674 struct ifreq *ifr = (struct ifreq *) data;
675 struct adapter *adapter = ifp->if_softc;
677 lwkt_serialize_enter(&adapter->serializer);
/* Refuse configuration while em_detach() is tearing the device down. */
679 if (adapter->in_detach)
685 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
686 lwkt_serialize_exit(&adapter->serializer);
687 ether_ioctl(ifp, command, data);
688 lwkt_serialize_enter(&adapter->serializer);
691 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
/* New MTU feeds hw.max_frame_size; reinit applies it to the hardware. */
692 if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
695 ifp->if_mtu = ifr->ifr_mtu;
696 adapter->hw.max_frame_size =
697 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
698 em_init_serialized(adapter);
702 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
703 if (ifp->if_flags & IFF_UP) {
704 if (!(ifp->if_flags & IFF_RUNNING))
705 em_init_serialized(adapter);
/* Clear then re-apply promiscuous state to track flag changes. */
706 em_disable_promisc(adapter);
707 em_set_promisc(adapter);
709 if (ifp->if_flags & IFF_RUNNING)
715 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
716 if (ifp->if_flags & IFF_RUNNING) {
717 em_disable_intr(adapter);
718 em_set_multi(adapter);
/* 82542 rev2.0 requires an RX unit reinit after filter changes. */
719 if (adapter->hw.mac_type == em_82542_rev2_0)
720 em_initialize_receive_unit(adapter);
721 em_enable_intr(adapter);
726 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
727 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
730 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
731 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
/* Toggle hardware checksum offload; a reinit propagates it. */
732 if (mask & IFCAP_HWCSUM) {
733 if (IFCAP_HWCSUM & ifp->if_capenable)
734 ifp->if_capenable &= ~IFCAP_HWCSUM;
736 ifp->if_capenable |= IFCAP_HWCSUM;
737 if (ifp->if_flags & IFF_RUNNING)
738 em_init_serialized(adapter);
742 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
747 lwkt_serialize_exit(&adapter->serializer);
751 /*********************************************************************
752 * Watchdog entry point
754 * This routine is called whenever hardware quits transmitting.
756 **********************************************************************/
759 em_watchdog(struct ifnet *ifp)
761 struct adapter * adapter;
762 adapter = ifp->if_softc;
764 /* If we are in this routine because of pause frames, then
765 * don't reset the hardware.
/* TXOFF in the STATUS register means transmit is paused by flow control,
 * not hung -- just rearm the timer and return. */
767 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
768 ifp->if_timer = EM_TX_TIMEOUT;
772 if (em_check_for_link(&adapter->hw))
773 if_printf(ifp, "watchdog timeout -- resetting\n");
/* Clearing IFF_RUNNING forces a reinitialization path. */
775 ifp->if_flags &= ~IFF_RUNNING;
782 /*********************************************************************
785 * This routine is used in two ways. It is used by the stack as
786 * init entry point in network interface structure. It is also used
787 * by the driver as a hw/sw initialization routine to get to a
790 * return 0 on success, positive on failure
791 **********************************************************************/
/* Locking wrapper around em_init_serialized(); arg is the adapter softc. */
796 struct adapter *adapter = arg;
798 lwkt_serialize_enter(&adapter->serializer);
799 em_init_serialized(arg);
800 lwkt_serialize_exit(&adapter->serializer);
/* Full hw/sw (re)initialization; caller must hold adapter->serializer.
 * Called from em_init(), and directly from ioctl/media paths that already
 * hold the serializer. */
804 em_init_serialized(void *arg)
806 struct adapter *adapter = arg;
807 struct ifnet *ifp = &adapter->interface_data.ac_if;
809 INIT_DEBUGOUT("em_init: begin");
813 /* Get the latest mac address, User can use a LAA */
814 bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
817 /* Initialize the hardware */
818 if (em_hardware_init(adapter)) {
819 if_printf(ifp, "Unable to initialize the hardware\n");
823 em_enable_vlans(adapter);
825 /* Prepare transmit descriptors and buffers */
826 if (em_setup_transmit_structures(adapter)) {
827 if_printf(ifp, "Could not setup transmit structures\n");
831 em_initialize_transmit_unit(adapter);
833 /* Setup Multicast table */
834 em_set_multi(adapter);
836 /* Prepare receive descriptors and buffers */
837 if (em_setup_receive_structures(adapter)) {
838 if_printf(ifp, "Could not setup receive structures\n");
842 em_initialize_receive_unit(adapter);
844 /* Don't loose promiscuous settings */
845 em_set_promisc(adapter);
/* Interface is now operational. */
847 ifp->if_flags |= IFF_RUNNING;
848 ifp->if_flags &= ~IFF_OACTIVE;
/* TX checksum offload is only available on 82543 and newer parts. */
850 if (adapter->hw.mac_type >= em_82543) {
851 if (ifp->if_capenable & IFCAP_TXCSUM)
852 ifp->if_hwassist = EM_CHECKSUM_FEATURES;
854 ifp->if_hwassist = 0;
/* Kick off the 2-second link/stats timer and unmask interrupts. */
857 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
858 em_clear_hw_cntrs(&adapter->hw);
859 em_enable_intr(adapter);
861 /* Don't reset the phy next time init gets called */
862 adapter->hw.phy_reset_disable = TRUE;
865 #ifdef DEVICE_POLLING
/* DEVICE_POLLING hook: the stack calls this instead of (or alongside)
 * the interrupt path; cmd selects register/deregister/status behavior. */
868 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
870 struct adapter *adapter = ifp->if_softc;
873 lwkt_serialize_enter(&adapter->serializer);
/* Registering for polling: mask device interrupts. */
876 em_disable_intr(adapter);
878 case POLL_DEREGISTER:
879 em_enable_intr(adapter);
881 case POLL_AND_CHECK_STATUS:
/* Same link-change handling as em_intr(): on RXSEQ/LSC refresh the
 * link state and restart the 2-second local timer. */
882 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
883 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
884 callout_stop(&adapter->timer);
885 adapter->hw.get_link_status = 1;
886 em_check_for_link(&adapter->hw);
887 em_print_link_status(adapter);
888 callout_reset(&adapter->timer, 2*hz, em_local_timer,
893 if (ifp->if_flags & IFF_RUNNING) {
/* count bounds how many RX packets one poll pass may process. */
894 em_process_receive_interrupts(adapter, count);
895 em_clean_transmit_interrupts(adapter);
897 if (ifp->if_flags & IFF_RUNNING) {
898 if (!ifq_is_empty(&ifp->if_snd))
899 em_start_serialized(ifp);
903 lwkt_serialize_exit(&adapter->serializer);
906 #endif /* DEVICE_POLLING */
908 /*********************************************************************
910 * Interrupt Service routine
912 **********************************************************************/
/* Runs with adapter->serializer held (registered that way in em_attach).
 * Reading ICR also acknowledges the pending interrupt causes. */
918 struct adapter *adapter = arg;
920 ifp = &adapter->interface_data.ac_if;
922 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
926 /* Link status change */
927 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
928 callout_stop(&adapter->timer);
929 adapter->hw.get_link_status = 1;
930 em_check_for_link(&adapter->hw);
931 em_print_link_status(adapter);
932 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
936 * note: do not attempt to improve efficiency by looping. This
937 * only results in unnecessary piecemeal collection of received
938 * packets and unnecessary piecemeal cleanups of the transmit ring.
/* -1 = no RX packet limit (contrast with the bounded count in em_poll). */
940 if (ifp->if_flags & IFF_RUNNING) {
941 em_process_receive_interrupts(adapter, -1);
942 em_clean_transmit_interrupts(adapter);
/* TX descriptors may have been freed above; restart transmission. */
945 if ((ifp->if_flags & IFF_RUNNING) && !ifq_is_empty(&ifp->if_snd))
946 em_start_serialized(ifp);
949 /*********************************************************************
951 * Media Ioctl callback
953 * This routine is called whenever the user queries the status of
954 * the interface using ifconfig.
956 **********************************************************************/
958 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
960 struct adapter * adapter = ifp->if_softc;
962 INIT_DEBUGOUT("em_media_status: begin");
/* Refresh cached link state from the STATUS register's link-up bit. */
964 em_check_for_link(&adapter->hw);
965 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
966 if (adapter->link_active == 0) {
967 em_get_speed_and_duplex(&adapter->hw,
968 &adapter->link_speed,
969 &adapter->link_duplex);
970 adapter->link_active = 1;
973 if (adapter->link_active == 1) {
974 adapter->link_speed = 0;
975 adapter->link_duplex = 0;
976 adapter->link_active = 0;
/* Report validity first; IFM_ACTIVE is only set when link is up. */
980 ifmr->ifm_status = IFM_AVALID;
981 ifmr->ifm_active = IFM_ETHER;
983 if (!adapter->link_active)
986 ifmr->ifm_status |= IFM_ACTIVE;
/* Fiber is always reported as 1000-SX full duplex; copper maps the
 * negotiated speed/duplex onto the matching IFM subtype. */
988 if (adapter->hw.media_type == em_media_type_fiber) {
989 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
991 switch (adapter->link_speed) {
993 ifmr->ifm_active |= IFM_10_T;
996 ifmr->ifm_active |= IFM_100_TX;
999 ifmr->ifm_active |= IFM_1000_T;
1002 if (adapter->link_duplex == FULL_DUPLEX)
1003 ifmr->ifm_active |= IFM_FDX;
1005 ifmr->ifm_active |= IFM_HDX;
1009 /*********************************************************************
1011 * Media Ioctl callback
1013 * This routine is called when the user changes speed/duplex using
1014 * media/mediopt option with ifconfig.
1016 **********************************************************************/
1018 em_media_change(struct ifnet *ifp)
1020 struct adapter * adapter = ifp->if_softc;
1021 struct ifmedia *ifm = &adapter->media;
1023 INIT_DEBUGOUT("em_media_change: begin");
/* Only Ethernet media is meaningful here. */
1025 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1028 lwkt_serialize_enter(&adapter->serializer);
/* Translate the requested IFM subtype into autoneg/forced settings.
 * (case labels are elided in this extract; the bodies map to auto,
 * 1000-full, forced 100, and forced 10 respectively.) */
1030 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1032 adapter->hw.autoneg = DO_AUTO_NEG;
1033 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1037 adapter->hw.autoneg = DO_AUTO_NEG;
1038 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
1041 adapter->hw.autoneg = FALSE;
1042 adapter->hw.autoneg_advertised = 0;
1043 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1044 adapter->hw.forced_speed_duplex = em_100_full;
1046 adapter->hw.forced_speed_duplex = em_100_half;
1049 adapter->hw.autoneg = FALSE;
1050 adapter->hw.autoneg_advertised = 0;
1051 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1052 adapter->hw.forced_speed_duplex = em_10_full;
1054 adapter->hw.forced_speed_duplex = em_10_half;
1057 if_printf(ifp, "Unsupported media type\n");
1060 * As the speed/duplex settings may have changed we need to
/* Allow a PHY reset on the reinit so the new settings take effect. */
1063 adapter->hw.phy_reset_disable = FALSE;
1065 em_init_serialized(adapter);
1067 lwkt_serialize_exit(&adapter->serializer);
/* bus_dmamap_load_mbuf() callback for TX mappings: copies the returned
 * S/G segment list into the caller's struct em_q (arg) for em_encap(). */
1072 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1075 struct em_q *q = arg;
/* The descriptor path cannot handle more than EM_MAX_SCATTER segments. */
1079 KASSERT(nsegs <= EM_MAX_SCATTER,
1080 ("Too many DMA segments returned when mapping tx packet"));
1082 bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
/* Constants for the 82547 TX FIFO hang workaround (em_82547_fifo_workaround
 * and related helpers).  Values are byte counts; presumably taken from the
 * Intel errata for the 82547 -- confirm against the part's specification
 * update before changing. */
1085 #define EM_FIFO_HDR 0x10
1086 #define EM_82547_PKT_THRESH 0x3e0
1087 #define EM_82547_TX_FIFO_SIZE 0x2800
1088 #define EM_82547_TX_FIFO_BEGIN 0xf00
1089 /*********************************************************************
1091 * This routine maps the mbufs to tx descriptors.
1093 * return 0 on success, positive on failure
1094 **********************************************************************/
1096 em_encap(struct adapter *adapter, struct mbuf *m_head)
1099 uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1103 /* For 82544 Workaround */
1104 DESC_ARRAY desc_array;
1105 uint32_t array_elements;
1108 struct ifvlan *ifv = NULL;
1110 struct em_buffer *tx_buffer = NULL;
1111 struct em_tx_desc *current_tx_desc = NULL;
1112 struct ifnet *ifp = &adapter->interface_data.ac_if;
1115 * Force a cleanup if number of TX descriptors
1116 * available hits the threshold
1118 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1119 em_clean_transmit_interrupts(adapter);
1120 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1121 adapter->no_tx_desc_avail1++;
1126 * Map the packet for DMA.
1128 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1129 adapter->no_tx_map_avail++;
1132 error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1133 &q, BUS_DMA_NOWAIT);
1135 adapter->no_tx_dma_setup++;
1136 bus_dmamap_destroy(adapter->txtag, q.map);
1139 KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
1141 if (q.nsegs > adapter->num_tx_desc_avail) {
1142 adapter->no_tx_desc_avail2++;
1143 bus_dmamap_unload(adapter->txtag, q.map);
1144 bus_dmamap_destroy(adapter->txtag, q.map);
1148 if (ifp->if_hwassist > 0) {
1149 em_transmit_checksum_setup(adapter, m_head,
1150 &txd_upper, &txd_lower);
1153 txd_upper = txd_lower = 0;
1155 /* Find out if we are in vlan mode */
1156 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1157 m_head->m_pkthdr.rcvif != NULL &&
1158 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1159 ifv = m_head->m_pkthdr.rcvif->if_softc;
1161 i = adapter->next_avail_tx_desc;
1162 if (adapter->pcix_82544) {
1166 for (j = 0; j < q.nsegs; j++) {
1167 /* If adapter is 82544 and on PCIX bus */
1168 if(adapter->pcix_82544) {
1170 address = htole64(q.segs[j].ds_addr);
1172 * Check the Address and Length combination and
1173 * split the data accordingly
1175 array_elements = em_fill_descriptors(address,
1176 htole32(q.segs[j].ds_len),
1178 for (counter = 0; counter < array_elements; counter++) {
1179 if (txd_used == adapter->num_tx_desc_avail) {
1180 adapter->next_avail_tx_desc = txd_saved;
1181 adapter->no_tx_desc_avail2++;
1182 bus_dmamap_unload(adapter->txtag, q.map);
1183 bus_dmamap_destroy(adapter->txtag, q.map);
1186 tx_buffer = &adapter->tx_buffer_area[i];
1187 current_tx_desc = &adapter->tx_desc_base[i];
1188 current_tx_desc->buffer_addr = htole64(
1189 desc_array.descriptor[counter].address);
1190 current_tx_desc->lower.data = htole32(
1191 (adapter->txd_cmd | txd_lower |
1192 (uint16_t)desc_array.descriptor[counter].length));
1193 current_tx_desc->upper.data = htole32((txd_upper));
1194 if (++i == adapter->num_tx_desc)
1197 tx_buffer->m_head = NULL;
1201 tx_buffer = &adapter->tx_buffer_area[i];
1202 current_tx_desc = &adapter->tx_desc_base[i];
1204 current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1205 current_tx_desc->lower.data = htole32(
1206 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1207 current_tx_desc->upper.data = htole32(txd_upper);
1209 if (++i == adapter->num_tx_desc)
1212 tx_buffer->m_head = NULL;
1216 adapter->next_avail_tx_desc = i;
1217 if (adapter->pcix_82544)
1218 adapter->num_tx_desc_avail -= txd_used;
1220 adapter->num_tx_desc_avail -= q.nsegs;
1223 /* Set the vlan id */
1224 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1226 /* Tell hardware to add tag */
1227 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
1230 tx_buffer->m_head = m_head;
1231 tx_buffer->map = q.map;
1232 bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1235 * Last Descriptor of Packet needs End Of Packet (EOP)
1237 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1240 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1241 * that this frame is available to transmit.
1243 if (adapter->hw.mac_type == em_82547 &&
1244 adapter->link_duplex == HALF_DUPLEX) {
1245 em_82547_move_tail(adapter);
1247 E1000_WRITE_REG(&adapter->hw, TDT, i);
1248 if (adapter->hw.mac_type == em_82547) {
1249 em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1256 /*********************************************************************
1258 * 82547 workaround to avoid controller hang in half-duplex environment.
1259 * The workaround is to avoid queuing a large packet that would span
1260 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1261 * in this case. We do that only when FIFO is quiescent.
1263 **********************************************************************/
/*
 * Serializer-acquiring wrapper: grabs the adapter serializer, runs the
 * real work in em_82547_move_tail_serialized(), then releases it.  Also
 * usable directly as a callout handler (see the callout_reset below).
 */
1265 em_82547_move_tail(void *arg)
1267 struct adapter *adapter = arg;
1269 lwkt_serialize_enter(&adapter->serializer);
1270 em_82547_move_tail_serialized(arg);
1271 lwkt_serialize_exit(&adapter->serializer);
/*
 * Walk the descriptors between the hardware tail (TDT) and the software
 * tail, summing their buffer lengths.  If pushing that many bytes would
 * trip the 82547 FIFO-boundary erratum, defer the tail update by one
 * tick via tx_fifo_timer; otherwise write the new TDT and advance the
 * software FIFO-head model.  Caller must hold adapter->serializer.
 * NOTE(review): 'eop' is assigned but its use (the per-EOP accounting
 * branch) falls in lines missing from this listing.
 */
1275 em_82547_move_tail_serialized(void *arg)
1277 struct adapter *adapter = arg;
1280 struct em_tx_desc *tx_desc;
1281 uint16_t length = 0;
1284 hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1285 sw_tdt = adapter->next_avail_tx_desc;
1287 while (hw_tdt != sw_tdt) {
1288 tx_desc = &adapter->tx_desc_base[hw_tdt];
1289 length += tx_desc->lower.flags.length;
1290 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1291 if(++hw_tdt == adapter->num_tx_desc)
1295 if (em_82547_fifo_workaround(adapter, length)) {
1296 adapter->tx_fifo_wrk++;
/* FIFO not safe yet: retry from the callout one tick later. */
1297 callout_reset(&adapter->tx_fifo_timer, 1,
1298 em_82547_move_tail, adapter);
1301 E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1302 em_82547_update_fifo_head(adapter, length);
/*
 * Decide whether a packet of 'len' bytes can be pushed into the 82547's
 * internal TX FIFO without spanning the ring boundary (half-duplex
 * only).  Attempts a FIFO reset when the packet would not fit.
 * NOTE(review): the return statements fall in lines missing from this
 * listing -- presumably nonzero means "defer", matching the caller.
 */
1309 em_82547_fifo_workaround(struct adapter *adapter, int len)
1311 int fifo_space, fifo_pkt_len;
/* Round the packet up to the FIFO's 16-byte granularity, header included. */
1313 fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1315 if (adapter->link_duplex == HALF_DUPLEX) {
1316 fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1318 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1319 if (em_82547_tx_fifo_reset(adapter))
/*
 * Advance the driver's software model of the 82547 TX FIFO head by the
 * (16-byte rounded) length of the packet just queued, wrapping at the
 * FIFO size.  Keeps tx_fifo_head in [0, EM_82547_TX_FIFO_SIZE).
 */
1330 em_82547_update_fifo_head(struct adapter *adapter, int len)
1332 int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1334 /* tx_fifo_head is always 16 byte aligned */
1335 adapter->tx_fifo_head += fifo_pkt_len;
1336 if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1337 adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
/*
 * Reset the 82547's TX FIFO pointers, but only when the FIFO is fully
 * quiescent: descriptor ring empty (TDT==TDH), FIFO tail==head, saved
 * tail==saved head, and zero packets counted in the FIFO (TDFPC==0).
 * The TX unit is disabled around the pointer writes and re-enabled
 * afterwards.  NOTE(review): the return statements are in lines missing
 * from this listing; callers treat nonzero as "reset performed".
 */
1341 em_82547_tx_fifo_reset(struct adapter *adapter)
1345 if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1346 E1000_READ_REG(&adapter->hw, TDH)) &&
1347 (E1000_READ_REG(&adapter->hw, TDFT) ==
1348 E1000_READ_REG(&adapter->hw, TDFH)) &&
1349 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1350 E1000_READ_REG(&adapter->hw, TDFHS)) &&
1351 (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1353 /* Disable TX unit */
1354 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1355 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1357 /* Reset FIFO pointers */
1358 E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1359 E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1360 E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1361 E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1363 /* Re-enable TX unit */
1364 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1365 E1000_WRITE_FLUSH(&adapter->hw);
1367 adapter->tx_fifo_head = 0;
1368 adapter->tx_fifo_reset++;
/*
 * Program RCTL's unicast/multicast promiscuous bits (UPE/MPE) from the
 * interface flags: IFF_PROMISC enables both; otherwise IFF_ALLMULTI
 * enables multicast-promiscuous only.
 */
1378 em_set_promisc(struct adapter *adapter)
1381 struct ifnet *ifp = &adapter->interface_data.ac_if;
1383 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1385 if (ifp->if_flags & IFF_PROMISC) {
1386 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1387 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1388 } else if (ifp->if_flags & IFF_ALLMULTI) {
1389 reg_rctl |= E1000_RCTL_MPE;
1390 reg_rctl &= ~E1000_RCTL_UPE;
1391 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/* Clear both promiscuous bits (UPE and MPE) in RCTL. */
1396 em_disable_promisc(struct adapter *adapter)
1400 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1402 reg_rctl &= (~E1000_RCTL_UPE);
1403 reg_rctl &= (~E1000_RCTL_MPE);
1404 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1407 /*********************************************************************
1410 * This routine is called whenever multicast address list is updated.
1412 **********************************************************************/
1415 em_set_multi(struct adapter *adapter)
1417 uint32_t reg_rctl = 0;
1418 uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1419 struct ifmultiaddr *ifma;
1421 struct ifnet *ifp = &adapter->interface_data.ac_if;
1423 IOCTL_DEBUGOUT("em_set_multi: begin");
/*
 * 82542 rev2.0 erratum: the receiver must be held in reset (and MWI
 * disabled) while the multicast table array is rewritten.
 */
1425 if (adapter->hw.mac_type == em_82542_rev2_0) {
1426 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1427 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1428 em_pci_clear_mwi(&adapter->hw);
1429 reg_rctl |= E1000_RCTL_RST;
1430 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/* Flatten the interface's multicast list into the mta[] byte array. */
1434 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1435 if (ifma->ifma_addr->sa_family != AF_LINK)
1438 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1441 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1442 &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
/* Too many groups to filter exactly: fall back to multicast-promiscuous. */
1446 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1447 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1448 reg_rctl |= E1000_RCTL_MPE;
1449 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1451 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
/* Take the 82542 rev2.0 receiver back out of reset and restore MWI. */
1453 if (adapter->hw.mac_type == em_82542_rev2_0) {
1454 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1455 reg_rctl &= ~E1000_RCTL_RST;
1456 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1458 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1459 em_pci_set_mwi(&adapter->hw);
1463 /*********************************************************************
1466 * This routine checks for link status and updates statistics.
1468 **********************************************************************/
/*
 * Periodic (2*hz) callout: under the adapter serializer, poll link
 * state, refresh statistics counters, optionally dump debug stats,
 * run the SmartSpeed workaround, then re-arm itself.
 */
1471 em_local_timer(void *arg)
1474 struct adapter *adapter = arg;
1475 ifp = &adapter->interface_data.ac_if;
1477 lwkt_serialize_enter(&adapter->serializer);
1479 em_check_for_link(&adapter->hw);
1480 em_print_link_status(adapter);
1481 em_update_stats_counters(adapter);
1482 if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
1483 em_print_hw_stats(adapter);
1484 em_smartspeed(adapter);
1486 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
1488 lwkt_serialize_exit(&adapter->serializer);
/*
 * Edge-triggered link-state reporter: prints "Link is up/Down" only on
 * transitions, caching speed/duplex and the link_active flag on the
 * adapter.  SmartSpeed state is reset when link comes up.
 */
1492 em_print_link_status(struct adapter *adapter)
1494 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1495 if (adapter->link_active == 0) {
1496 em_get_speed_and_duplex(&adapter->hw,
1497 &adapter->link_speed,
1498 &adapter->link_duplex);
1499 device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1500 adapter->link_speed,
1501 ((adapter->link_duplex == FULL_DUPLEX) ?
1502 "Full Duplex" : "Half Duplex"));
1503 adapter->link_active = 1;
1504 adapter->smartspeed = 0;
1507 if (adapter->link_active == 1) {
1508 adapter->link_speed = 0;
1509 adapter->link_duplex = 0;
1510 device_printf(adapter->dev, "Link is Down\n");
1511 adapter->link_active = 0;
1516 /*********************************************************************
1518 * This routine disables all traffic on the adapter by issuing a
1519 * global reset on the MAC and deallocates TX/RX buffers.
1521 **********************************************************************/
/*
 * NOTE(review): the function's own signature line (em_stop) falls in a
 * gap of this listing; the body below disables interrupts, resets the
 * MAC, stops both callouts, frees TX/RX structures, and clears
 * IFF_RUNNING|IFF_OACTIVE so the stack stops handing us packets.
 */
1527 struct adapter * adapter = arg;
1528 ifp = &adapter->interface_data.ac_if;
1530 INIT_DEBUGOUT("em_stop: begin");
1531 em_disable_intr(adapter);
1532 em_reset_hw(&adapter->hw);
1533 callout_stop(&adapter->timer);
1534 callout_stop(&adapter->tx_fifo_timer);
1535 em_free_transmit_structures(adapter);
1536 em_free_receive_structures(adapter);
1538 /* Tell the stack that the interface is no longer active */
1539 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1543 /*********************************************************************
1545 * Determine hardware revision.
1547 **********************************************************************/
1549 em_identify_hardware(struct adapter * adapter)
1551 device_t dev = adapter->dev;
1553 /* Make sure our PCI config space has the necessary stuff set */
1554 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1555 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1556 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1557 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
/* Firmware left the device disabled: force bus-master + memory access on. */
1558 adapter->hw.pci_cmd_word |=
1559 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1560 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1563 /* Save off the information about this board */
1564 adapter->hw.vendor_id = pci_get_vendor(dev);
1565 adapter->hw.device_id = pci_get_device(dev);
1566 adapter->hw.revision_id = pci_get_revid(dev);
1567 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1568 adapter->hw.subsystem_id = pci_get_subdevice(dev);
1570 /* Identify the MAC */
1571 if (em_set_mac_type(&adapter->hw))
1572 device_printf(dev, "Unknown MAC Type\n");
/* These four MACs need the PHY init script run after reset. */
1574 if (adapter->hw.mac_type == em_82541 ||
1575 adapter->hw.mac_type == em_82541_rev_2 ||
1576 adapter->hw.mac_type == em_82547 ||
1577 adapter->hw.mac_type == em_82547_rev_2)
1578 adapter->hw.phy_init_script = TRUE;
1581 /*********************************************************************
1583 * Initialize the hardware to a configuration as specified by the
1584 * adapter structure. The controller is reset, the EEPROM is
1585 * verified, the MAC address is set, then the shared initialization
1586 * routines are called.
1588 **********************************************************************/
1590 em_hardware_init(struct adapter *adapter)
1592 INIT_DEBUGOUT("em_hardware_init: begin");
1593 /* Issue a global reset */
1594 em_reset_hw(&adapter->hw);
1596 /* When hardware is reset, fifo_head is also reset */
1597 adapter->tx_fifo_head = 0;
1599 /* Make sure we have a good EEPROM before we read from it */
1600 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1601 device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1605 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1606 device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1610 if (em_init_hw(&adapter->hw) < 0) {
1611 device_printf(adapter->dev, "Hardware Initialization Failed");
/* Record initial link state and, if up, cache speed/duplex. */
1615 em_check_for_link(&adapter->hw);
1616 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1617 adapter->link_active = 1;
1619 adapter->link_active = 0;
1621 if (adapter->link_active) {
1622 em_get_speed_and_duplex(&adapter->hw,
1623 &adapter->link_speed,
1624 &adapter->link_duplex);
1626 adapter->link_speed = 0;
1627 adapter->link_duplex = 0;
1633 /*********************************************************************
1635 * Setup networking device structure and register an interface.
1637 **********************************************************************/
1639 em_setup_interface(device_t dev, struct adapter *adapter)
1642 INIT_DEBUGOUT("em_setup_interface: begin");
/* Fill in the ifnet: name, MTU, callbacks, send queue, capabilities. */
1644 ifp = &adapter->interface_data.ac_if;
1645 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1646 ifp->if_mtu = ETHERMTU;
1647 ifp->if_baudrate = 1000000000;
1648 ifp->if_init = em_init;
1649 ifp->if_softc = adapter;
1650 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1651 ifp->if_ioctl = em_ioctl;
1652 ifp->if_start = em_start;
1653 #ifdef DEVICE_POLLING
1654 ifp->if_poll = em_poll;
1656 ifp->if_watchdog = em_watchdog;
1657 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
1658 ifq_set_ready(&ifp->if_snd);
/* 82543 and later can do hardware TX/RX checksumming. */
1660 if (adapter->hw.mac_type >= em_82543)
1661 ifp->if_capabilities |= IFCAP_HWCSUM;
1663 ifp->if_capenable = ifp->if_capabilities;
1665 ether_ifattach(ifp, adapter->hw.mac_addr);
1668 * Tell the upper layer(s) we support long frames.
1670 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1671 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1674 * Specify the media types supported by this adapter and register
1675 * callbacks to update media and link information
1677 ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
/* Fiber boards advertise 1000SX only; copper gets 10/100/1000T entries. */
1679 if (adapter->hw.media_type == em_media_type_fiber) {
1680 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1682 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
1685 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1686 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1688 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1690 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1692 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
1694 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1696 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1697 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1700 /*********************************************************************
1702 * Workaround for SmartSpeed on 82541 and 82547 controllers
1704 **********************************************************************/
/*
 * Called from the periodic timer while link is DOWN (the first guard
 * bails when link_active is set).  Only applies to IGP PHYs that are
 * autonegotiating with 1000FDX advertised: after repeated Master/Slave
 * config faults, toggles CR_1000T_MS_ENABLE and restarts autoneg so a
 * marginal cable can still link up.
 */
1706 em_smartspeed(struct adapter *adapter)
1710 if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1711 !adapter->hw.autoneg ||
1712 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1715 if (adapter->smartspeed == 0) {
1717 * If Master/Slave config fault is asserted twice,
1718 * we assume back-to-back.
1720 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1721 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1723 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1724 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1725 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1727 if (phy_tmp & CR_1000T_MS_ENABLE) {
1728 phy_tmp &= ~CR_1000T_MS_ENABLE;
1729 em_write_phy_reg(&adapter->hw,
1730 PHY_1000T_CTRL, phy_tmp);
1731 adapter->smartspeed++;
1732 if (adapter->hw.autoneg &&
1733 !em_phy_setup_autoneg(&adapter->hw) &&
1734 !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1736 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1737 MII_CR_RESTART_AUTO_NEG);
1738 em_write_phy_reg(&adapter->hw,
1744 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1745 /* If still no link, perhaps using 2/3 pair cable */
1746 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1747 phy_tmp |= CR_1000T_MS_ENABLE;
1748 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1749 if (adapter->hw.autoneg &&
1750 !em_phy_setup_autoneg(&adapter->hw) &&
1751 !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1752 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1753 MII_CR_RESTART_AUTO_NEG);
1754 em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1757 /* Restart process after EM_SMARTSPEED_MAX iterations */
1758 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1759 adapter->smartspeed = 0;
1763 * Manage DMA'able memory.
/*
 * em_dmamap_cb: trivial bus_dmamap_load() callback -- stores the first
 * segment's bus address into the bus_addr_t the caller passed as arg.
 */
1766 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1770 *(bus_addr_t*) arg = segs->ds_addr;
/*
 * Allocate a page-aligned, physically contiguous DMA buffer of 'size'
 * bytes: create a tag, allocate/map memory, and load it to obtain the
 * bus address.  On failure the partially acquired resources are
 * released (goto-cleanup in lines missing from this listing) and the
 * dma struct's tag/map are NULLed.
 * NOTE(review): "%llu" paired with the (uintmax_t) cast is a format
 * mismatch -- should be "%ju" per C99 <inttypes.h> conventions; also
 * "bus_dmammem_alloc" in the message is a typo for bus_dmamem_alloc.
 */
1774 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1775 struct em_dma_alloc *dma, int mapflags)
1778 device_t dev = adapter->dev;
1780 r = bus_dma_tag_create(NULL, /* parent */
1781 PAGE_SIZE, 0, /* alignment, bounds */
1782 BUS_SPACE_MAXADDR, /* lowaddr */
1783 BUS_SPACE_MAXADDR, /* highaddr */
1784 NULL, NULL, /* filter, filterarg */
1787 size, /* maxsegsize */
1788 BUS_DMA_ALLOCNOW, /* flags */
1791 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1796 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1797 BUS_DMA_NOWAIT, &dma->dma_map);
1799 device_printf(dev, "em_dma_malloc: bus_dmammem_alloc failed; "
1800 "size %llu, error %d\n", (uintmax_t)size, r);
1804 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1808 mapflags | BUS_DMA_NOWAIT);
1810 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1815 dma->dma_size = size;
/* Error-unwind path: release map, memory, and tag in reverse order. */
1819 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1821 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1822 bus_dma_tag_destroy(dma->dma_tag);
1824 dma->dma_map = NULL;
1825 dma->dma_tag = NULL;
/* Release everything em_dma_malloc() acquired: map, memory, then tag. */
1830 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1832 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1833 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1834 bus_dma_tag_destroy(dma->dma_tag);
1837 /*********************************************************************
1839 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1840 * the information needed to transmit a packet on the wire.
1842 **********************************************************************/
1844 em_allocate_transmit_structures(struct adapter * adapter)
/* Zeroed array: one em_buffer per TX descriptor. */
1846 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1847 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1848 if (adapter->tx_buffer_area == NULL) {
1849 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1856 /*********************************************************************
1858 * Allocate and initialize transmit structures.
1860 **********************************************************************/
1862 em_setup_transmit_structures(struct adapter * adapter)
1865 * Setup DMA descriptor areas.
1867 if (bus_dma_tag_create(NULL, /* parent */
1868 1, 0, /* alignment, bounds */
1869 BUS_SPACE_MAXADDR, /* lowaddr */
1870 BUS_SPACE_MAXADDR, /* highaddr */
1871 NULL, NULL, /* filter, filterarg */
1872 MCLBYTES * 8, /* maxsize */
1873 EM_MAX_SCATTER, /* nsegments */
1874 MCLBYTES * 8, /* maxsegsize */
1875 BUS_DMA_ALLOCNOW, /* flags */
1877 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1881 if (em_allocate_transmit_structures(adapter))
/* Start with a clean descriptor ring and reset ring indices. */
1884 bzero((void *) adapter->tx_desc_base,
1885 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1887 adapter->next_avail_tx_desc = 0;
1888 adapter->oldest_used_tx_desc = 0;
1890 /* Set number of descriptors available */
1891 adapter->num_tx_desc_avail = adapter->num_tx_desc;
1893 /* Set checksum context */
1894 adapter->active_checksum_context = OFFLOAD_NONE;
1899 /*********************************************************************
1901 * Enable transmit unit.
1903 **********************************************************************/
1905 em_initialize_transmit_unit(struct adapter * adapter)
1908 uint32_t reg_tipg = 0;
1911 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1913 /* Setup the Base and Length of the Tx Descriptor Ring */
1914 bus_addr = adapter->txdma.dma_paddr;
1915 E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1916 E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1917 E1000_WRITE_REG(&adapter->hw, TDLEN,
1918 adapter->num_tx_desc * sizeof(struct em_tx_desc));
1920 /* Setup the HW Tx Head and Tail descriptor pointers */
1921 E1000_WRITE_REG(&adapter->hw, TDH, 0);
1922 E1000_WRITE_REG(&adapter->hw, TDT, 0);
1924 HW_DEBUGOUT2("Base = %x, Length = %x\n",
1925 E1000_READ_REG(&adapter->hw, TDBAL),
1926 E1000_READ_REG(&adapter->hw, TDLEN));
1928 /* Set the default values for the Tx Inter Packet Gap timer */
1929 switch (adapter->hw.mac_type) {
1930 case em_82542_rev2_0:
1931 case em_82542_rev2_1:
1932 reg_tipg = DEFAULT_82542_TIPG_IPGT;
1933 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1934 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1937 if (adapter->hw.media_type == em_media_type_fiber)
1938 reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1940 reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1941 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1942 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1945 E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1946 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
1947 if (adapter->hw.mac_type >= em_82540)
1948 E1000_WRITE_REG(&adapter->hw, TADV,
1949 adapter->tx_abs_int_delay.value);
1951 /* Program the Transmit Control Register */
1952 reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1953 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
/* NOTE(review): magic '1' -- elsewhere duplex compares use FULL_DUPLEX;
 * confirm FULL_DUPLEX == 1 before changing. */
1954 if (adapter->link_duplex == 1)
1955 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1957 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1958 E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1960 /* Setup Transmit Descriptor Settings for this adapter */
1961 adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
/* Only request delayed interrupts (IDE) if a TX delay is configured. */
1963 if (adapter->tx_int_delay.value > 0)
1964 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1967 /*********************************************************************
1969 * Free all transmit related data structures.
1971 **********************************************************************/
1973 em_free_transmit_structures(struct adapter * adapter)
1975 struct em_buffer *tx_buffer;
1978 INIT_DEBUGOUT("free_transmit_structures: begin");
/* Unload/destroy the per-packet DMA maps and free any pending mbufs. */
1980 if (adapter->tx_buffer_area != NULL) {
1981 tx_buffer = adapter->tx_buffer_area;
1982 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1983 if (tx_buffer->m_head != NULL) {
1984 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1985 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1986 m_freem(tx_buffer->m_head);
1988 tx_buffer->m_head = NULL;
1991 if (adapter->tx_buffer_area != NULL) {
1992 free(adapter->tx_buffer_area, M_DEVBUF);
1993 adapter->tx_buffer_area = NULL;
1995 if (adapter->txtag != NULL) {
1996 bus_dma_tag_destroy(adapter->txtag);
1997 adapter->txtag = NULL;
2001 /*********************************************************************
2003 * The offload context needs to be set when we transfer the first
2004 * packet of a particular protocol (TCP/UDP). We change the
2005 * context only if the protocol type changes.
2007 **********************************************************************/
2009 em_transmit_checksum_setup(struct adapter * adapter,
2011 uint32_t *txd_upper,
2012 uint32_t *txd_lower)
2014 struct em_context_desc *TXD;
2015 struct em_buffer *tx_buffer;
/*
 * Decide the offload context from the mbuf's csum_flags; if it matches
 * the context already programmed into the hardware, only the caller's
 * txd_upper/txd_lower are set and no context descriptor is emitted
 * (the early returns sit in lines missing from this listing).
 */
2018 if (mp->m_pkthdr.csum_flags) {
2019 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2020 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2021 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2022 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2025 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2026 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2027 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2028 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2029 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2032 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2044 /* If we reach this point, the checksum offload context
2045 * needs to be reset.
2047 curr_txd = adapter->next_avail_tx_desc;
2048 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2049 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
/* IP header checksum fields: start, offset of ip_sum, end. */
2051 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2052 TXD->lower_setup.ip_fields.ipcso =
2053 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2054 TXD->lower_setup.ip_fields.ipcse =
2055 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
/* TCP/UDP checksum fields: start after the IP header, no end bound. */
2057 TXD->upper_setup.tcp_fields.tucss =
2058 ETHER_HDR_LEN + sizeof(struct ip);
2059 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2061 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2062 TXD->upper_setup.tcp_fields.tucso =
2063 ETHER_HDR_LEN + sizeof(struct ip) +
2064 offsetof(struct tcphdr, th_sum);
2065 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2066 TXD->upper_setup.tcp_fields.tucso =
2067 ETHER_HDR_LEN + sizeof(struct ip) +
2068 offsetof(struct udphdr, uh_sum);
2071 TXD->tcp_seg_setup.data = htole32(0);
2072 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2074 tx_buffer->m_head = NULL;
/* The context descriptor consumes one ring slot. */
2076 if (++curr_txd == adapter->num_tx_desc)
2079 adapter->num_tx_desc_avail--;
2080 adapter->next_avail_tx_desc = curr_txd;
2083 /**********************************************************************
2085 * Examine each tx_buffer in the used queue. If the hardware is done
2086 * processing the packet then free associated resources. The
2087 * tx_buffer is put back on the free queue.
2089 **********************************************************************/
2092 em_clean_transmit_interrupts(struct adapter *adapter)
2095 struct em_buffer *tx_buffer;
2096 struct em_tx_desc *tx_desc;
2097 struct ifnet *ifp = &adapter->interface_data.ac_if;
/* Ring already empty: nothing to reclaim. */
2099 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2103 adapter->clean_tx_interrupts++;
2105 num_avail = adapter->num_tx_desc_avail;
2106 i = adapter->oldest_used_tx_desc;
2108 tx_buffer = &adapter->tx_buffer_area[i];
2109 tx_desc = &adapter->tx_desc_base[i];
/* Walk forward while the hardware has marked descriptors done (DD). */
2111 while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2112 tx_desc->upper.data = 0;
2115 if (tx_buffer->m_head) {
2117 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2118 BUS_DMASYNC_POSTWRITE);
2119 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2120 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2122 m_freem(tx_buffer->m_head);
2123 tx_buffer->m_head = NULL;
2126 if (++i == adapter->num_tx_desc)
2129 tx_buffer = &adapter->tx_buffer_area[i];
2130 tx_desc = &adapter->tx_desc_base[i];
2133 adapter->oldest_used_tx_desc = i;
2136 * If we have enough room, clear IFF_OACTIVE to tell the stack
2137 * that it is OK to send packets.
2138 * If there are no pending descriptors, clear the timeout. Otherwise,
2139 * if some descriptors have been freed, restart the timeout.
2141 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2142 ifp->if_flags &= ~IFF_OACTIVE;
2143 if (num_avail == adapter->num_tx_desc)
2145 else if (num_avail == adapter->num_tx_desc_avail)
2146 ifp->if_timer = EM_TX_TIMEOUT;
2148 adapter->num_tx_desc_avail = num_avail;
2151 /*********************************************************************
2153 * Get a buffer from system mbuf buffer pool.
2155 **********************************************************************/
/*
 * Attach a receive mbuf cluster to RX ring slot 'i': allocate a new
 * cluster when nmp is NULL (else reuse the caller's mbuf), DMA-load it,
 * and write its bus address into the RX descriptor.
 */
2157 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2159 struct mbuf *mp = nmp;
2160 struct em_buffer *rx_buffer;
2165 ifp = &adapter->interface_data.ac_if;
2168 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2170 adapter->mbuf_cluster_failed++;
2173 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
/* Reused mbuf: reset length and data pointer to the cluster start. */
2175 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2176 mp->m_data = mp->m_ext.ext_buf;
/* Align the IP header on standard MTUs (ETHER_ALIGN = 2-byte shift). */
2179 if (ifp->if_mtu <= ETHERMTU)
2180 m_adj(mp, ETHER_ALIGN);
2182 rx_buffer = &adapter->rx_buffer_area[i];
2185 * Using memory from the mbuf cluster pool, invoke the
2186 * bus_dma machinery to arrange the memory mapping.
2188 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2189 mtod(mp, void *), mp->m_len,
2190 em_dmamap_cb, &paddr, 0);
2195 rx_buffer->m_head = mp;
2196 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2197 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2202 /*********************************************************************
2204 * Allocate memory for rx_buffer structures. Since we use one
2205 * rx_buffer per received packet, the maximum number of rx_buffer's
2206 * that we'll need is equal to the number of receive descriptors
2207 * that we've allocated.
2209 **********************************************************************/
2211 em_allocate_receive_structures(struct adapter *adapter)
2214 struct em_buffer *rx_buffer;
2216 size = adapter->num_rx_desc * sizeof(struct em_buffer);
2217 adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
/* One-segment, cluster-sized tag for all RX buffers. */
2219 error = bus_dma_tag_create(NULL, /* parent */
2220 1, 0, /* alignment, bounds */
2221 BUS_SPACE_MAXADDR, /* lowaddr */
2222 BUS_SPACE_MAXADDR, /* highaddr */
2223 NULL, NULL, /* filter, filterarg */
2224 MCLBYTES, /* maxsize */
2226 MCLBYTES, /* maxsegsize */
2227 BUS_DMA_ALLOCNOW, /* flags */
2230 device_printf(adapter->dev, "em_allocate_receive_structures: "
2231 "bus_dma_tag_create failed; error %u\n", error);
/* Create one DMA map per RX descriptor slot. */
2235 rx_buffer = adapter->rx_buffer_area;
2236 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2237 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2240 device_printf(adapter->dev,
2241 "em_allocate_receive_structures: "
2242 "bus_dmamap_create failed; error %u\n",
/* Populate every slot with an mbuf cluster (may sleep: MB_WAIT). */
2248 for (i = 0; i < adapter->num_rx_desc; i++) {
2249 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2251 adapter->rx_buffer_area[i].m_head = NULL;
2252 adapter->rx_desc_base[i].buffer_addr = 0;
/* Error-unwind: destroy tag and free the buffer-area array. */
2260 bus_dma_tag_destroy(adapter->rxtag);
2262 adapter->rxtag = NULL;
2263 free(adapter->rx_buffer_area, M_DEVBUF);
2264 adapter->rx_buffer_area = NULL;
2268 /*********************************************************************
2270 * Allocate and initialize receive structures.
2272 **********************************************************************/
2274 em_setup_receive_structures(struct adapter *adapter)
/* Clean descriptor ring, then allocate buffers and reset the RX index. */
2276 bzero((void *) adapter->rx_desc_base,
2277 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2279 if (em_allocate_receive_structures(adapter))
2282 /* Setup our descriptor pointers */
2283 adapter->next_rx_desc_to_check = 0;
2287 /*********************************************************************
2289 * Enable receive unit.
2291 **********************************************************************/
/*
 * Program the hardware receive unit: interrupt delay timers, descriptor
 * ring base/length and head/tail pointers, receive control (RCTL) with
 * the configured buffer size, and optional RX checksum offload, then
 * enable receives.
 * NOTE(review): extract is missing lines (declarations, braces, the
 * `else` of the ITR setting and the switch-case `break;` statements) —
 * code kept exactly as found.
 */
2293 em_initialize_receive_unit(struct adapter *adapter)
2296 uint32_t reg_rxcsum;
2300 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2302 ifp = &adapter->interface_data.ac_if;
2304 /* Make sure receives are disabled while setting up the descriptor ring */
2305 E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2307 /* Set the Receive Delay Timer Register */
2308 E1000_WRITE_REG(&adapter->hw, RDTR,
2309 adapter->rx_int_delay.value | E1000_RDT_FPDB);
/* Absolute delay timer and interrupt throttling exist on 82540 and newer. */
2311 if(adapter->hw.mac_type >= em_82540) {
2312 E1000_WRITE_REG(&adapter->hw, RADV,
2313 adapter->rx_abs_int_delay.value);
2315 /* Set the interrupt throttling rate in 256ns increments */
2316 if (em_int_throttle_ceil) {
2317 E1000_WRITE_REG(&adapter->hw, ITR,
2318 1000000000 / 256 / em_int_throttle_ceil)
2320 E1000_WRITE_REG(&adapter->hw, ITR, 0);
2324 /* Setup the Base and Length of the Rx Descriptor Ring */
2325 bus_addr = adapter->rxdma.dma_paddr;
2326 E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2327 E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2328 E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2329 sizeof(struct em_rx_desc));
2331 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2332 E1000_WRITE_REG(&adapter->hw, RDH, 0);
2333 E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2335 /* Setup the Receive Control Register */
2336 reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2337 E1000_RCTL_RDMTS_HALF |
2338 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* TBI compatibility mode: store bad packets so the workaround can see them. */
2340 if (adapter->hw.tbi_compatibility_on == TRUE)
2341 reg_rctl |= E1000_RCTL_SBP;
/* Buffer-size field of RCTL; >2048 sizes need BSEX. Each case presumably
 * ended in a break; lost in this extract — confirm against original. */
2343 switch (adapter->rx_buffer_len) {
2345 case EM_RXBUFFER_2048:
2346 reg_rctl |= E1000_RCTL_SZ_2048;
2348 case EM_RXBUFFER_4096:
2349 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2351 case EM_RXBUFFER_8192:
2352 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2354 case EM_RXBUFFER_16384:
2355 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long-packet enable for jumbo MTU. */
2359 if (ifp->if_mtu > ETHERMTU)
2360 reg_rctl |= E1000_RCTL_LPE;
2362 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2363 if ((adapter->hw.mac_type >= em_82543) &&
2364 (ifp->if_capenable & IFCAP_RXCSUM)) {
2365 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2366 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2367 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2370 /* Enable Receives */
2371 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2374 /*********************************************************************
2376 * Free receive related data structures.
2378 **********************************************************************/
/*
 * Tear down everything em_allocate_receive_structures() built: unload
 * and destroy each per-descriptor DMA map, free any attached mbufs,
 * then free the em_buffer array and destroy the RX DMA tag.  All
 * pointers are NULLed so the routine is safe to call more than once.
 * NOTE(review): extract is missing the return type, braces and some
 * closing lines — code kept exactly as found.
 */
2380 em_free_receive_structures(struct adapter *adapter)
2382 struct em_buffer *rx_buffer;
2385 INIT_DEBUGOUT("free_receive_structures: begin");
2387 if (adapter->rx_buffer_area != NULL) {
2388 rx_buffer = adapter->rx_buffer_area;
/* Per-slot cleanup: DMA map first, then the mbuf it covered. */
2389 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2390 if (rx_buffer->map != NULL) {
2391 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2392 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2394 if (rx_buffer->m_head != NULL)
2395 m_freem(rx_buffer->m_head);
2396 rx_buffer->m_head = NULL;
2399 if (adapter->rx_buffer_area != NULL) {
2400 free(adapter->rx_buffer_area, M_DEVBUF);
2401 adapter->rx_buffer_area = NULL;
2403 if (adapter->rxtag != NULL) {
2404 bus_dma_tag_destroy(adapter->rxtag);
2405 adapter->rxtag = NULL;
2409 /*********************************************************************
2411 * This routine executes in interrupt context. It replenishes
2412 * the mbufs in the descriptor and sends data which has been
2413 * dma'ed into host memory to upper layer.
2415 * We loop at most count times if count is > 0, or until done if count is < 0.
2418 *********************************************************************/
/*
 * RX completion loop (runs in interrupt context).  Walks descriptors
 * from next_rx_desc_to_check while the DD (descriptor done) bit is set,
 * chains multi-descriptor frames through adapter->fmp/lmp, strips the
 * Ethernet CRC, applies the TBI workaround for frames with receive
 * errors, replenishes each consumed slot via em_get_buf(), and hands
 * completed frames to the stack through ifp->if_input.  The hardware
 * tail pointer (RDT) is advanced as descriptors are recycled.
 * NOTE(review): extract is missing many lines (declarations, braces,
 * else-branches, the EOP bookkeeping and VLAN/non-VLAN input split) —
 * code kept exactly as found; comments below describe only what the
 * visible lines do.
 */
2420 em_process_receive_interrupts(struct adapter *adapter, int count)
2424 uint8_t accept_frame = 0;
2426 uint16_t len, desc_len, prev_len_adj;
2429 /* Pointer to the receive descriptor being examined. */
2430 struct em_rx_desc *current_desc;
2432 ifp = &adapter->interface_data.ac_if;
2433 i = adapter->next_rx_desc_to_check;
2434 current_desc = &adapter->rx_desc_base[i];
/* Nothing completed: count it and bail early. */
2436 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2438 adapter->no_pkts_avail++;
/* Main loop: process at most `count` completed descriptors. */
2442 while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2443 mp = adapter->rx_buffer_area[i].m_head;
2444 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2445 BUS_DMASYNC_POSTREAD);
2449 desc_len = le16toh(current_desc->length);
/* On EOP, drop the trailing Ethernet CRC; if this last descriptor holds
 * fewer than ETHER_CRC_LEN bytes the remainder must be trimmed from the
 * previous mbuf in the chain (prev_len_adj). */
2450 if (current_desc->status & E1000_RXD_STAT_EOP) {
2453 if (desc_len < ETHER_CRC_LEN) {
2455 prev_len_adj = ETHER_CRC_LEN - desc_len;
2458 len = desc_len - ETHER_CRC_LEN;
/* TBI (fiber) compatibility: some "errored" frames are actually valid
 * and are accepted after adjusting the stats. */
2465 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2467 uint32_t pkt_len = desc_len;
2469 if (adapter->fmp != NULL)
2470 pkt_len += adapter->fmp->m_pkthdr.len;
2472 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2474 if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2475 current_desc->errors,
2476 pkt_len, last_byte)) {
2477 em_tbi_adjust_stats(&adapter->hw,
2480 adapter->hw.mac_addr);
/* Try to refill this slot with a fresh cluster; if that fails, recycle
 * the old mbuf into the slot and drop the partial frame. */
2490 if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2491 adapter->dropped_pkts++;
2492 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2493 if (adapter->fmp != NULL)
2494 m_freem(adapter->fmp);
2495 adapter->fmp = NULL;
2496 adapter->lmp = NULL;
2500 /* Assign correct length to the current fragment */
2503 if (adapter->fmp == NULL) {
2504 mp->m_pkthdr.len = len;
2505 adapter->fmp = mp; /* Store the first mbuf */
2508 /* Chain mbuf's together */
2510 * Adjust length of previous mbuf in chain if we
2511 * received less than 4 bytes in the last descriptor.
2513 if (prev_len_adj > 0) {
2514 adapter->lmp->m_len -= prev_len_adj;
2515 adapter->fmp->m_pkthdr.len -= prev_len_adj;
2517 adapter->lmp->m_next = mp;
2518 adapter->lmp = adapter->lmp->m_next;
2519 adapter->fmp->m_pkthdr.len += len;
/* Frame complete: stamp rcvif, record checksum status, handle a VLAN
 * tag if present, then pass the chain to the stack. */
2523 adapter->fmp->m_pkthdr.rcvif = ifp;
2526 em_receive_checksum(adapter, current_desc,
2528 if (current_desc->status & E1000_RXD_STAT_VP)
2529 VLAN_INPUT_TAG(adapter->fmp,
2530 (current_desc->special &
2531 E1000_RXD_SPC_VLAN_MASK));
2533 (*ifp->if_input)(ifp, adapter->fmp);
2534 adapter->fmp = NULL;
2535 adapter->lmp = NULL;
/* Rejected frame: recycle the mbuf and discard any partial chain. */
2538 adapter->dropped_pkts++;
2539 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2540 if (adapter->fmp != NULL)
2541 m_freem(adapter->fmp);
2542 adapter->fmp = NULL;
2543 adapter->lmp = NULL;
2546 /* Zero out the receive descriptors status */
2547 current_desc->status = 0;
2549 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
2550 E1000_WRITE_REG(&adapter->hw, RDT, i);
2552 /* Advance our pointers to the next descriptor */
2553 if (++i == adapter->num_rx_desc) {
2555 current_desc = adapter->rx_desc_base;
2559 adapter->next_rx_desc_to_check = i;
2562 /*********************************************************************
2564 * Verify that the hardware indicated that the checksum is valid.
2565 * Inform the stack about the status of checksum so that stack
2566 * doesn't spend time verifying the checksum.
2568 *********************************************************************/
/*
 * Translate the RX descriptor's hardware checksum status into mbuf
 * csum_flags so the stack can skip software verification.  Only 82543
 * and newer support RX checksum offload; the IXSM bit means the
 * hardware ignored the checksum for this frame.
 * NOTE(review): extract is missing lines (the `mp` parameter line,
 * braces and else-branches) — code kept exactly as found.
 */
2570 em_receive_checksum(struct adapter *adapter,
2571 struct em_rx_desc *rx_desc,
2574 /* 82543 or newer only */
2575 if ((adapter->hw.mac_type < em_82543) ||
2576 /* Ignore Checksum bit is set */
2577 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2578 mp->m_pkthdr.csum_flags = 0;
/* IP header checksum was computed by hardware. */
2582 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2584 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2585 /* IP Checksum Good */
2586 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2587 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2589 mp->m_pkthdr.csum_flags = 0;
/* TCP/UDP checksum was computed; 0xffff marks "verified" pseudo-header data. */
2593 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2595 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2596 mp->m_pkthdr.csum_flags |=
2597 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2598 mp->m_pkthdr.csum_data = htons(0xffff);
2605 em_enable_vlans(struct adapter *adapter)
2609 E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2611 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2612 ctrl |= E1000_CTRL_VME;
2613 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2617 * note: we must call bus_enable_intr() prior to enabling the hardware
2618 * interrupt and bus_disable_intr() after disabling the hardware interrupt
2619 * in order to avoid handler execution races from scheduled interrupt
2623 em_enable_intr(struct adapter *adapter)
2625 struct ifnet *ifp = &adapter->interface_data.ac_if;
2627 if ((ifp->if_flags & IFF_POLLING) == 0) {
2628 lwkt_serialize_handler_enable(&adapter->serializer);
2629 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2634 em_disable_intr(struct adapter *adapter)
2636 E1000_WRITE_REG(&adapter->hw, IMC,
2637 (0xffffffff & ~E1000_IMC_RXSEQ));
2638 lwkt_serialize_handler_disable(&adapter->serializer);
2642 em_is_valid_ether_addr(uint8_t *addr)
2644 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2646 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2653 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2655 pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2659 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
2661 *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2665 em_pci_set_mwi(struct em_hw *hw)
2667 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2668 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2672 em_pci_clear_mwi(struct em_hw *hw)
2674 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2675 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2679 em_read_reg_io(struct em_hw *hw, uint32_t offset)
2681 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2682 return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2686 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
2688 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2689 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2692 /*********************************************************************
2693 * 82544 Coexistence issue workaround.
2694 * There are 2 issues.
2695 * 1. Transmit Hang issue.
2696 * To detect this issue, following equation can be used...
2697 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2698 * If SUM[3:0] is in between 1 to 4, we will have this issue.
2701 * To detect this issue, following equation can be used...
2702 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2703 * If SUM[3:0] is in between 9 to c, we will have this issue.
2707 * Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c(DAC).
2710 *************************************************************************/
/*
 * 82544 coexistence workaround (see comment block above): given a
 * buffer's bus address and length, decide whether it can be described
 * by one TX descriptor or must be split.  When the low nibble of
 * (address low bits + length low bits) lands in the hang/DAC ranges
 * (0x1-0x4 or 0x9-0xC), the buffer is split into (length - 4) plus a
 * trailing 4-byte descriptor so the ending address avoids the bad
 * terminators.  Returns the number of descriptors filled (1 or 2).
 * NOTE(review): the first descriptor[0]/return group (orig lines
 * 2718-2721) appears to have lost its guarding conditional in this
 * extract — presumably the short-buffer (length <= 4) early-out;
 * confirm against the original before editing.  Code kept exactly as
 * found.
 */
2712 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
2714 /* Since issue is sensitive to length and address.*/
2715 /* Let us first check the address...*/
2716 uint32_t safe_terminator;
2718 desc_array->descriptor[0].address = address;
2719 desc_array->descriptor[0].length = length;
2720 desc_array->elements = 1;
2721 return(desc_array->elements);
/* Low nibble of ending address: ADDR[2:0] + SIZE[3:0], masked to 4 bits. */
2723 safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2724 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
2725 if (safe_terminator == 0 ||
2726 (safe_terminator > 4 && safe_terminator < 9) ||
2727 (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2728 desc_array->descriptor[0].address = address;
2729 desc_array->descriptor[0].length = length;
2730 desc_array->elements = 1;
2731 return(desc_array->elements);
/* Unsafe terminator: split off the last 4 bytes into a second descriptor. */
2734 desc_array->descriptor[0].address = address;
2735 desc_array->descriptor[0].length = length - 4;
2736 desc_array->descriptor[1].address = address + (length - 4);
2737 desc_array->descriptor[1].length = 4;
2738 desc_array->elements = 2;
2739 return(desc_array->elements);
2742 /**********************************************************************
2744 * Update the board statistics counters.
2746 **********************************************************************/
/*
 * Harvest the hardware statistics registers (they clear on read) into
 * the driver's accumulating adapter->stats block, then mirror the
 * totals the OS cares about into the ifnet structure.
 * NOTE(review): extract is missing lines (declarations, braces, a few
 * register reads and the closing of conditional blocks) — code kept
 * exactly as found.
 */
2748 em_update_stats_counters(struct adapter *adapter)
/* Symbol/sequence errors are only meaningful on copper or when link is up. */
2752 if (adapter->hw.media_type == em_media_type_copper ||
2753 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2754 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2755 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2757 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2758 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2759 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2760 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2762 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2763 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2764 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2765 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2766 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2767 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2768 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2769 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2770 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2771 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2772 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2773 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2774 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2775 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2776 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2777 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2778 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2779 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2780 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2781 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2783 /* For the 64-bit byte counters the low dword must be read first. */
2784 /* Both registers clear on the read of the high dword */
2786 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
2787 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2788 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2789 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2791 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2792 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2793 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2794 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2795 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2797 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2798 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2799 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2800 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2802 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2803 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2804 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2805 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2806 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2807 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2808 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2809 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2810 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2811 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
/* These counters only exist on 82543 and newer parts. */
2813 if (adapter->hw.mac_type >= em_82543) {
2814 adapter->stats.algnerrc +=
2815 E1000_READ_REG(&adapter->hw, ALGNERRC);
2816 adapter->stats.rxerrc +=
2817 E1000_READ_REG(&adapter->hw, RXERRC);
2818 adapter->stats.tncrs +=
2819 E1000_READ_REG(&adapter->hw, TNCRS);
2820 adapter->stats.cexterr +=
2821 E1000_READ_REG(&adapter->hw, CEXTERR);
2822 adapter->stats.tsctc +=
2823 E1000_READ_REG(&adapter->hw, TSCTC);
2824 adapter->stats.tsctfc +=
2825 E1000_READ_REG(&adapter->hw, TSCTFC);
2827 ifp = &adapter->interface_data.ac_if;
2829 /* Fill out the OS statistics structure */
2830 ifp->if_ibytes = adapter->stats.gorcl;
2831 ifp->if_obytes = adapter->stats.gotcl;
2832 ifp->if_imcasts = adapter->stats.mprc;
2833 ifp->if_collisions = adapter->stats.colc;
/* Aggregate RX error classes into the single ifnet error counter. */
2836 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2837 adapter->stats.crcerrs + adapter->stats.algnerrc +
2838 adapter->stats.rlec + adapter->stats.rnbc +
2839 adapter->stats.mpc + adapter->stats.cexterr;
2842 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2846 /**********************************************************************
2848 * This routine is called only when em_display_debug_stats is enabled.
2849 * This routine provides a way to take a look at important statistics
2850 * maintained by the driver and hardware.
2852 **********************************************************************/
/*
 * Dump driver-internal debug state to the console: interrupt delay
 * settings, TX FIFO workaround counters, hardware TX ring pointers and
 * the driver's allocation-failure / drop counters.  Triggered from the
 * em_sysctl_debug_info handler below.
 * NOTE(review): extract is missing the return type and braces — code
 * kept exactly as found.
 */
2854 em_print_debug_info(struct adapter *adapter)
2856 device_t dev= adapter->dev;
2857 uint8_t *hw_addr = adapter->hw.hw_addr;
2859 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
2860 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2861 E1000_READ_REG(&adapter->hw, TIDV),
2862 E1000_READ_REG(&adapter->hw, TADV));
2863 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2864 E1000_READ_REG(&adapter->hw, RDTR),
2865 E1000_READ_REG(&adapter->hw, RADV));
2867 device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2868 device_printf(dev, "CleanTxInterrupts = %ld\n",
2869 adapter->clean_tx_interrupts);
2871 device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2872 (long long)adapter->tx_fifo_wrk,
2873 (long long)adapter->tx_fifo_reset);
2874 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2875 E1000_READ_REG(&adapter->hw, TDH),
2876 E1000_READ_REG(&adapter->hw, TDT));
2877 device_printf(dev, "Num Tx descriptors avail = %d\n",
2878 adapter->num_tx_desc_avail);
2879 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2880 adapter->no_tx_desc_avail1);
2881 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2882 adapter->no_tx_desc_avail2);
2883 device_printf(dev, "Std mbuf failed = %ld\n",
2884 adapter->mbuf_alloc_failed);
2885 device_printf(dev, "Std mbuf cluster failed = %ld\n",
2886 adapter->mbuf_cluster_failed);
2887 device_printf(dev, "Driver dropped packets = %ld\n",
2888 adapter->dropped_pkts);
/*
 * Dump the accumulated hardware statistics (adapter->stats, maintained
 * by em_update_stats_counters) to the console.  Triggered from the
 * em_sysctl_stats handler below.
 * NOTE(review): extract is missing the return type and braces — code
 * kept exactly as found.
 */
2892 em_print_hw_stats(struct adapter *adapter)
2894 device_t dev= adapter->dev;
2896 device_printf(dev, "Adapter: %p\n", adapter);
2898 device_printf(dev, "Excessive collisions = %lld\n",
2899 (long long)adapter->stats.ecol);
2900 device_printf(dev, "Symbol errors = %lld\n",
2901 (long long)adapter->stats.symerrs);
2902 device_printf(dev, "Sequence errors = %lld\n",
2903 (long long)adapter->stats.sec);
2904 device_printf(dev, "Defer count = %lld\n",
2905 (long long)adapter->stats.dc);
2907 device_printf(dev, "Missed Packets = %lld\n",
2908 (long long)adapter->stats.mpc);
2909 device_printf(dev, "Receive No Buffers = %lld\n",
2910 (long long)adapter->stats.rnbc);
2911 device_printf(dev, "Receive length errors = %lld\n",
2912 (long long)adapter->stats.rlec);
2913 device_printf(dev, "Receive errors = %lld\n",
2914 (long long)adapter->stats.rxerrc);
2915 device_printf(dev, "Crc errors = %lld\n",
2916 (long long)adapter->stats.crcerrs);
2917 device_printf(dev, "Alignment errors = %lld\n",
2918 (long long)adapter->stats.algnerrc);
2919 device_printf(dev, "Carrier extension errors = %lld\n",
2920 (long long)adapter->stats.cexterr);
2922 device_printf(dev, "XON Rcvd = %lld\n",
2923 (long long)adapter->stats.xonrxc);
2924 device_printf(dev, "XON Xmtd = %lld\n",
2925 (long long)adapter->stats.xontxc);
2926 device_printf(dev, "XOFF Rcvd = %lld\n",
2927 (long long)adapter->stats.xoffrxc);
2928 device_printf(dev, "XOFF Xmtd = %lld\n",
2929 (long long)adapter->stats.xofftxc);
2931 device_printf(dev, "Good Packets Rcvd = %lld\n",
2932 (long long)adapter->stats.gprc);
2933 device_printf(dev, "Good Packets Xmtd = %lld\n",
2934 (long long)adapter->stats.gptc);
/*
 * Sysctl handler: writing a value triggers em_print_debug_info() for
 * the adapter passed via arg1.  Read-only accesses (no newptr) return
 * without side effects.
 * NOTE(review): extract is missing lines (declarations, braces, the
 * result check and return) — code kept exactly as found.
 */
2938 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
2942 struct adapter *adapter;
2945 error = sysctl_handle_int(oidp, &result, 0, req);
2947 if (error || !req->newptr)
2951 adapter = (struct adapter *)arg1;
2952 em_print_debug_info(adapter);
/*
 * Sysctl handler: writing a value triggers em_print_hw_stats() for the
 * adapter passed via arg1.  Mirrors em_sysctl_debug_info above.
 * NOTE(review): extract is missing lines (declarations, braces, the
 * result check and return) — code kept exactly as found.
 */
2959 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
2963 struct adapter *adapter;
2966 error = sysctl_handle_int(oidp, &result, 0, req);
2968 if (error || !req->newptr)
2972 adapter = (struct adapter *)arg1;
2973 em_print_hw_stats(adapter);
/*
 * Sysctl handler for the interrupt-delay tunables registered by
 * em_add_int_delay_sysctl().  Converts the user-supplied microsecond
 * value to hardware ticks, read-modify-writes the low 16 bits of the
 * target register under the adapter serializer, and applies
 * register-specific quirks (RDTR flush bit, TIDV zero handling).
 * NOTE(review): extract is missing lines (declarations, braces, range
 * checks, `break;`s and returns) — code kept exactly as found.
 */
2980 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
2982 struct em_int_delay_info *info;
2983 struct adapter *adapter;
2989 info = (struct em_int_delay_info *)arg1;
2990 adapter = info->adapter;
2991 usecs = info->value;
2992 error = sysctl_handle_int(oidp, &usecs, 0, req);
2993 if (error != 0 || req->newptr == NULL)
/* Reject values outside what the 16-bit tick field can encode. */
2995 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
2997 info->value = usecs;
2998 ticks = E1000_USECS_TO_TICKS(usecs);
/* Serialize against the interrupt handler while touching the register. */
3000 lwkt_serialize_enter(&adapter->serializer);
3001 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3002 regval = (regval & ~0xffff) | (ticks & 0xffff);
3003 /* Handle a few special cases. */
3004 switch (info->offset) {
3006 case E1000_82542_RDTR:
3007 regval |= E1000_RDT_FPDB;
3010 case E1000_82542_TIDV:
3012 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3013 /* Don't write 0 into the TIDV register. */
3016 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3019 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3020 lwkt_serialize_exit(&adapter->serializer);
/*
 * Register one interrupt-delay tunable: stash the adapter, register
 * offset and initial value in the caller-provided em_int_delay_info,
 * then attach a read/write sysctl node handled by
 * em_sysctl_int_delay().
 * NOTE(review): extract is missing the return type and braces — code
 * kept exactly as found.
 */
3025 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3026 const char *description, struct em_int_delay_info *info,
3027 int offset, int value)
3029 info->adapter = adapter;
3030 info->offset = offset;
3031 info->value = value;
3032 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3033 SYSCTL_CHILDREN(adapter->sysctl_tree),
3034 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3035 info, 0, em_sysctl_int_delay, "I", description);
3039 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3041 struct adapter *adapter = (void *)arg1;
3045 throttle = em_int_throttle_ceil;
3046 error = sysctl_handle_int(oidp, &throttle, 0, req);
3047 if (error || req->newptr == NULL)
3049 if (throttle < 0 || throttle > 1000000000 / 256)
3053 * Set the interrupt throttling rate in 256ns increments,
3054 * recalculate sysctl value assignment to get exact frequency.
3056 throttle = 1000000000 / 256 / throttle;
3057 lwkt_serialize_enter(&adapter->serializer);
3058 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3059 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3060 lwkt_serialize_exit(&adapter->serializer);
3062 lwkt_serialize_enter(&adapter->serializer);
3063 em_int_throttle_ceil = 0;
3064 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3065 lwkt_serialize_exit(&adapter->serializer);
3067 device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n",
3068 em_int_throttle_ceil);