1 /**************************************************************************
3 Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
5 Copyright (c) 2001-2003, Intel Corporation
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
11 1. Redistributions of source code must retain the above copyright notice,
12 this list of conditions and the following disclaimer.
14 2. Redistributions in binary form must reproduce the above copyright
15 notice, this list of conditions and the following disclaimer in the
16 documentation and/or other materials provided with the distribution.
18 3. Neither the name of the Intel Corporation nor the names of its
19 contributors may be used to endorse or promote products derived from
20 this software without specific prior written permission.
22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 POSSIBILITY OF SUCH DAMAGE.
34 ***************************************************************************/
36 /*$FreeBSD: src/sys/dev/em/if_em.c,v 1.2.2.15 2003/06/09 22:10:15 pdeuskar Exp $*/
37 /*$DragonFly: src/sys/dev/netif/em/if_em.c,v 1.25 2005/02/05 23:16:42 joerg Exp $*/
41 /*********************************************************************
42 * Set this to one to display debug statistics
43 *********************************************************************/
/* Global debug switch read by em_print_hw_stats()/sysctl handlers. */
44 int em_display_debug_stats = 0;
46 /*********************************************************************
 * Driver version string, reported in the probe description
 * ("<name>, Version - <version>") by em_probe().
48 *********************************************************************/
50 char em_driver_version[] = "1.7.25";
53 /*********************************************************************
56 * Used by probe to select devices to load on
57 * Last field stores an index into em_strings
58 * Last entry must be all 0s
60 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
61 *********************************************************************/
/*
 * NOTE(review): this listing appears to be missing lines — the
 * initializer's opening '{', the required all-zero terminator entry,
 * and the closing '};' do not appear here; confirm against the
 * canonical if_em.c before editing.
 */
63 static em_vendor_info_t em_vendor_info_array[] =
65 /* Intel(R) PRO/1000 Network Connection */
66 { 0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0},
67 { 0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0},
68 { 0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0},
69 { 0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0},
70 { 0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0},
71 { 0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0},
72 { 0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0},
73 { 0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0},
74 { 0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0},
75 { 0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0},
76 { 0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0},
77 { 0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0},
78 { 0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0},
79 { 0x8086, 0x1014, PCI_ANY_ID, PCI_ANY_ID, 0},
80 { 0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0},
81 { 0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0},
82 { 0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0},
83 { 0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0},
84 { 0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0},
85 { 0x8086, 0x101A, PCI_ANY_ID, PCI_ANY_ID, 0},
86 { 0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0},
87 { 0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0},
88 { 0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0},
89 { 0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0},
90 { 0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0},
91 { 0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0},
92 { 0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0},
93 { 0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0},
94 { 0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0},
95 { 0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0},
96 { 0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0},
97 { 0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0},
98 /* required last entry */
102 /*********************************************************************
103 * Table of branding strings for all supported NICs.
104 *********************************************************************/
/* Indexed by em_vendor_info_t.index; only one branding string here. */
106 static const char *em_strings[] = {
107 "Intel(R) PRO/1000 Network Connection"
110 /*********************************************************************
111 * Function prototypes
112 *********************************************************************/
/* Device interface entry points (newbus). */
113 static int em_probe(device_t);
114 static int em_attach(device_t);
115 static int em_detach(device_t);
116 static int em_shutdown(device_t);
/* ifnet entry points and interrupt handler. */
117 static void em_intr(void *);
118 static void em_start(struct ifnet *);
119 static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
120 static void em_watchdog(struct ifnet *);
121 static void em_init(void *);
122 static void em_stop(void *);
123 static void em_media_status(struct ifnet *, struct ifmediareq *);
124 static int em_media_change(struct ifnet *);
/* Hardware/ring setup and teardown helpers. */
125 static void em_identify_hardware(struct adapter *);
126 static void em_local_timer(void *);
127 static int em_hardware_init(struct adapter *);
128 static void em_setup_interface(device_t, struct adapter *);
129 static int em_setup_transmit_structures(struct adapter *);
130 static void em_initialize_transmit_unit(struct adapter *);
131 static int em_setup_receive_structures(struct adapter *);
132 static void em_initialize_receive_unit(struct adapter *);
133 static void em_enable_intr(struct adapter *);
134 static void em_disable_intr(struct adapter *);
135 static void em_free_transmit_structures(struct adapter *);
136 static void em_free_receive_structures(struct adapter *);
137 static void em_update_stats_counters(struct adapter *);
138 static void em_clean_transmit_interrupts(struct adapter *);
139 static int em_allocate_receive_structures(struct adapter *);
140 static int em_allocate_transmit_structures(struct adapter *);
141 static void em_process_receive_interrupts(struct adapter *, int);
/*
 * NOTE(review): the continuation line of the em_receive_checksum
 * prototype (its third parameter) is missing from this listing.
 */
142 static void em_receive_checksum(struct adapter *, struct em_rx_desc *,
144 static void em_transmit_checksum_setup(struct adapter *, struct mbuf *,
145 uint32_t *, uint32_t *);
146 static void em_set_promisc(struct adapter *);
147 static void em_disable_promisc(struct adapter *);
148 static void em_set_multi(struct adapter *);
149 static void em_print_hw_stats(struct adapter *);
150 static void em_print_link_status(struct adapter *);
151 static int em_get_buf(int i, struct adapter *, struct mbuf *, int how);
152 static void em_enable_vlans(struct adapter *);
153 static int em_encap(struct adapter *, struct mbuf *);
154 static void em_smartspeed(struct adapter *);
/* 82547 TX FIFO workaround helpers. */
155 static int em_82547_fifo_workaround(struct adapter *, int);
156 static void em_82547_update_fifo_head(struct adapter *, int);
157 static int em_82547_tx_fifo_reset(struct adapter *);
158 static void em_82547_move_tail(void *arg);
159 static int em_dma_malloc(struct adapter *, bus_size_t,
160 struct em_dma_alloc *, int);
161 static void em_dma_free(struct adapter *, struct em_dma_alloc *);
162 static void em_print_debug_info(struct adapter *);
163 static int em_is_valid_ether_addr(uint8_t *);
/* sysctl handlers for debug output and interrupt-delay tuning. */
164 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
165 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
166 static uint32_t em_fill_descriptors(uint64_t address, uint32_t length,
167 PDESC_ARRAY desc_array);
168 static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
169 static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
170 static void em_add_int_delay_sysctl(struct adapter *, const char *,
172 struct em_int_delay_info *, int, int);
174 /*********************************************************************
175 * FreeBSD Device Interface Entry Points
176 *********************************************************************/
/*
 * NOTE(review): the DEVMETHOD_END/{0,0} sentinel and the closing
 * braces of em_methods and em_driver are missing from this listing.
 */
178 static device_method_t em_methods[] = {
179 /* Device interface */
180 DEVMETHOD(device_probe, em_probe),
181 DEVMETHOD(device_attach, em_attach),
182 DEVMETHOD(device_detach, em_detach),
183 DEVMETHOD(device_shutdown, em_shutdown),
187 static driver_t em_driver = {
188 "em", em_methods, sizeof(struct adapter),
191 static devclass_t em_devclass;
/* Register the driver on the pci bus. */
193 DECLARE_DUMMY_MODULE(if_em);
194 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
196 /*********************************************************************
197 * Tunable default values.
198 *********************************************************************/
/*
 * The e1000 interrupt-delay registers count in 1.024 usec ticks;
 * these macros convert between ticks and microseconds, rounding to
 * the nearest unit (+500/1000 and +512/1024 respectively).
 */
200 #define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
201 #define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
/* Defaults in usecs, derived from the EM_* tick constants. */
203 static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV);
204 static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR);
205 static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV);
206 static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV);
207 static int em_int_throttle_ceil = 10000;
/* Loader tunables overriding the defaults above. */
209 TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt);
210 TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt);
211 TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt);
212 TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt);
213 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
215 /*********************************************************************
216 * Device identification routine
218 * em_probe determines if the driver should be loaded on
219 * adapter based on PCI vendor/device id of the adapter.
221 * return 0 on success, positive on failure
222 *********************************************************************/
/*
 * NOTE(review): the "static int" return-type line, the function's
 * braces, the early-return for non-Intel vendor IDs, the ent++
 * loop advance and the final return statements are missing from
 * this listing — restore from the canonical source before editing.
 */
225 em_probe(device_t dev)
227 em_vendor_info_t *ent;
229 uint16_t pci_vendor_id = 0;
230 uint16_t pci_device_id = 0;
231 uint16_t pci_subvendor_id = 0;
232 uint16_t pci_subdevice_id = 0;
233 char adapter_name[60];
235 INIT_DEBUGOUT("em_probe: begin");
237 pci_vendor_id = pci_get_vendor(dev);
238 if (pci_vendor_id != EM_VENDOR_ID)
241 pci_device_id = pci_get_device(dev);
242 pci_subvendor_id = pci_get_subvendor(dev);
243 pci_subdevice_id = pci_get_subdevice(dev);
/* Walk the vendor table; PCI_ANY_ID in a sub-id field is a wildcard. */
245 ent = em_vendor_info_array;
246 while (ent->vendor_id != 0) {
247 if ((pci_vendor_id == ent->vendor_id) &&
248 (pci_device_id == ent->device_id) &&
250 ((pci_subvendor_id == ent->subvendor_id) ||
251 (ent->subvendor_id == PCI_ANY_ID)) &&
253 ((pci_subdevice_id == ent->subdevice_id) ||
254 (ent->subdevice_id == PCI_ANY_ID))) {
/* Matched: advertise "<branding string>, Version - <driver version>". */
255 snprintf(adapter_name, sizeof(adapter_name),
256 "%s, Version - %s", em_strings[ent->index],
258 device_set_desc_copy(dev, adapter_name);
267 /*********************************************************************
268 * Device initialization routine
270 * The attach entry point is called when the driver is being loaded.
271 * This routine identifies the type of hardware, allocates all resources
272 * and initializes the hardware.
274 * return 0 on success, positive on failure
275 *********************************************************************/
/*
 * NOTE(review): this listing drops many lines of em_attach — the
 * "static int" line, local variable declarations (tsize, rsize, rid,
 * val, i, error), most error-unwind paths ("goto err_*" / returns),
 * and several closing braces. Treat the surviving lines as a partial
 * transcript only.
 */
278 em_attach(device_t dev)
280 struct adapter *adapter;
285 INIT_DEBUGOUT("em_attach: begin");
287 adapter = device_get_softc(dev);
289 bzero(adapter, sizeof(struct adapter));
291 callout_init(&adapter->timer);
292 callout_init(&adapter->tx_fifo_timer);
295 adapter->osdep.dev = dev;
/* Per-device sysctl tree under hw.<nameunit>. */
298 sysctl_ctx_init(&adapter->sysctl_ctx);
299 adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
300 SYSCTL_STATIC_CHILDREN(_hw),
302 device_get_nameunit(dev),
306 if (adapter->sysctl_tree == NULL) {
311 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
312 SYSCTL_CHILDREN(adapter->sysctl_tree),
313 OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW,
315 em_sysctl_debug_info, "I", "Debug Information");
317 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
318 SYSCTL_CHILDREN(adapter->sysctl_tree),
319 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW,
321 em_sysctl_stats, "I", "Statistics");
323 /* Determine hardware revision */
324 em_identify_hardware(adapter);
326 /* Set up some sysctls for the tunable interrupt delays */
327 em_add_int_delay_sysctl(adapter, "rx_int_delay",
328 "receive interrupt delay in usecs",
329 &adapter->rx_int_delay,
330 E1000_REG_OFFSET(&adapter->hw, RDTR),
331 em_rx_int_delay_dflt);
332 em_add_int_delay_sysctl(adapter, "tx_int_delay",
333 "transmit interrupt delay in usecs",
334 &adapter->tx_int_delay,
335 E1000_REG_OFFSET(&adapter->hw, TIDV),
336 em_tx_int_delay_dflt);
/* Absolute-delay registers (RADV/TADV) exist only on 82540 and later. */
337 if (adapter->hw.mac_type >= em_82540) {
338 em_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
339 "receive interrupt delay limit in usecs",
340 &adapter->rx_abs_int_delay,
341 E1000_REG_OFFSET(&adapter->hw, RADV),
342 em_rx_abs_int_delay_dflt);
343 em_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
344 "transmit interrupt delay limit in usecs",
345 &adapter->tx_abs_int_delay,
346 E1000_REG_OFFSET(&adapter->hw, TADV),
347 em_tx_abs_int_delay_dflt);
348 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
349 SYSCTL_CHILDREN(adapter->sysctl_tree),
350 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW,
351 adapter, 0, em_sysctl_int_throttle, "I", NULL);
354 /* Parameters (to be read from user) */
355 adapter->num_tx_desc = EM_MAX_TXD;
356 adapter->num_rx_desc = EM_MAX_RXD;
357 adapter->hw.autoneg = DO_AUTO_NEG;
358 adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT;
359 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
360 adapter->hw.tbi_compatibility_en = TRUE;
361 adapter->rx_buffer_len = EM_RXBUFFER_2048;
364 * These parameters control the automatic generation(Tx) and
365 * response(Rx) to Ethernet PAUSE frames.
367 adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH;
368 adapter->hw.fc_low_water = FC_DEFAULT_LO_THRESH;
369 adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER;
370 adapter->hw.fc_send_xon = TRUE;
371 adapter->hw.fc = em_fc_full;
373 adapter->hw.phy_init_script = 1;
374 adapter->hw.phy_reset_disable = FALSE;
/* NOTE(review): #else/#endif lines of this conditional are missing here. */
376 #ifndef EM_MASTER_SLAVE
377 adapter->hw.master_slave = em_ms_hw_default;
379 adapter->hw.master_slave = EM_MASTER_SLAVE;
383 * Set the max frame size assuming standard ethernet
386 adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
388 adapter->hw.min_frame_size =
389 MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN;
392 * This controls when hardware reports transmit completion
395 adapter->hw.report_tx_early = 1;
/* Map the memory BAR used for register access. */
398 adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
400 if (!(adapter->res_memory)) {
401 device_printf(dev, "Unable to allocate bus resource: memory\n");
405 adapter->osdep.mem_bus_space_tag =
406 rman_get_bustag(adapter->res_memory);
407 adapter->osdep.mem_bus_space_handle =
408 rman_get_bushandle(adapter->res_memory);
409 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
/* Post-82543 parts also need an I/O BAR; scan config space for it. */
411 if (adapter->hw.mac_type > em_82543) {
412 /* Figure our where our IO BAR is ? */
414 for (i = 0; i < 5; i++) {
415 val = pci_read_config(dev, rid, 4);
416 if (val & 0x00000001) {
417 adapter->io_rid = rid;
423 adapter->res_ioport = bus_alloc_resource_any(dev,
424 SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE);
425 if (!(adapter->res_ioport)) {
426 device_printf(dev, "Unable to allocate bus resource: ioport\n");
431 adapter->hw.reg_io_tag = rman_get_bustag(adapter->res_ioport);
432 adapter->hw.reg_io_handle = rman_get_bushandle(adapter->res_ioport);
436 adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
437 &rid, RF_SHAREABLE | RF_ACTIVE);
438 if (!(adapter->res_interrupt)) {
439 device_printf(dev, "Unable to allocate bus resource: interrupt\n");
444 adapter->hw.back = &adapter->osdep;
446 /* Initialize eeprom parameters */
447 em_init_eeprom_params(&adapter->hw);
449 tsize = adapter->num_tx_desc * sizeof(struct em_tx_desc);
451 /* Allocate Transmit Descriptor ring */
452 if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_WAITOK)) {
453 device_printf(dev, "Unable to allocate TxDescriptor memory\n");
457 adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr;
459 rsize = adapter->num_rx_desc * sizeof(struct em_rx_desc);
461 /* Allocate Receive Descriptor ring */
462 if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_WAITOK)) {
463 device_printf(dev, "Unable to allocate rx_desc memory\n");
467 adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr;
469 /* Initialize the hardware */
470 if (em_hardware_init(adapter)) {
471 device_printf(dev, "Unable to initialize the hardware\n");
476 /* Copy the permanent MAC address out of the EEPROM */
477 if (em_read_mac_addr(&adapter->hw) < 0) {
478 device_printf(dev, "EEPROM read error while reading mac address\n");
483 if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) {
484 device_printf(dev, "Invalid mac address\n");
489 /* Setup OS specific network interface */
490 em_setup_interface(dev, adapter);
492 /* Initialize statistics */
493 em_clear_hw_cntrs(&adapter->hw);
494 em_update_stats_counters(adapter);
495 adapter->hw.get_link_status = 1;
496 em_check_for_link(&adapter->hw);
498 /* Print the link status */
499 if (adapter->link_active == 1) {
500 em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed,
501 &adapter->link_duplex);
502 device_printf(dev, "Speed: %d Mbps, Duplex: %s\n",
504 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
506 device_printf(dev, "Speed: N/A, Duplex:N/A\n");
508 /* Identify 82544 on PCIX */
509 em_get_bus_info(&adapter->hw);
510 if (adapter->hw.bus_type == em_bus_type_pcix &&
511 adapter->hw.mac_type == em_82544)
512 adapter->pcix_82544 = TRUE;
514 adapter->pcix_82544 = FALSE;
/* Hook the interrupt last so it cannot fire on a half-built softc. */
516 error = bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET,
517 (void (*)(void *)) em_intr, adapter,
518 &adapter->int_handler_tag);
520 device_printf(dev, "Error registering interrupt handler!\n");
521 ether_ifdetach(&adapter->interface_data.ac_if);
525 INIT_DEBUGOUT("em_attach: end");
533 /*********************************************************************
534 * Device removal routine
536 * The detach entry point is called when the driver is being removed.
537 * This routine stops the adapter and deallocates all the resources
538 * that were allocated for driver operation.
540 * return 0 on success, positive on failure
541 *********************************************************************/
/*
 * NOTE(review): the "static int" line, the em_stop() call, several
 * closing braces and the final return are missing from this listing.
 */
544 em_detach(device_t dev)
546 struct adapter * adapter = device_get_softc(dev);
549 INIT_DEBUGOUT("em_detach: begin");
/* Tell em_ioctl() to refuse further configuration requests. */
552 adapter->in_detach = 1;
554 if (device_is_attached(dev)) {
556 em_phy_hw_reset(&adapter->hw);
557 ether_ifdetach(&adapter->interface_data.ac_if);
559 bus_generic_detach(dev);
/* Release bus resources in reverse order of allocation. */
561 if (adapter->res_interrupt != NULL) {
562 bus_teardown_intr(dev, adapter->res_interrupt,
563 adapter->int_handler_tag);
564 bus_release_resource(dev, SYS_RES_IRQ, 0,
565 adapter->res_interrupt);
567 if (adapter->res_memory != NULL) {
568 bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
569 adapter->res_memory);
572 if (adapter->res_ioport != NULL) {
573 bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
574 adapter->res_ioport);
577 /* Free Transmit Descriptor ring */
578 if (adapter->tx_desc_base != NULL) {
579 em_dma_free(adapter, &adapter->txdma);
580 adapter->tx_desc_base = NULL;
583 /* Free Receive Descriptor ring */
584 if (adapter->rx_desc_base != NULL) {
585 em_dma_free(adapter, &adapter->rxdma);
586 adapter->rx_desc_base = NULL;
589 adapter->sysctl_tree = NULL;
590 sysctl_ctx_free(&adapter->sysctl_ctx);
596 /*********************************************************************
598 * Shutdown entry point
600 **********************************************************************/
/*
 * NOTE(review): signature line, em_stop() call and return are
 * missing from this listing.
 */
603 em_shutdown(device_t dev)
605 struct adapter *adapter = device_get_softc(dev);
610 /*********************************************************************
611 * Transmit entry point
613 * em_start is called by the stack to initiate a transmit.
614 * The driver will remain in this routine as long as there are
615 * packets to transmit and transmit resources are available.
616 * In case resources are not available stack is notified and
617 * the packet is requeued.
618 **********************************************************************/
/*
 * NOTE(review): the "static void" line, the m_head declaration, the
 * IFF_RUNNING/IFF_OACTIVE guard, the NULL-mbuf continue and closing
 * braces are missing from this listing.
 */
621 em_start(struct ifnet *ifp)
625 struct adapter *adapter = ifp->if_softc;
627 if (!adapter->link_active)
631 while (ifp->if_snd.ifq_head != NULL) {
632 IF_DEQUEUE(&ifp->if_snd, m_head);
/* If encap fails, requeue the packet and mark the queue busy. */
637 if (em_encap(adapter, m_head)) {
638 ifp->if_flags |= IFF_OACTIVE;
639 IF_PREPEND(&ifp->if_snd, m_head);
643 /* Send a copy of the frame to the BPF listener */
644 BPF_MTAP(ifp, m_head);
646 /* Set timeout in case hardware has problems transmitting */
647 ifp->if_timer = EM_TX_TIMEOUT;
652 /*********************************************************************
655 * em_ioctl is called when the user wants to configure the
658 * return 0 on success, positive on failure
659 **********************************************************************/
/*
 * NOTE(review): the "static int" line, splimp()/splx() calls, the
 * switch statement, every "case SIOCxxx:" label, the break/return
 * statements and closing braces are missing from this listing —
 * the IOCTL_DEBUGOUT strings indicate where each case began.
 */
662 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
664 int s, mask, error = 0;
665 struct ifreq *ifr = (struct ifreq *) data;
666 struct adapter *adapter = ifp->if_softc;
/* Refuse configuration while detach is tearing the device down. */
670 if (adapter->in_detach)
676 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
677 ether_ioctl(ifp, command, data);
680 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
681 if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
684 ifp->if_mtu = ifr->ifr_mtu;
685 adapter->hw.max_frame_size =
686 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
691 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
692 if (ifp->if_flags & IFF_UP) {
693 if (!(ifp->if_flags & IFF_RUNNING))
695 em_disable_promisc(adapter);
696 em_set_promisc(adapter);
698 if (ifp->if_flags & IFF_RUNNING)
704 IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
705 if (ifp->if_flags & IFF_RUNNING) {
706 em_disable_intr(adapter);
707 em_set_multi(adapter);
/* 82542 rev2.0 must reinit the RX unit after a multicast change. */
708 if (adapter->hw.mac_type == em_82542_rev2_0)
709 em_initialize_receive_unit(adapter);
710 #ifdef DEVICE_POLLING
711 if (!(ifp->if_flags & IFF_POLLING))
713 em_enable_intr(adapter);
718 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
719 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
722 IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
723 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
724 if (mask & IFCAP_HWCSUM) {
725 if (IFCAP_HWCSUM & ifp->if_capenable)
726 ifp->if_capenable &= ~IFCAP_HWCSUM;
728 ifp->if_capenable |= IFCAP_HWCSUM;
729 if (ifp->if_flags & IFF_RUNNING)
734 IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)\n", (int)command);
743 /*********************************************************************
744 * Watchdog entry point
746 * This routine is called whenever hardware quits transmitting.
748 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, the em_init() restart
 * call and the watchdog-event counter increment are missing from this
 * listing.
 */
751 em_watchdog(struct ifnet *ifp)
753 struct adapter * adapter;
754 adapter = ifp->if_softc;
756 /* If we are in this routine because of pause frames, then
757 * don't reset the hardware.
759 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) {
760 ifp->if_timer = EM_TX_TIMEOUT;
764 if (em_check_for_link(&adapter->hw))
765 if_printf(ifp, "watchdog timeout -- resetting\n");
767 ifp->if_flags &= ~IFF_RUNNING;
774 /*********************************************************************
777 * This routine is used in two ways. It is used by the stack as
778 * init entry point in network interface structure. It is also used
779 * by the driver as a hw/sw initialization routine to get to a
782 * return 0 on success, positive on failure
783 **********************************************************************/
/*
 * NOTE(review): the "static void em_init(void *arg)" signature line,
 * splimp()/splx() protection, the em_stop() call and the error-path
 * returns are missing from this listing.
 */
789 struct adapter *adapter = arg;
790 struct ifnet *ifp = &adapter->interface_data.ac_if;
792 INIT_DEBUGOUT("em_init: begin");
798 /* Get the latest mac address, User can use a LAA */
799 bcopy(adapter->interface_data.ac_enaddr, adapter->hw.mac_addr,
802 /* Initialize the hardware */
803 if (em_hardware_init(adapter)) {
804 if_printf(ifp, "Unable to initialize the hardware\n");
809 em_enable_vlans(adapter);
811 /* Prepare transmit descriptors and buffers */
812 if (em_setup_transmit_structures(adapter)) {
813 if_printf(ifp, "Could not setup transmit structures\n");
818 em_initialize_transmit_unit(adapter);
820 /* Setup Multicast table */
821 em_set_multi(adapter);
823 /* Prepare receive descriptors and buffers */
824 if (em_setup_receive_structures(adapter)) {
825 if_printf(ifp, "Could not setup receive structures\n");
830 em_initialize_receive_unit(adapter);
832 /* Don't loose promiscuous settings */
833 em_set_promisc(adapter);
835 ifp->if_flags |= IFF_RUNNING;
836 ifp->if_flags &= ~IFF_OACTIVE;
/* Hardware checksum offload is available on 82543 and newer. */
838 if (adapter->hw.mac_type >= em_82543) {
839 if (ifp->if_capenable & IFCAP_TXCSUM)
840 ifp->if_hwassist = EM_CHECKSUM_FEATURES;
842 ifp->if_hwassist = 0;
845 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
846 em_clear_hw_cntrs(&adapter->hw);
847 #ifdef DEVICE_POLLING
849 * Only enable interrupts if we are not polling, make sure
850 * they are off otherwise.
852 if (ifp->if_flags & IFF_POLLING)
853 em_disable_intr(adapter);
855 #endif /* DEVICE_POLLING */
856 em_enable_intr(adapter);
858 /* Don't reset the phy next time init gets called */
859 adapter->hw.phy_reset_disable = TRUE;
864 #ifdef DEVICE_POLLING
865 static poll_handler_t em_poll;
/*
 * NOTE(review): the "static void" line, the reg_icr declaration,
 * braces and the em_start() kick at the end are missing from this
 * listing.
 */
868 em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
870 struct adapter *adapter = ifp->if_softc;
873 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
874 em_enable_intr(adapter);
/* Periodically re-check link state while polling. */
877 if (cmd == POLL_AND_CHECK_STATUS) {
878 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
879 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
880 callout_stop(&adapter->timer);
881 adapter->hw.get_link_status = 1;
882 em_check_for_link(&adapter->hw);
883 em_print_link_status(adapter);
884 callout_reset(&adapter->timer, 2*hz, em_local_timer,
888 if (ifp->if_flags & IFF_RUNNING) {
889 em_process_receive_interrupts(adapter, count);
890 em_clean_transmit_interrupts(adapter);
893 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
896 #endif /* DEVICE_POLLING */
898 /*********************************************************************
900 * Interrupt Service routine
902 **********************************************************************/
/*
 * NOTE(review): the "static void em_intr(void *arg)" signature line,
 * the ifp/reg_icr declarations, the zero-ICR early return and the
 * trailing em_start() call are missing from this listing.
 */
908 struct adapter *adapter = arg;
910 ifp = &adapter->interface_data.ac_if;
912 #ifdef DEVICE_POLLING
913 if (ifp->if_flags & IFF_POLLING)
916 if (ether_poll_register(em_poll, ifp)) {
917 em_disable_intr(adapter);
921 #endif /* DEVICE_POLLING */
/* Reading ICR acknowledges and clears the pending interrupt causes. */
923 reg_icr = E1000_READ_REG(&adapter->hw, ICR);
927 /* Link status change */
928 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
929 callout_stop(&adapter->timer);
930 adapter->hw.get_link_status = 1;
931 em_check_for_link(&adapter->hw);
932 em_print_link_status(adapter);
933 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
937 * note: do not attempt to improve efficiency by looping. This
938 * only results in unnecessary piecemeal collection of received
939 * packets and unnecessary piecemeal cleanups of the transmit ring.
941 if (ifp->if_flags & IFF_RUNNING) {
942 em_process_receive_interrupts(adapter, -1);
943 em_clean_transmit_interrupts(adapter);
946 if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
950 /*********************************************************************
952 * Media Ioctl callback
954 * This routine is called whenever the user queries the status of
955 * the interface using ifconfig.
957 **********************************************************************/
/*
 * NOTE(review): the "static void" line, braces, the early return when
 * the link is down, the switch statement for link_speed and the
 * else branches are missing from this listing.
 */
959 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
961 struct adapter * adapter = ifp->if_softc;
963 INIT_DEBUGOUT("em_media_status: begin");
/* Refresh link state before reporting it. */
965 em_check_for_link(&adapter->hw);
966 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
967 if (adapter->link_active == 0) {
968 em_get_speed_and_duplex(&adapter->hw,
969 &adapter->link_speed,
970 &adapter->link_duplex);
971 adapter->link_active = 1;
974 if (adapter->link_active == 1) {
975 adapter->link_speed = 0;
976 adapter->link_duplex = 0;
977 adapter->link_active = 0;
981 ifmr->ifm_status = IFM_AVALID;
982 ifmr->ifm_active = IFM_ETHER;
984 if (!adapter->link_active)
987 ifmr->ifm_status |= IFM_ACTIVE;
989 if (adapter->hw.media_type == em_media_type_fiber) {
990 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
992 switch (adapter->link_speed) {
994 ifmr->ifm_active |= IFM_10_T;
997 ifmr->ifm_active |= IFM_100_TX;
1000 ifmr->ifm_active |= IFM_1000_TX;
1003 if (adapter->link_duplex == FULL_DUPLEX)
1004 ifmr->ifm_active |= IFM_FDX;
1006 ifmr->ifm_active |= IFM_HDX;
1010 /*********************************************************************
1012 * Media Ioctl callback
1014 * This routine is called when the user changes speed/duplex using
1015 * media/mediopt option with ifconfig.
1017 **********************************************************************/
/*
 * NOTE(review): the "static int" line, braces, the "case IFM_xxx:"
 * labels of the subtype switch, break statements, the em_init() call
 * after reconfiguration and the return are missing from this listing.
 */
1019 em_media_change(struct ifnet *ifp)
1021 struct adapter * adapter = ifp->if_softc;
1022 struct ifmedia *ifm = &adapter->media;
1024 INIT_DEBUGOUT("em_media_change: begin");
1026 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1029 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1031 adapter->hw.autoneg = DO_AUTO_NEG;
1032 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1036 adapter->hw.autoneg = DO_AUTO_NEG;
1037 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
/* Forced 100 Mb: disable autoneg, honor the FDX media flag. */
1040 adapter->hw.autoneg = FALSE;
1041 adapter->hw.autoneg_advertised = 0;
1042 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1043 adapter->hw.forced_speed_duplex = em_100_full;
1045 adapter->hw.forced_speed_duplex = em_100_half;
/* Forced 10 Mb: same pattern as above. */
1048 adapter->hw.autoneg = FALSE;
1049 adapter->hw.autoneg_advertised = 0;
1050 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1051 adapter->hw.forced_speed_duplex = em_10_full;
1053 adapter->hw.forced_speed_duplex = em_10_half;
1056 if_printf(ifp, "Unsupported media type\n");
1059 * As the speed/duplex settings may have changed we need to
1062 adapter->hw.phy_reset_disable = FALSE;
/*
 * bus_dmamap_load_mbuf() callback: record the DMA segment list into
 * the caller's struct em_q for em_encap() to consume.
 * NOTE(review): the "static void" line, the error parameter name,
 * braces and the q->nsegs assignment are missing from this listing.
 */
1070 em_tx_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
1073 struct em_q *q = arg;
1077 KASSERT(nsegs <= EM_MAX_SCATTER,
1078 ("Too many DMA segments returned when mapping tx packet"));
1080 bcopy(seg, q->segs, nsegs * sizeof(seg[0]));
/* Constants for the 82547 TX FIFO workaround (sizes in bytes). */
1083 #define EM_FIFO_HDR 0x10
1084 #define EM_82547_PKT_THRESH 0x3e0
1085 #define EM_82547_TX_FIFO_SIZE 0x2800
1086 #define EM_82547_TX_FIFO_BEGIN 0xf00
1087 /*********************************************************************
1089 * This routine maps the mbufs to tx descriptors.
1091 * return 0 on success, positive on failure
1092 **********************************************************************/
/*
 * em_encap: map one outgoing mbuf chain to Tx descriptors and hand it to
 * the hardware. Returns 0 on success, positive errno-style value on
 * failure (per the banner comment above).
 * NOTE(review): this listing is heavily fragmented — declarations of q,
 * txd_upper, i/j, error-return statements, #else/#endif arms and closing
 * braces are elided; comments below describe only what is visible.
 */
1094 em_encap(struct adapter *adapter, struct mbuf *m_head)
1097 uint32_t txd_lower, txd_used = 0, txd_saved = 0;
1101 /* For 82544 Workaround */
1102 DESC_ARRAY desc_array;
1103 uint32_t array_elements;
/* Old VLAN API (DragonFly / pre-5.x FreeBSD) uses an ifvlan pointer;
 * newer FreeBSD uses an m_tag (mtag) — both branches appear below. */
1106 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1107 struct ifvlan *ifv = NULL;
1112 struct em_buffer *tx_buffer = NULL;
1113 struct em_tx_desc *current_tx_desc = NULL;
1114 struct ifnet *ifp = &adapter->interface_data.ac_if;
1117 * Force a cleanup if number of TX descriptors
1118 * available hits the threshold
1120 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
1121 em_clean_transmit_interrupts(adapter);
1122 if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
/* Still starved after cleanup: count it (error return elided). */
1123 adapter->no_tx_desc_avail1++;
1128 * Map the packet for DMA.
1130 if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &q.map)) {
1131 adapter->no_tx_map_avail++;
1134 error = bus_dmamap_load_mbuf(adapter->txtag, q.map, m_head, em_tx_cb,
1135 &q, BUS_DMA_NOWAIT);
1137 adapter->no_tx_dma_setup++;
1138 bus_dmamap_destroy(adapter->txtag, q.map);
1141 KASSERT(q.nsegs != 0, ("em_encap: empty packet"));
/* Not enough free descriptors for this scatter list: unwind mapping. */
1143 if (q.nsegs > adapter->num_tx_desc_avail) {
1144 adapter->no_tx_desc_avail2++;
1145 bus_dmamap_unload(adapter->txtag, q.map);
1146 bus_dmamap_destroy(adapter->txtag, q.map);
/* Checksum offload requested by the stack? */
1150 if (ifp->if_hwassist > 0) {
1151 em_transmit_checksum_setup(adapter, m_head,
1152 &txd_upper, &txd_lower);
1155 txd_upper = txd_lower = 0;
1157 /* Find out if we are in vlan mode */
1158 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1159 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1160 m_head->m_pkthdr.rcvif != NULL &&
1161 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
1162 ifv = m_head->m_pkthdr.rcvif->if_softc;
1164 mtag = VLAN_OUTPUT_TAG(ifp, m_head);
1167 i = adapter->next_avail_tx_desc;
/* txd_saved presumably records the ring position for rollback below —
 * the assignment itself is elided from this listing. */
1168 if (adapter->pcix_82544) {
1172 for (j = 0; j < q.nsegs; j++) {
1173 /* If adapter is 82544 and on PCIX bus */
1174 if(adapter->pcix_82544) {
1176 address = htole64(q.segs[j].ds_addr);
1178 * Check the Address and Length combination and
1179 * split the data accordingly
1181 array_elements = em_fill_descriptors(address,
1182 htole32(q.segs[j].ds_len),
1184 for (counter = 0; counter < array_elements; counter++) {
/* Ring exhausted mid-packet: roll back to txd_saved and unwind. */
1185 if (txd_used == adapter->num_tx_desc_avail) {
1186 adapter->next_avail_tx_desc = txd_saved;
1187 adapter->no_tx_desc_avail2++;
1188 bus_dmamap_unload(adapter->txtag, q.map);
1189 bus_dmamap_destroy(adapter->txtag, q.map);
1192 tx_buffer = &adapter->tx_buffer_area[i];
1193 current_tx_desc = &adapter->tx_desc_base[i];
1194 current_tx_desc->buffer_addr = htole64(
1195 desc_array.descriptor[counter].address);
1196 current_tx_desc->lower.data = htole32(
1197 (adapter->txd_cmd | txd_lower |
1198 (uint16_t)desc_array.descriptor[counter].length));
1199 current_tx_desc->upper.data = htole32((txd_upper));
/* Wrap the ring index (wrap-to-0 statement elided). */
1200 if (++i == adapter->num_tx_desc)
1203 tx_buffer->m_head = NULL;
/* Non-82544 path: one descriptor per DMA segment. */
1207 tx_buffer = &adapter->tx_buffer_area[i];
1208 current_tx_desc = &adapter->tx_desc_base[i];
1210 current_tx_desc->buffer_addr = htole64(q.segs[j].ds_addr);
1211 current_tx_desc->lower.data = htole32(
1212 adapter->txd_cmd | txd_lower | q.segs[j].ds_len);
1213 current_tx_desc->upper.data = htole32(txd_upper);
1215 if (++i == adapter->num_tx_desc)
1218 tx_buffer->m_head = NULL;
1222 adapter->next_avail_tx_desc = i;
1223 if (adapter->pcix_82544)
1224 adapter->num_tx_desc_avail -= txd_used;
1226 adapter->num_tx_desc_avail -= q.nsegs;
/* Tag the last descriptor with the VLAN id, API per OS version. */
1228 #if defined(__DragonFly__) || __FreeBSD_version < 500000
1230 /* Set the vlan id */
1231 current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
1234 /* Set the vlan id */
1235 current_tx_desc->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag));
1238 /* Tell hardware to add tag */
1239 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
/* Only the last descriptor's em_buffer keeps the mbuf/map for teardown. */
1242 tx_buffer->m_head = m_head;
1243 tx_buffer->map = q.map;
1244 bus_dmamap_sync(adapter->txtag, q.map, BUS_DMASYNC_PREWRITE);
1247 * Last Descriptor of Packet needs End Of Packet (EOP)
1249 current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);
1252 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
1253 * that this frame is available to transmit.
/* 82547 at half duplex must go through the FIFO-hang workaround path
 * instead of writing TDT directly. */
1255 if (adapter->hw.mac_type == em_82547 &&
1256 adapter->link_duplex == HALF_DUPLEX) {
1257 em_82547_move_tail(adapter);
1259 E1000_WRITE_REG(&adapter->hw, TDT, i);
1260 if (adapter->hw.mac_type == em_82547) {
1261 em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len);
1268 /*********************************************************************
1270 * 82547 workaround to avoid controller hang in half-duplex environment.
1271 * The workaround is to avoid queuing a large packet that would span
1272 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1273 * in this case. We do that only when FIFO is quiescent.
1275 **********************************************************************/
/*
 * 82547 half-duplex workaround (see banner above): walk the descriptors
 * between the hardware tail (TDT) and the software tail, summing packet
 * lengths. If pushing them would cross the Tx FIFO boundary, defer via
 * a 1-tick callout and retry; otherwise advance TDT and account the
 * FIFO head. Also a callout handler (void *arg signature).
 * NOTE(review): per-packet length reset / EOP handling between lines
 * 1295 and 1299 is elided from this listing.
 */
1277 em_82547_move_tail(void *arg)
1280 struct adapter *adapter = arg;
1283 struct em_tx_desc *tx_desc;
1284 uint16_t length = 0;
1288 hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
1289 sw_tdt = adapter->next_avail_tx_desc;
1291 while (hw_tdt != sw_tdt) {
1292 tx_desc = &adapter->tx_desc_base[hw_tdt];
1293 length += tx_desc->lower.flags.length;
1294 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
1295 if(++hw_tdt == adapter->num_tx_desc)
/* Packet would span the FIFO wrap point: back off and re-arm. */
1299 if (em_82547_fifo_workaround(adapter, length)) {
1300 adapter->tx_fifo_wrk++;
1301 callout_reset(&adapter->tx_fifo_timer, 1,
1302 em_82547_move_tail, adapter);
1305 E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
1306 em_82547_update_fifo_head(adapter, length);
/*
 * Decide whether a packet of 'len' bytes may be pushed into the 82547
 * Tx FIFO right now. Non-zero return means "defer" (caller retries via
 * callout). Only relevant at half duplex; the FIFO reset is attempted
 * when the rounded packet would cross the FIFO boundary.
 * NOTE(review): the return statements are elided from this listing.
 */
1314 em_82547_fifo_workaround(struct adapter *adapter, int len)
1316 int fifo_space, fifo_pkt_len;
/* Round up to the 16-byte FIFO header granularity, header included. */
1318 fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1320 if (adapter->link_duplex == HALF_DUPLEX) {
1321 fifo_space = EM_82547_TX_FIFO_SIZE - adapter->tx_fifo_head;
1323 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
1324 if (em_82547_tx_fifo_reset(adapter))
/*
 * Advance the software shadow of the 82547 Tx FIFO head by the rounded
 * size of the packet(s) just queued, wrapping at the FIFO size.
 */
1335 em_82547_update_fifo_head(struct adapter *adapter, int len)
1337 int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);
1339 /* tx_fifo_head is always 16 byte aligned */
1340 adapter->tx_fifo_head += fifo_pkt_len;
1341 if (adapter->tx_fifo_head >= EM_82547_TX_FIFO_SIZE)
1342 adapter->tx_fifo_head -= EM_82547_TX_FIFO_SIZE;
/*
 * Reset the 82547 Tx FIFO pointers — only safe when the FIFO is fully
 * quiescent: descriptor head == tail, FIFO head == tail for both data
 * and saved pointers, and the FIFO packet count is zero. The Tx unit is
 * disabled around the pointer writes and re-enabled afterwards.
 * NOTE(review): return statements (success/failure) are elided here.
 */
1346 em_82547_tx_fifo_reset(struct adapter *adapter)
1350 if ( (E1000_READ_REG(&adapter->hw, TDT) ==
1351 E1000_READ_REG(&adapter->hw, TDH)) &&
1352 (E1000_READ_REG(&adapter->hw, TDFT) ==
1353 E1000_READ_REG(&adapter->hw, TDFH)) &&
1354 (E1000_READ_REG(&adapter->hw, TDFTS) ==
1355 E1000_READ_REG(&adapter->hw, TDFHS)) &&
1356 (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {
1358 /* Disable TX unit */
1359 tctl = E1000_READ_REG(&adapter->hw, TCTL);
1360 E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);
1362 /* Reset FIFO pointers */
1363 E1000_WRITE_REG(&adapter->hw, TDFT, EM_82547_TX_FIFO_BEGIN);
1364 E1000_WRITE_REG(&adapter->hw, TDFH, EM_82547_TX_FIFO_BEGIN);
1365 E1000_WRITE_REG(&adapter->hw, TDFTS, EM_82547_TX_FIFO_BEGIN);
1366 E1000_WRITE_REG(&adapter->hw, TDFHS, EM_82547_TX_FIFO_BEGIN);
1368 /* Re-enable TX unit */
1369 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1370 E1000_WRITE_FLUSH(&adapter->hw);
/* Software FIFO-head shadow restarts at 0 after a hardware reset. */
1372 adapter->tx_fifo_head = 0;
1373 adapter->tx_fifo_reset++;
/*
 * Program RCTL promiscuous bits from the interface flags:
 * IFF_PROMISC sets both unicast (UPE) and multicast (MPE) promiscuous
 * modes; IFF_ALLMULTI sets MPE only and clears UPE.
 */
1383 em_set_promisc(struct adapter *adapter)
1386 struct ifnet *ifp = &adapter->interface_data.ac_if;
1388 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1390 if (ifp->if_flags & IFF_PROMISC) {
1391 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1392 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1393 } else if (ifp->if_flags & IFF_ALLMULTI) {
1394 reg_rctl |= E1000_RCTL_MPE;
1395 reg_rctl &= ~E1000_RCTL_UPE;
1396 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/*
 * Clear both promiscuous bits (unicast UPE and multicast MPE) in RCTL,
 * returning the receiver to normal filtered operation.
 */
1401 em_disable_promisc(struct adapter *adapter)
1405 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1407 reg_rctl &= (~E1000_RCTL_UPE);
1408 reg_rctl &= (~E1000_RCTL_MPE);
1409 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1412 /*********************************************************************
1415 * This routine is called whenever multicast address list is updated.
1417 **********************************************************************/
/*
 * Rebuild the hardware multicast filter from the interface's multicast
 * list (per banner above). If the list overflows the hardware table,
 * fall back to multicast-promiscuous (MPE). The 82542 rev2.0 requires
 * the receiver to be held in reset (RCTL_RST) — with MWI temporarily
 * disabled — while the filter is updated.
 * NOTE(review): mcnt declaration/initialization and loop continue/break
 * statements are elided from this listing.
 */
1420 em_set_multi(struct adapter *adapter)
1422 uint32_t reg_rctl = 0;
1423 uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS];
1424 struct ifmultiaddr *ifma;
1426 struct ifnet *ifp = &adapter->interface_data.ac_if;
1428 IOCTL_DEBUGOUT("em_set_multi: begin");
1430 if (adapter->hw.mac_type == em_82542_rev2_0) {
1431 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1432 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1433 em_pci_clear_mwi(&adapter->hw);
1434 reg_rctl |= E1000_RCTL_RST;
1435 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
/* Collect link-layer multicast addresses into the flat mta array. */
1439 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1440 if (ifma->ifma_addr->sa_family != AF_LINK)
1443 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1446 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1447 &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS);
/* Too many groups for the filter table: accept all multicast. */
1451 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
1452 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1453 reg_rctl |= E1000_RCTL_MPE;
1454 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1456 em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1);
/* 82542 rev2.0: release the receiver reset and restore MWI. */
1458 if (adapter->hw.mac_type == em_82542_rev2_0) {
1459 reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);
1460 reg_rctl &= ~E1000_RCTL_RST;
1461 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
1463 if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1464 em_pci_set_mwi(&adapter->hw);
1468 /*********************************************************************
1471 * This routine checks for link status and updates statistics.
1473 **********************************************************************/
/*
 * Periodic (2*hz) callout: refresh link state, print link transitions,
 * update statistics counters, optionally dump debug stats, and run the
 * SmartSpeed workaround. Re-arms itself at the end.
 */
1476 em_local_timer(void *arg)
1480 struct adapter *adapter = arg;
1481 ifp = &adapter->interface_data.ac_if;
1485 em_check_for_link(&adapter->hw);
1486 em_print_link_status(adapter);
1487 em_update_stats_counters(adapter);
1488 if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING)
1489 em_print_hw_stats(adapter);
1490 em_smartspeed(adapter);
1492 callout_reset(&adapter->timer, 2*hz, em_local_timer, adapter);
/*
 * Report link transitions: on up (STATUS.LU set) while previously down,
 * fetch and print speed/duplex and reset the SmartSpeed counter; on
 * down while previously up, clear cached speed/duplex and report it.
 * No output when the state is unchanged.
 */
1498 em_print_link_status(struct adapter *adapter)
1500 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) {
1501 if (adapter->link_active == 0) {
1502 em_get_speed_and_duplex(&adapter->hw,
1503 &adapter->link_speed,
1504 &adapter->link_duplex);
1505 device_printf(adapter->dev, "Link is up %d Mbps %s\n",
1506 adapter->link_speed,
1507 ((adapter->link_duplex == FULL_DUPLEX) ?
1508 "Full Duplex" : "Half Duplex"));
1509 adapter->link_active = 1;
1510 adapter->smartspeed = 0;
1513 if (adapter->link_active == 1) {
1514 adapter->link_speed = 0;
1515 adapter->link_duplex = 0;
1516 device_printf(adapter->dev, "Link is Down\n");
1517 adapter->link_active = 0;
1522 /*********************************************************************
1524 * This routine disables all traffic on the adapter by issuing a
1525 * global reset on the MAC and deallocates TX/RX buffers.
1527 **********************************************************************/
/* em_stop body (signature line elided from this listing): disable
 * interrupts, reset the MAC, cancel the periodic and FIFO callouts,
 * free TX/RX structures, and mark the interface down for the stack. */
1533 struct adapter * adapter = arg;
1534 ifp = &adapter->interface_data.ac_if;
1536 INIT_DEBUGOUT("em_stop: begin");
1537 em_disable_intr(adapter);
1538 em_reset_hw(&adapter->hw);
1539 callout_stop(&adapter->timer);
1540 callout_stop(&adapter->tx_fifo_timer);
1541 em_free_transmit_structures(adapter);
1542 em_free_receive_structures(adapter);
1544 /* Tell the stack that the interface is no longer active */
1545 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1549 /*********************************************************************
1551 * Determine hardware revision.
1553 **********************************************************************/
/*
 * Read PCI config space to identify the board: ensure bus-master and
 * memory-access are enabled (re-enabling them with a warning if not),
 * cache vendor/device/revision/subsystem IDs, resolve the MAC type,
 * and flag the MACs (82541/82547 families) that need the PHY init
 * script.
 */
1555 em_identify_hardware(struct adapter * adapter)
1557 device_t dev = adapter->dev;
1559 /* Make sure our PCI config space has the necessary stuff set */
1560 adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1561 if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
1562 (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
1563 device_printf(dev, "Memory Access and/or Bus Master bits were not set!\n");
1564 adapter->hw.pci_cmd_word |=
1565 (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
1566 pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
1569 /* Save off the information about this board */
1570 adapter->hw.vendor_id = pci_get_vendor(dev);
1571 adapter->hw.device_id = pci_get_device(dev);
1572 adapter->hw.revision_id = pci_get_revid(dev);
1573 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
1574 adapter->hw.subsystem_id = pci_get_subdevice(dev);
1576 /* Identify the MAC */
1577 if (em_set_mac_type(&adapter->hw))
1578 device_printf(dev, "Unknown MAC Type\n");
1580 if (adapter->hw.mac_type == em_82541 ||
1581 adapter->hw.mac_type == em_82541_rev_2 ||
1582 adapter->hw.mac_type == em_82547 ||
1583 adapter->hw.mac_type == em_82547_rev_2)
1584 adapter->hw.phy_init_script = TRUE;
1587 /*********************************************************************
1589 * Initialize the hardware to a configuration as specified by the
1590 * adapter structure. The controller is reset, the EEPROM is
1591 * verified, the MAC address is set, then the shared initialization
1592 * routines are called.
1594 **********************************************************************/
/*
 * Bring the hardware to a known state (per banner above): global reset,
 * EEPROM checksum validation, part-number read, shared em_init_hw()
 * call, then an initial link check that seeds link_active and the
 * cached speed/duplex.
 * NOTE(review): error-return statements after each failure printf and
 * the else keywords are elided from this listing.
 */
1596 em_hardware_init(struct adapter *adapter)
1598 INIT_DEBUGOUT("em_hardware_init: begin");
1599 /* Issue a global reset */
1600 em_reset_hw(&adapter->hw);
1602 /* When hardware is reset, fifo_head is also reset */
1603 adapter->tx_fifo_head = 0;
1605 /* Make sure we have a good EEPROM before we read from it */
1606 if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
1607 device_printf(adapter->dev, "The EEPROM Checksum Is Not Valid\n");
1611 if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
1612 device_printf(adapter->dev, "EEPROM read error while reading part number\n");
1616 if (em_init_hw(&adapter->hw) < 0) {
1617 device_printf(adapter->dev, "Hardware Initialization Failed");
1621 em_check_for_link(&adapter->hw);
1622 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
1623 adapter->link_active = 1;
1625 adapter->link_active = 0;
1627 if (adapter->link_active) {
1628 em_get_speed_and_duplex(&adapter->hw,
1629 &adapter->link_speed,
1630 &adapter->link_duplex);
1632 adapter->link_speed = 0;
1633 adapter->link_duplex = 0;
1639 /*********************************************************************
1641 * Setup networking device structure and register an interface.
1643 **********************************************************************/
/*
 * Populate the struct ifnet (MTU, callbacks, queue length), attach the
 * ethernet interface, advertise capabilities (HW checksum on >= 82543,
 * VLAN tagging/MTU on newer FreeBSD), and register the supported media
 * types — fiber vs copper — ending with autoselect as the default.
 * NOTE(review): ifmedia data arguments and some #endif/else lines are
 * elided from this listing.
 */
1645 em_setup_interface(device_t dev, struct adapter *adapter)
1648 INIT_DEBUGOUT("em_setup_interface: begin");
1650 ifp = &adapter->interface_data.ac_if;
1651 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1652 ifp->if_mtu = ETHERMTU;
1653 ifp->if_baudrate = 1000000000;
1654 ifp->if_init = em_init;
1655 ifp->if_softc = adapter;
1656 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1657 ifp->if_ioctl = em_ioctl;
1658 ifp->if_start = em_start;
1659 ifp->if_watchdog = em_watchdog;
1660 ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;
1662 ether_ifattach(ifp, adapter->hw.mac_addr);
1664 if (adapter->hw.mac_type >= em_82543) {
1665 ifp->if_capabilities = IFCAP_HWCSUM;
1666 ifp->if_capenable = ifp->if_capabilities;
1670 * Tell the upper layer(s) we support long frames.
1672 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1673 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
1674 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
1678 * Specify the media types supported by this adapter and register
1679 * callbacks to update media and link information
1681 ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
1683 if (adapter->hw.media_type == em_media_type_fiber) {
1684 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
1686 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
/* Copper path: 10/100 half+full duplex plus 1000baseT. */
1689 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
1690 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
1692 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
1694 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
1696 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX | IFM_FDX,
1698 ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX, 0, NULL);
1700 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1701 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
1704 /*********************************************************************
1706 * Workaround for SmartSpeed on 82541 and 82547 controllers
1708 **********************************************************************/
/*
 * SmartSpeed workaround for 82541/82547 (IGP PHY): when gigabit
 * autonegotiation stalls on a Master/Slave configuration fault, toggle
 * manual master/slave configuration off, restart autoneg, and after
 * EM_SMARTSPEED_DOWNSHIFT ticks try re-enabling it (2/3-pair-cable
 * case). The counter wraps at EM_SMARTSPEED_MAX. Runs from the
 * em_local_timer callout; does nothing while link is up or when not
 * advertising 1000FD.
 */
1710 em_smartspeed(struct adapter *adapter)
1714 if (adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
1715 !adapter->hw.autoneg ||
1716 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
1719 if (adapter->smartspeed == 0) {
1721 * If Master/Slave config fault is asserted twice,
1722 * we assume back-to-back.
1724 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1725 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
1727 em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
1728 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
1729 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
1731 if (phy_tmp & CR_1000T_MS_ENABLE) {
/* Clear manual master/slave and restart autonegotiation. */
1732 phy_tmp &= ~CR_1000T_MS_ENABLE;
1733 em_write_phy_reg(&adapter->hw,
1734 PHY_1000T_CTRL, phy_tmp);
1735 adapter->smartspeed++;
1736 if (adapter->hw.autoneg &&
1737 !em_phy_setup_autoneg(&adapter->hw) &&
1738 !em_read_phy_reg(&adapter->hw, PHY_CTRL,
1740 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1741 MII_CR_RESTART_AUTO_NEG);
1742 em_write_phy_reg(&adapter->hw,
1748 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
1749 /* If still no link, perhaps using 2/3 pair cable */
1750 em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
1751 phy_tmp |= CR_1000T_MS_ENABLE;
1752 em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
1753 if (adapter->hw.autoneg &&
1754 !em_phy_setup_autoneg(&adapter->hw) &&
1755 !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
1756 phy_tmp |= (MII_CR_AUTO_NEG_EN |
1757 MII_CR_RESTART_AUTO_NEG);
1758 em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
1761 /* Restart process after EM_SMARTSPEED_MAX iterations */
1762 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
1763 adapter->smartspeed = 0;
1767 * Manage DMA'able memory.
/*
 * Generic bus_dmamap_load() callback: stores the first (single) segment
 * address into the bus_addr_t pointed to by arg.
 */
1770 em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1774 *(bus_addr_t*) arg = segs->ds_addr;
/*
 * Allocate a page-aligned, physically contiguous DMA region of 'size'
 * bytes: create a dedicated tag, allocate and map the memory, then load
 * it to obtain the bus address (via em_dmamap_cb). On any failure the
 * resources acquired so far are unwound (goto-cleanup labels elided
 * from this listing) and the dma handle fields are NULLed.
 * NOTE(review): "%llu" paired with a (uintmax_t) cast is a format
 * mismatch on platforms where uintmax_t != unsigned long long; "%ju"
 * would be correct — confirm against the full source.
 */
1778 em_dma_malloc(struct adapter *adapter, bus_size_t size,
1779 struct em_dma_alloc *dma, int mapflags)
1782 device_t dev = adapter->dev;
1784 r = bus_dma_tag_create(NULL, /* parent */
1785 PAGE_SIZE, 0, /* alignment, bounds */
1786 BUS_SPACE_MAXADDR, /* lowaddr */
1787 BUS_SPACE_MAXADDR, /* highaddr */
1788 NULL, NULL, /* filter, filterarg */
1791 size, /* maxsegsize */
1792 BUS_DMA_ALLOCNOW, /* flags */
1795 device_printf(dev, "em_dma_malloc: bus_dma_tag_create failed; "
1800 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1801 BUS_DMA_NOWAIT, &dma->dma_map);
1803 device_printf(dev, "em_dma_malloc: bus_dmammem_alloc failed; "
1804 "size %llu, error %d\n", (uintmax_t)size, r);
1808 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1812 mapflags | BUS_DMA_NOWAIT);
1814 device_printf(dev, "em_dma_malloc: bus_dmamap_load failed; "
1819 dma->dma_size = size;
/* Error unwind path: release map, memory, and tag in reverse order. */
1823 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1825 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1826 bus_dma_tag_destroy(dma->dma_tag);
1828 dma->dma_map = NULL;
1829 dma->dma_tag = NULL;
/*
 * Release a region allocated by em_dma_malloc(): unload the mapping,
 * free the memory, and destroy the tag — strict reverse of allocation.
 */
1834 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
1836 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1837 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1838 bus_dma_tag_destroy(dma->dma_tag);
1841 /*********************************************************************
1843 * Allocate memory for tx_buffer structures. The tx_buffer stores all
1844 * the information needed to transmit a packet on the wire.
1846 **********************************************************************/
/*
 * Allocate the zeroed em_buffer bookkeeping array, one entry per Tx
 * descriptor (per banner above). Prints a warning on failure; the
 * error-return statements are elided from this listing.
 */
1848 em_allocate_transmit_structures(struct adapter * adapter)
1850 adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) *
1851 adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO);
1852 if (adapter->tx_buffer_area == NULL) {
1853 device_printf(adapter->dev, "Unable to allocate tx_buffer memory\n");
1860 /*********************************************************************
1862 * Allocate and initialize transmit structures.
1864 **********************************************************************/
/*
 * Create the Tx busdma tag (up to EM_MAX_SCATTER segments, 8-cluster
 * max transfer), allocate the tx_buffer array, zero the descriptor
 * ring, and reset the ring indices / available count / checksum
 * context.
 * NOTE(review): several tag arguments and the error-return statements
 * are elided from this listing.
 */
1866 em_setup_transmit_structures(struct adapter * adapter)
1869 * Setup DMA descriptor areas.
1871 if (bus_dma_tag_create(NULL, /* parent */
1872 1, 0, /* alignment, bounds */
1873 BUS_SPACE_MAXADDR, /* lowaddr */
1874 BUS_SPACE_MAXADDR, /* highaddr */
1875 NULL, NULL, /* filter, filterarg */
1876 MCLBYTES * 8, /* maxsize */
1877 EM_MAX_SCATTER, /* nsegments */
1878 MCLBYTES * 8, /* maxsegsize */
1879 BUS_DMA_ALLOCNOW, /* flags */
1881 device_printf(adapter->dev, "Unable to allocate TX DMA tag\n");
1885 if (em_allocate_transmit_structures(adapter))
1888 bzero((void *) adapter->tx_desc_base,
1889 (sizeof(struct em_tx_desc)) * adapter->num_tx_desc);
1891 adapter->next_avail_tx_desc = 0;
1892 adapter->oldest_used_tx_desc = 0;
1894 /* Set number of descriptors available */
1895 adapter->num_tx_desc_avail = adapter->num_tx_desc;
1897 /* Set checksum context */
1898 adapter->active_checksum_context = OFFLOAD_NONE;
1903 /*********************************************************************
1905 * Enable transmit unit.
1907 **********************************************************************/
/*
 * Program the Tx unit: ring base/length (TDBAL/TDBAH/TDLEN), head/tail
 * pointers, inter-packet gap per MAC generation and media, interrupt
 * delay registers (TIDV, and TADV on >= 82540), the transmit control
 * register (TCTL) with the duplex-appropriate collision distance, and
 * the default per-descriptor command bits.
 */
1909 em_initialize_transmit_unit(struct adapter * adapter)
1912 uint32_t reg_tipg = 0;
1915 INIT_DEBUGOUT("em_initialize_transmit_unit: begin");
1917 /* Setup the Base and Length of the Tx Descriptor Ring */
1918 bus_addr = adapter->txdma.dma_paddr;
1919 E1000_WRITE_REG(&adapter->hw, TDBAL, (uint32_t)bus_addr);
1920 E1000_WRITE_REG(&adapter->hw, TDBAH, (uint32_t)(bus_addr >> 32));
1921 E1000_WRITE_REG(&adapter->hw, TDLEN,
1922 adapter->num_tx_desc * sizeof(struct em_tx_desc));
1924 /* Setup the HW Tx Head and Tail descriptor pointers */
1925 E1000_WRITE_REG(&adapter->hw, TDH, 0);
1926 E1000_WRITE_REG(&adapter->hw, TDT, 0);
1928 HW_DEBUGOUT2("Base = %x, Length = %x\n",
1929 E1000_READ_REG(&adapter->hw, TDBAL),
1930 E1000_READ_REG(&adapter->hw, TDLEN));
1932 /* Set the default values for the Tx Inter Packet Gap timer */
1933 switch (adapter->hw.mac_type) {
1934 case em_82542_rev2_0:
1935 case em_82542_rev2_1:
1936 reg_tipg = DEFAULT_82542_TIPG_IPGT;
1937 reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1938 reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
/* Default (non-82542) case: IPG depends on fiber vs copper. */
1941 if (adapter->hw.media_type == em_media_type_fiber)
1942 reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1944 reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1945 reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1946 reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1949 E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
1950 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
1951 if (adapter->hw.mac_type >= em_82540)
1952 E1000_WRITE_REG(&adapter->hw, TADV,
1953 adapter->tx_abs_int_delay.value);
1955 /* Program the Transmit Control Register */
1956 reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
1957 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
/* link_duplex == 1 is presumably FULL_DUPLEX — TODO confirm constant. */
1958 if (adapter->link_duplex == 1)
1959 reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1961 reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
1962 E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);
1964 /* Setup Transmit Descriptor Settings for this adapter */
1965 adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;
/* Request delayed Tx interrupts only when a delay is configured. */
1967 if (adapter->tx_int_delay.value > 0)
1968 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1971 /*********************************************************************
1973 * Free all transmit related data structures.
1975 **********************************************************************/
/*
 * Tear down all Tx state: for every tx_buffer still holding an mbuf,
 * unload/destroy its DMA map and free the mbuf; then free the buffer
 * array and destroy the Tx DMA tag. Safe to call when partially
 * initialized (NULL checks throughout).
 */
1977 em_free_transmit_structures(struct adapter * adapter)
1979 struct em_buffer *tx_buffer;
1982 INIT_DEBUGOUT("free_transmit_structures: begin");
1984 if (adapter->tx_buffer_area != NULL) {
1985 tx_buffer = adapter->tx_buffer_area;
1986 for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
1987 if (tx_buffer->m_head != NULL) {
1988 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
1989 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
1990 m_freem(tx_buffer->m_head);
1992 tx_buffer->m_head = NULL;
1995 if (adapter->tx_buffer_area != NULL) {
1996 free(adapter->tx_buffer_area, M_DEVBUF);
1997 adapter->tx_buffer_area = NULL;
1999 if (adapter->txtag != NULL) {
2000 bus_dma_tag_destroy(adapter->txtag);
2001 adapter->txtag = NULL;
2005 /*********************************************************************
2007 * The offload context needs to be set when we transfer the first
2008 * packet of a particular protocol (TCP/UDP). We change the
2009 * context only if the protocol type changes.
2011 **********************************************************************/
/*
 * Prepare Tx checksum offload (per banner above): set *txd_upper /
 * *txd_lower for the data descriptors, and — only when the protocol
 * (TCP vs UDP) differs from the currently loaded context — consume one
 * ring slot for a context descriptor pointing at the right checksum
 * offset fields.
 * NOTE(review): the early-return statements when the context already
 * matches, and the no-offload else branch, are elided from this
 * listing.
 */
2013 em_transmit_checksum_setup(struct adapter * adapter,
2015 uint32_t *txd_upper,
2016 uint32_t *txd_lower)
2018 struct em_context_desc *TXD;
2019 struct em_buffer *tx_buffer;
2022 if (mp->m_pkthdr.csum_flags) {
2023 if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
2024 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2025 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2026 if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
2029 adapter->active_checksum_context = OFFLOAD_TCP_IP;
2030 } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
2031 *txd_upper = E1000_TXD_POPTS_TXSM << 8;
2032 *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2033 if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
2036 adapter->active_checksum_context = OFFLOAD_UDP_IP;
2048 /* If we reach this point, the checksum offload context
2049 * needs to be reset.
2051 curr_txd = adapter->next_avail_tx_desc;
2052 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2053 TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd];
/* IP header checksum fields: start, checksum offset, end.
 * Offsets assume a plain Ethernet + IPv4 header with no options. */
2055 TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN;
2056 TXD->lower_setup.ip_fields.ipcso =
2057 ETHER_HDR_LEN + offsetof(struct ip, ip_sum);
2058 TXD->lower_setup.ip_fields.ipcse =
2059 htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1);
2061 TXD->upper_setup.tcp_fields.tucss =
2062 ETHER_HDR_LEN + sizeof(struct ip);
2063 TXD->upper_setup.tcp_fields.tucse = htole16(0);
/* L4 checksum offset depends on the active protocol context. */
2065 if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
2066 TXD->upper_setup.tcp_fields.tucso =
2067 ETHER_HDR_LEN + sizeof(struct ip) +
2068 offsetof(struct tcphdr, th_sum);
2069 } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
2070 TXD->upper_setup.tcp_fields.tucso =
2071 ETHER_HDR_LEN + sizeof(struct ip) +
2072 offsetof(struct udphdr, uh_sum);
2075 TXD->tcp_seg_setup.data = htole32(0);
2076 TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT);
2078 tx_buffer->m_head = NULL;
/* The context descriptor consumes one ring slot. */
2080 if (++curr_txd == adapter->num_tx_desc)
2083 adapter->num_tx_desc_avail--;
2084 adapter->next_avail_tx_desc = curr_txd;
2087 /**********************************************************************
2089 * Examine each tx_buffer in the used queue. If the hardware is done
2090 * processing the packet then free associated resources. The
2091 * tx_buffer is put back on the free queue.
2093 **********************************************************************/
/*
 * Reclaim completed Tx descriptors (per banner above): starting at the
 * oldest used slot, walk while the DD (descriptor done) status bit is
 * set, freeing mbufs and DMA maps and counting reclaimed slots. Then
 * update OACTIVE and the watchdog timer based on how much room is now
 * available.
 * NOTE(review): num_avail increment inside the loop and the
 * ifp->if_timer = 0 statement are elided from this listing.
 */
2096 em_clean_transmit_interrupts(struct adapter *adapter)
2100 struct em_buffer *tx_buffer;
2101 struct em_tx_desc *tx_desc;
2102 struct ifnet *ifp = &adapter->interface_data.ac_if;
/* Nothing outstanding: fast path out. */
2104 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2109 adapter->clean_tx_interrupts++;
2111 num_avail = adapter->num_tx_desc_avail;
2112 i = adapter->oldest_used_tx_desc;
2114 tx_buffer = &adapter->tx_buffer_area[i];
2115 tx_desc = &adapter->tx_desc_base[i];
2117 while(tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2118 tx_desc->upper.data = 0;
2121 if (tx_buffer->m_head) {
2123 bus_dmamap_sync(adapter->txtag, tx_buffer->map,
2124 BUS_DMASYNC_POSTWRITE);
2125 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2126 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2128 m_freem(tx_buffer->m_head);
2129 tx_buffer->m_head = NULL;
2132 if (++i == adapter->num_tx_desc)
2135 tx_buffer = &adapter->tx_buffer_area[i];
2136 tx_desc = &adapter->tx_desc_base[i];
2139 adapter->oldest_used_tx_desc = i;
2142 * If we have enough room, clear IFF_OACTIVE to tell the stack
2143 * that it is OK to send packets.
2144 * If there are no pending descriptors, clear the timeout. Otherwise,
2145 * if some descriptors have been freed, restart the timeout.
2147 if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
2148 ifp->if_flags &= ~IFF_OACTIVE;
2149 if (num_avail == adapter->num_tx_desc)
2151 else if (num_avail == adapter->num_tx_desc_avail)
2152 ifp->if_timer = EM_TX_TIMEOUT;
2154 adapter->num_tx_desc_avail = num_avail;
2158 /*********************************************************************
2160 * Get a buffer from system mbuf buffer pool.
2162 **********************************************************************/
/*
 * Install an mbuf cluster into Rx ring slot i: allocate a new cluster
 * when nmp is NULL (counting failures), otherwise reuse the supplied
 * mbuf with its data pointer reset. Adjusts for ETHER_ALIGN at
 * standard MTU, DMA-loads the buffer, and writes its bus address into
 * the Rx descriptor.
 * NOTE(review): error-return statements after allocation/load failure
 * are elided from this listing.
 */
2164 em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp, int how)
2166 struct mbuf *mp = nmp;
2167 struct em_buffer *rx_buffer;
2172 ifp = &adapter->interface_data.ac_if;
2175 mp = m_getcl(how, MT_DATA, M_PKTHDR);
2177 adapter->mbuf_cluster_failed++;
2180 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
/* Reuse branch: reset lengths and rewind data to the cluster start. */
2182 mp->m_len = mp->m_pkthdr.len = MCLBYTES;
2183 mp->m_data = mp->m_ext.ext_buf;
/* Align the IP header at standard MTU (no room for this at jumbo). */
2186 if (ifp->if_mtu <= ETHERMTU)
2187 m_adj(mp, ETHER_ALIGN);
2189 rx_buffer = &adapter->rx_buffer_area[i];
2192 * Using memory from the mbuf cluster pool, invoke the
2193 * bus_dma machinery to arrange the memory mapping.
2195 error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
2196 mtod(mp, void *), mp->m_len,
2197 em_dmamap_cb, &paddr, 0);
2202 rx_buffer->m_head = mp;
2203 adapter->rx_desc_base[i].buffer_addr = htole64(paddr);
2204 bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
2209 /*********************************************************************
2211 * Allocate memory for rx_buffer structures. Since we use one
2212 * rx_buffer per received packet, the maximum number of rx_buffer's
2213 * that we'll need is equal to the number of receive descriptors
2214 * that we've allocated.
2216 **********************************************************************/
/*
 * Allocate Rx bookkeeping (per banner above): the em_buffer array (one
 * per Rx descriptor), the Rx busdma tag (single cluster-size segment),
 * a DMA map per buffer, and an initial mbuf cluster in every slot via
 * em_get_buf(). On failure, unwinds tag and buffer array.
 * NOTE(review): some error-path goto/return statements are elided from
 * this listing.
 */
2218 em_allocate_receive_structures(struct adapter *adapter)
2221 struct em_buffer *rx_buffer;
2223 size = adapter->num_rx_desc * sizeof(struct em_buffer);
2224 adapter->rx_buffer_area = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
2226 error = bus_dma_tag_create(NULL, /* parent */
2227 1, 0, /* alignment, bounds */
2228 BUS_SPACE_MAXADDR, /* lowaddr */
2229 BUS_SPACE_MAXADDR, /* highaddr */
2230 NULL, NULL, /* filter, filterarg */
2231 MCLBYTES, /* maxsize */
2233 MCLBYTES, /* maxsegsize */
2234 BUS_DMA_ALLOCNOW, /* flags */
2237 device_printf(adapter->dev, "em_allocate_receive_structures: "
2238 "bus_dma_tag_create failed; error %u\n", error);
2242 rx_buffer = adapter->rx_buffer_area;
2243 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2244 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
2247 device_printf(adapter->dev,
2248 "em_allocate_receive_structures: "
2249 "bus_dmamap_create failed; error %u\n",
/* Populate every ring slot with an initial cluster. */
2255 for (i = 0; i < adapter->num_rx_desc; i++) {
2256 error = em_get_buf(i, adapter, NULL, MB_WAIT);
2258 adapter->rx_buffer_area[i].m_head = NULL;
2259 adapter->rx_desc_base[i].buffer_addr = 0;
/* Failure unwind: destroy tag and free the buffer array. */
2267 bus_dma_tag_destroy(adapter->rxtag);
2269 adapter->rxtag = NULL;
2270 free(adapter->rx_buffer_area, M_DEVBUF);
2271 adapter->rx_buffer_area = NULL;
2275 /*********************************************************************
2277 * Allocate and initialize receive structures.
2279 **********************************************************************/
/*
 * Zero the Rx descriptor ring, allocate the receive structures, and
 * reset the ring scan index. Error-return statements are elided from
 * this listing.
 */
2281 em_setup_receive_structures(struct adapter *adapter)
2283 bzero((void *) adapter->rx_desc_base,
2284 (sizeof(struct em_rx_desc)) * adapter->num_rx_desc);
2286 if (em_allocate_receive_structures(adapter))
2289 /* Setup our descriptor pointers */
2290 adapter->next_rx_desc_to_check = 0;
2294 /*********************************************************************
2296 * Enable receive unit.
2298 **********************************************************************/
2300 em_initialize_receive_unit(struct adapter *adapter)
/*
 * Program the hardware receive unit: interrupt delay timers, descriptor
 * ring base/length, head/tail pointers, receive-control (RCTL) buffer
 * size and filtering bits, and RX checksum offload — then enable
 * receives by writing RCTL last.
 * NOTE(review): braces, 'else' arms and 'break' statements are missing
 * from this extract; read with the upstream if_em.c alongside.
 */
2303 uint32_t reg_rxcsum;
2307 INIT_DEBUGOUT("em_initialize_receive_unit: begin");
2309 ifp = &adapter->interface_data.ac_if;
2311 /* Make sure receives are disabled while setting up the descriptor ring */
2312 E1000_WRITE_REG(&adapter->hw, RCTL, 0);
2314 /* Set the Receive Delay Timer Register */
2315 E1000_WRITE_REG(&adapter->hw, RDTR,
2316 adapter->rx_int_delay.value | E1000_RDT_FPDB);
/* Absolute RX delay and ITR only exist on 82540 and newer MACs. */
2318 if(adapter->hw.mac_type >= em_82540) {
2319 E1000_WRITE_REG(&adapter->hw, RADV,
2320 adapter->rx_abs_int_delay.value);
2322 /* Set the interrupt throttling rate in 256ns increments */
2323 if (em_int_throttle_ceil) {
2324 E1000_WRITE_REG(&adapter->hw, ITR,
2325 1000000000 / 256 / em_int_throttle_ceil);
/* NOTE(review): an 'else' presumably precedes this ITR-disable write. */
2327 E1000_WRITE_REG(&adapter->hw, ITR, 0);
2331 /* Setup the Base and Length of the Rx Descriptor Ring */
2332 bus_addr = adapter->rxdma.dma_paddr;
2333 E1000_WRITE_REG(&adapter->hw, RDBAL, (uint32_t)bus_addr);
2334 E1000_WRITE_REG(&adapter->hw, RDBAH, (uint32_t)(bus_addr >> 32));
2335 E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
2336 sizeof(struct em_rx_desc));
2338 /* Setup the HW Rx Head and Tail Descriptor Pointers */
2339 E1000_WRITE_REG(&adapter->hw, RDH, 0);
2340 E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);
2342 /* Setup the Receive Control Register */
2343 reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2344 E1000_RCTL_RDMTS_HALF |
2345 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
2347 if (adapter->hw.tbi_compatibility_on == TRUE)
2348 reg_rctl |= E1000_RCTL_SBP;
/* Select the RCTL buffer-size encoding matching rx_buffer_len.
 * NOTE(review): 'break' after each case is not visible in this
 * extract — in the upstream driver every case breaks. */
2350 switch (adapter->rx_buffer_len) {
2352 case EM_RXBUFFER_2048:
2353 reg_rctl |= E1000_RCTL_SZ_2048;
2355 case EM_RXBUFFER_4096:
2356 reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2358 case EM_RXBUFFER_8192:
2359 reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
2361 case EM_RXBUFFER_16384:
2362 reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
/* Long Packet Enable when the configured MTU exceeds standard Ethernet. */
2366 if (ifp->if_mtu > ETHERMTU)
2367 reg_rctl |= E1000_RCTL_LPE;
2369 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2370 if ((adapter->hw.mac_type >= em_82543) &&
2371 (ifp->if_capenable & IFCAP_RXCSUM)) {
2372 reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
2373 reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
2374 E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
2377 /* Enable Receives */
2378 E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
2381 /*********************************************************************
2383 * Free receive related data structures.
2385 **********************************************************************/
2387 em_free_receive_structures(struct adapter *adapter)
/*
 * Tear down everything em_allocate_receive_structures() built: unload
 * and destroy each per-descriptor DMA map, free any attached mbuf,
 * then free the buffer array and destroy the RX DMA tag.  Pointers are
 * NULLed after release so the routine is safe to call more than once.
 */
2389 struct em_buffer *rx_buffer;
2392 INIT_DEBUGOUT("free_receive_structures: begin");
2394 if (adapter->rx_buffer_area != NULL) {
2395 rx_buffer = adapter->rx_buffer_area;
2396 for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
2397 if (rx_buffer->map != NULL) {
2398 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2399 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
2401 if (rx_buffer->m_head != NULL)
2402 m_freem(rx_buffer->m_head);
2403 rx_buffer->m_head = NULL;
2406 if (adapter->rx_buffer_area != NULL) {
2407 free(adapter->rx_buffer_area, M_DEVBUF);
2408 adapter->rx_buffer_area = NULL;
2410 if (adapter->rxtag != NULL) {
2411 bus_dma_tag_destroy(adapter->rxtag);
2412 adapter->rxtag = NULL;
2416 /*********************************************************************
2418 * This routine executes in interrupt context. It replenishes
2419 * the mbufs in the descriptor and sends data which has been
2420 * dma'ed into host memory to upper layer.
2422 * We loop at most count times if count is > 0, or until done if
2425 *********************************************************************/
2427 em_process_receive_interrupts(struct adapter *adapter, int count)
/*
 * RX completion handler.  Walks descriptors from
 * next_rx_desc_to_check while the hardware DD (descriptor done) bit is
 * set, chaining multi-descriptor frames via adapter->fmp/lmp, handing
 * completed packets to the stack via if_input, and replenishing each
 * consumed slot with a fresh cluster via em_get_buf().
 * NOTE(review): braces, 'else' arms and several statements are missing
 * from this extract; statement pairing below is inferred and must be
 * checked against the complete file.
 */
2431 uint8_t accept_frame = 0;
2433 uint16_t len, desc_len, prev_len_adj;
2436 /* Pointer to the receive descriptor being examined. */
2437 struct em_rx_desc *current_desc;
2439 ifp = &adapter->interface_data.ac_if;
2440 i = adapter->next_rx_desc_to_check;
2441 current_desc = &adapter->rx_desc_base[i];
/* Fast exit when no descriptor has completed since last pass. */
2443 if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
2445 adapter->no_pkts_avail++;
2449 while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) {
2450 mp = adapter->rx_buffer_area[i].m_head;
2451 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
2452 BUS_DMASYNC_POSTREAD);
2456 desc_len = le16toh(current_desc->length);
/* EOP: last descriptor of the frame — strip the trailing CRC.  If the
 * final descriptor holds fewer than 4 bytes the remainder of the CRC
 * sits in the previous mbuf; prev_len_adj trims it there. */
2457 if (current_desc->status & E1000_RXD_STAT_EOP) {
2460 if (desc_len < ETHER_CRC_LEN) {
2462 prev_len_adj = ETHER_CRC_LEN - desc_len;
2465 len = desc_len - ETHER_CRC_LEN;
/* Error path: for TBI-compatible fiber parts a carrier-extend error
 * may still be an acceptable frame (TBI_ACCEPT); stats are adjusted
 * and the frame kept, otherwise it is dropped below. */
2472 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2474 uint32_t pkt_len = desc_len;
2476 if (adapter->fmp != NULL)
2477 pkt_len += adapter->fmp->m_pkthdr.len;
2479 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
2481 if (TBI_ACCEPT(&adapter->hw, current_desc->status,
2482 current_desc->errors,
2483 pkt_len, last_byte)) {
2484 em_tbi_adjust_stats(&adapter->hw,
2487 adapter->hw.mac_addr);
/* Refill the slot.  On ENOBUFS the old mbuf is recycled back into the
 * ring and the partially assembled chain (fmp) is discarded. */
2497 if (em_get_buf(i, adapter, NULL, MB_DONTWAIT) == ENOBUFS) {
2498 adapter->dropped_pkts++;
2499 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2500 if (adapter->fmp != NULL)
2501 m_freem(adapter->fmp);
2502 adapter->fmp = NULL;
2503 adapter->lmp = NULL;
2507 /* Assign correct length to the current fragment */
/* First fragment starts a new chain; later fragments are appended and
 * the packet-header length accumulated on fmp. */
2510 if (adapter->fmp == NULL) {
2511 mp->m_pkthdr.len = len;
2512 adapter->fmp = mp; /* Store the first mbuf */
2515 /* Chain mbuf's together */
2516 mp->m_flags &= ~M_PKTHDR;
2518 * Adjust length of previous mbuf in chain if we
2519 * received less than 4 bytes in the last descriptor.
2521 if (prev_len_adj > 0) {
2522 adapter->lmp->m_len -= prev_len_adj;
2523 adapter->fmp->m_pkthdr.len -= prev_len_adj;
2525 adapter->lmp->m_next = mp;
2526 adapter->lmp = adapter->lmp->m_next;
2527 adapter->fmp->m_pkthdr.len += len;
/* Complete frame: set rcvif, record checksum-offload results, strip or
 * tag the VLAN id, and pass the chain to the network stack. */
2531 adapter->fmp->m_pkthdr.rcvif = ifp;
/* DragonFly / pre-5.x FreeBSD and newer FreeBSD differ in the
 * VLAN_INPUT_TAG signature, hence the two compile-time variants. */
2534 #if defined(__DragonFly__) || __FreeBSD_version < 500000
2535 em_receive_checksum(adapter, current_desc,
2537 if (current_desc->status & E1000_RXD_STAT_VP)
2538 VLAN_INPUT_TAG(adapter->fmp,
2539 (current_desc->special &
2540 E1000_RXD_SPC_VLAN_MASK));
2542 (*ifp->if_input)(ifp, adapter->fmp);
2544 em_receive_checksum(adapter, current_desc,
2546 if (current_desc->status & E1000_RXD_STAT_VP)
2547 VLAN_INPUT_TAG(ifp, adapter->fmp,
2548 (current_desc->special &
2549 E1000_RXD_SPC_VLAN_MASK),
2550 adapter->fmp = NULL);
2552 if (adapter->fmp != NULL)
2553 (*ifp->if_input)(ifp, adapter->fmp);
2555 adapter->fmp = NULL;
2556 adapter->lmp = NULL;
/* Rejected frame (accept_frame false): recycle the mbuf and drop any
 * partial chain. */
2559 adapter->dropped_pkts++;
2560 em_get_buf(i, adapter, mp, MB_DONTWAIT);
2561 if (adapter->fmp != NULL)
2562 m_freem(adapter->fmp);
2563 adapter->fmp = NULL;
2564 adapter->lmp = NULL;
2567 /* Zero out the receive descriptors status */
2568 current_desc->status = 0;
2570 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */
2571 E1000_WRITE_REG(&adapter->hw, RDT, i);
2573 /* Advance our pointers to the next descriptor */
2574 if (++i == adapter->num_rx_desc) {
2576 current_desc = adapter->rx_desc_base;
2580 adapter->next_rx_desc_to_check = i;
2583 /*********************************************************************
2585 * Verify that the hardware indicated that the checksum is valid.
2586 * Inform the stack about the status of checksum so that stack
2587 * doesn't spend time verifying the checksum.
2589 *********************************************************************/
2591 em_receive_checksum(struct adapter *adapter,
2592 struct em_rx_desc *rx_desc,
/*
 * Translate the hardware RX checksum status/error bits into mbuf
 * csum_flags so the stack can skip software verification.  On pre-82543
 * parts, or when the hardware set "Ignore Checksum" (IXSM), no offload
 * information is reported.
 */
2595 /* 82543 or newer only */
2596 if ((adapter->hw.mac_type < em_82543) ||
2597 /* Ignore Checksum bit is set */
2598 (rx_desc->status & E1000_RXD_STAT_IXSM)) {
2599 mp->m_pkthdr.csum_flags = 0;
/* IPCS set: hardware checked the IP header checksum; IPE indicates it
 * failed, in which case no flags are reported. */
2603 if (rx_desc->status & E1000_RXD_STAT_IPCS) {
2605 if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
2606 /* IP Checksum Good */
2607 mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
2608 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2610 mp->m_pkthdr.csum_flags = 0;
/* TCPCS set with no TCPE error: report a fully validated TCP/UDP
 * checksum (pseudo-header included) with csum_data = 0xffff. */
2614 if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
2616 if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
2617 mp->m_pkthdr.csum_flags |=
2618 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2619 mp->m_pkthdr.csum_data = htons(0xffff);
2626 em_enable_vlans(struct adapter *adapter)
/*
 * Enable hardware VLAN support: program the VLAN Ethertype (VET)
 * register and set the VLAN Mode Enable (VME) bit in CTRL so the MAC
 * strips/inserts 802.1Q tags.
 */
2630 E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN);
2632 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2633 ctrl |= E1000_CTRL_VME;
2634 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2638 em_enable_intr(struct adapter *adapter)
/* Unmask the driver's standard interrupt causes via the IMS register. */
2640 E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK));
2644 em_disable_intr(struct adapter *adapter)
/*
 * Mask all interrupt causes except RXSEQ via the IMC register.
 * NOTE(review): RXSEQ is deliberately left enabled here — presumably
 * for link sequence-error detection; confirm against upstream.
 */
2646 E1000_WRITE_REG(&adapter->hw, IMC,
2647 (0xffffffff & ~E1000_IMC_RXSEQ));
2651 em_is_valid_ether_addr(uint8_t *addr)
/*
 * Reject multicast addresses (low bit of the first octet set) and the
 * all-zero address; anything else is considered a valid unicast MAC.
 */
2653 char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
2655 if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
2662 em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
/* Shared-code callback: write a 16-bit value to PCI config space. */
2664 pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2);
2668 em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value)
/* Shared-code callback: read a 16-bit value from PCI config space. */
2670 *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2);
2674 em_pci_set_mwi(struct em_hw *hw)
/* Set the Memory-Write-Invalidate bit in the saved PCI command word. */
2676 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2677 (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2);
2681 em_pci_clear_mwi(struct em_hw *hw)
/* Clear the Memory-Write-Invalidate bit in the saved PCI command word. */
2683 pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND,
2684 (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2);
2688 em_read_reg_io(struct em_hw *hw, uint32_t offset)
/*
 * I/O-mapped register read: write the register offset to the IOADDR
 * window (offset 0), then read the value from the IODATA window
 * (offset 4).
 */
2690 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2691 return(bus_space_read_4(hw->reg_io_tag, hw->reg_io_handle, 4));
2695 em_write_reg_io(struct em_hw *hw, uint32_t offset, uint32_t value)
/*
 * I/O-mapped register write: write the register offset to the IOADDR
 * window (offset 0), then the value to the IODATA window (offset 4).
 */
2697 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 0, offset);
2698 bus_space_write_4(hw->reg_io_tag, hw->reg_io_handle, 4, value);
2701 /*********************************************************************
2702 * 82544 Coexistence issue workaround.
2703 * There are 2 issues.
2704 * 1. Transmit Hang issue.
2705 * To detect this issue, following equation can be used...
2706 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2707 * If SUM[3:0] is in between 1 to 4, we will have this issue.
2710 * To detect this issue, following equation can be used...
2711 * SIZE[3:0] + ADDR[2:0] = SUM[3:0].
2712 * If SUM[3:0] is in between 9 to c, we will have this issue.
2716 * Make sure we do not have ending address as 1,2,3,4(Hang) or
2719 *************************************************************************/
2721 em_fill_descriptors(uint64_t address, uint32_t length, PDESC_ARRAY desc_array)
/*
 * Split a DMA segment into one or two descriptor entries so that the
 * segment's ending alignment avoids the 82544 hang/DAC issues described
 * above; returns the number of descriptor entries filled (1 or 2).
 */
2723 /* Since issue is sensitive to length and address.*/
2724 /* Let us first check the address...*/
2725 uint32_t safe_terminator;
/* NOTE(review): this first single-descriptor early return appears
 * unconditional here, which would make the rest of the function dead
 * code — an 'if' guard (skipping the workaround when it cannot apply)
 * seems to be missing from this extract; verify against upstream. */
2727 desc_array->descriptor[0].address = address;
2728 desc_array->descriptor[0].length = length;
2729 desc_array->elements = 1;
2730 return(desc_array->elements);
/* safe_terminator = (ADDR[2:0] + SIZE[3:0]) & 0xF, per the equation in
 * the banner comment above. */
2732 safe_terminator = (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
2733 /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */
2734 if (safe_terminator == 0 ||
2735 (safe_terminator > 4 && safe_terminator < 9) ||
2736 (safe_terminator > 0xC && safe_terminator <= 0xF)) {
2737 desc_array->descriptor[0].address = address;
2738 desc_array->descriptor[0].length = length;
2739 desc_array->elements = 1;
2740 return(desc_array->elements);
/* Problematic alignment: split off the final 4 bytes into a second
 * descriptor so neither piece ends at a hazardous address. */
2743 desc_array->descriptor[0].address = address;
2744 desc_array->descriptor[0].length = length - 4;
2745 desc_array->descriptor[1].address = address + (length - 4);
2746 desc_array->descriptor[1].length = 4;
2747 desc_array->elements = 2;
2748 return(desc_array->elements);
2751 /**********************************************************************
2753 * Update the board statistics counters.
2755 **********************************************************************/
2757 em_update_stats_counters(struct adapter *adapter)
/*
 * Accumulate the hardware statistics registers (which are
 * clear-on-read) into the software adapter->stats block, then refresh
 * the ifnet counters the OS reports.  Symbol/sequence error counters
 * are only sampled on copper or when the link is up, since they read
 * as garbage on fiber with link down.
 */
2761 if (adapter->hw.media_type == em_media_type_copper ||
2762 (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
2763 adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS);
2764 adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC);
2766 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS);
2767 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC);
2768 adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC);
2769 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL);
2771 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC);
2772 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL);
2773 adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC);
2774 adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC);
2775 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC);
2776 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC);
2777 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC);
2778 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC);
2779 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC);
2780 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC);
2781 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64);
2782 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127);
2783 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255);
2784 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511);
2785 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023);
2786 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522);
2787 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC);
2788 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC);
2789 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC);
2790 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC);
2792 /* For the 64-bit byte counters the low dword must be read first. */
2793 /* Both registers clear on the read of the high dword */
2795 adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL);
2796 adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH);
2797 adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL);
2798 adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH);
2800 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC);
2801 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC);
2802 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC);
2803 adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC);
2804 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC);
2806 adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL);
2807 adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH);
2808 adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL);
2809 adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH);
2811 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR);
2812 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT);
2813 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64);
2814 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127);
2815 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255);
2816 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511);
2817 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023);
2818 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522);
2819 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC);
2820 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC);
/* These counters only exist on 82543 and newer MACs. */
2822 if (adapter->hw.mac_type >= em_82543) {
2823 adapter->stats.algnerrc +=
2824 E1000_READ_REG(&adapter->hw, ALGNERRC);
2825 adapter->stats.rxerrc +=
2826 E1000_READ_REG(&adapter->hw, RXERRC);
2827 adapter->stats.tncrs +=
2828 E1000_READ_REG(&adapter->hw, TNCRS);
2829 adapter->stats.cexterr +=
2830 E1000_READ_REG(&adapter->hw, CEXTERR);
2831 adapter->stats.tsctc +=
2832 E1000_READ_REG(&adapter->hw, TSCTC);
2833 adapter->stats.tsctfc +=
2834 E1000_READ_REG(&adapter->hw, TSCTFC);
2836 ifp = &adapter->interface_data.ac_if;
2838 /* Fill out the OS statistics structure */
2839 ifp->if_ibytes = adapter->stats.gorcl;
2840 ifp->if_obytes = adapter->stats.gotcl;
2841 ifp->if_imcasts = adapter->stats.mprc;
2842 ifp->if_collisions = adapter->stats.colc;
/* Input errors: driver drops plus the hardware receive error classes. */
2845 ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
2846 adapter->stats.crcerrs + adapter->stats.algnerrc +
2847 adapter->stats.rlec + adapter->stats.rnbc +
2848 adapter->stats.mpc + adapter->stats.cexterr;
/* Output errors: excessive and late collisions. */
2851 ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol;
2855 /**********************************************************************
2857 * This routine is called only when em_display_debug_stats is enabled.
2858 * This routine provides a way to take a look at important statistics
2859 * maintained by the driver and hardware.
2861 **********************************************************************/
2863 em_print_debug_info(struct adapter *adapter)
/*
 * Dump driver-internal debug state (interrupt delay registers, TX FIFO
 * workaround counters, descriptor availability and drop counters) to
 * the console via device_printf().
 */
2865 device_t dev= adapter->dev;
2866 uint8_t *hw_addr = adapter->hw.hw_addr;
2868 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
2869 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
2870 E1000_READ_REG(&adapter->hw, TIDV),
2871 E1000_READ_REG(&adapter->hw, TADV));
2872 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
2873 E1000_READ_REG(&adapter->hw, RDTR),
2874 E1000_READ_REG(&adapter->hw, RADV));
2876 device_printf(dev, "Packets not Avail = %ld\n", adapter->no_pkts_avail);
2877 device_printf(dev, "CleanTxInterrupts = %ld\n",
2878 adapter->clean_tx_interrupts);
2880 device_printf(dev, "fifo workaround = %lld, fifo_reset = %lld\n",
2881 (long long)adapter->tx_fifo_wrk,
2882 (long long)adapter->tx_fifo_reset);
2883 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
2884 E1000_READ_REG(&adapter->hw, TDH),
2885 E1000_READ_REG(&adapter->hw, TDT));
2886 device_printf(dev, "Num Tx descriptors avail = %d\n",
2887 adapter->num_tx_desc_avail);
2888 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
2889 adapter->no_tx_desc_avail1);
2890 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
2891 adapter->no_tx_desc_avail2);
2892 device_printf(dev, "Std mbuf failed = %ld\n",
2893 adapter->mbuf_alloc_failed);
2894 device_printf(dev, "Std mbuf cluster failed = %ld\n",
2895 adapter->mbuf_cluster_failed);
2896 device_printf(dev, "Driver dropped packets = %ld\n",
2897 adapter->dropped_pkts);
2901 em_print_hw_stats(struct adapter *adapter)
/*
 * Dump the accumulated hardware statistics block (collision, error,
 * flow-control and good-packet counters) to the console via
 * device_printf().  Counters are cast to long long for the %lld
 * format since their native width may differ per platform.
 */
2903 device_t dev= adapter->dev;
2905 device_printf(dev, "Adapter: %p\n", adapter);
2907 device_printf(dev, "Excessive collisions = %lld\n",
2908 (long long)adapter->stats.ecol);
2909 device_printf(dev, "Symbol errors = %lld\n",
2910 (long long)adapter->stats.symerrs);
2911 device_printf(dev, "Sequence errors = %lld\n",
2912 (long long)adapter->stats.sec);
2913 device_printf(dev, "Defer count = %lld\n",
2914 (long long)adapter->stats.dc);
2916 device_printf(dev, "Missed Packets = %lld\n",
2917 (long long)adapter->stats.mpc);
2918 device_printf(dev, "Receive No Buffers = %lld\n",
2919 (long long)adapter->stats.rnbc);
2920 device_printf(dev, "Receive length errors = %lld\n",
2921 (long long)adapter->stats.rlec);
2922 device_printf(dev, "Receive errors = %lld\n",
2923 (long long)adapter->stats.rxerrc);
2924 device_printf(dev, "Crc errors = %lld\n",
2925 (long long)adapter->stats.crcerrs);
2926 device_printf(dev, "Alignment errors = %lld\n",
2927 (long long)adapter->stats.algnerrc);
2928 device_printf(dev, "Carrier extension errors = %lld\n",
2929 (long long)adapter->stats.cexterr);
2931 device_printf(dev, "XON Rcvd = %lld\n",
2932 (long long)adapter->stats.xonrxc);
2933 device_printf(dev, "XON Xmtd = %lld\n",
2934 (long long)adapter->stats.xontxc);
2935 device_printf(dev, "XOFF Rcvd = %lld\n",
2936 (long long)adapter->stats.xoffrxc);
2937 device_printf(dev, "XOFF Xmtd = %lld\n",
2938 (long long)adapter->stats.xofftxc);
2940 device_printf(dev, "Good Packets Rcvd = %lld\n",
2941 (long long)adapter->stats.gprc);
2942 device_printf(dev, "Good Packets Xmtd = %lld\n",
2943 (long long)adapter->stats.gptc);
2947 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler: writing a value triggers em_print_debug_info() for
 * the adapter passed in arg1.  Read-only accesses (no newptr) just
 * return the handled int.
 */
2951 struct adapter *adapter;
2954 error = sysctl_handle_int(oidp, &result, 0, req);
2956 if (error || !req->newptr)
/* A new value was written — dump the debug info. */
2960 adapter = (struct adapter *)arg1;
2961 em_print_debug_info(adapter);
2968 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler: writing a value triggers em_print_hw_stats() for the
 * adapter passed in arg1.  Read-only accesses (no newptr) just return
 * the handled int.
 */
2972 struct adapter *adapter;
2975 error = sysctl_handle_int(oidp, &result, 0, req);
2977 if (error || !req->newptr)
/* A new value was written — dump the hardware statistics. */
2981 adapter = (struct adapter *)arg1;
2982 em_print_hw_stats(adapter);
2989 em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
/*
 * Sysctl handler for the interrupt-delay knobs registered by
 * em_add_int_delay_sysctl().  Validates the new microsecond value,
 * converts it to hardware ticks, and read-modify-writes the low 16
 * bits of the register recorded in info->offset, with per-register
 * special cases (RDTR needs FPDB; TIDV of 0 must not be written and
 * instead clears the IDE bit in the TX command template).
 * NOTE(review): local declarations, 'return' statements and some case
 * bodies are missing from this extract.
 */
2991 struct em_int_delay_info *info;
2992 struct adapter *adapter;
2999 info = (struct em_int_delay_info *)arg1;
3000 adapter = info->adapter;
3001 usecs = info->value;
3002 error = sysctl_handle_int(oidp, &usecs, 0, req);
3003 if (error != 0 || req->newptr == NULL)
/* Reject out-of-range values; 65535 ticks is the 16-bit register max. */
3005 if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535))
3007 info->value = usecs;
3008 ticks = E1000_USECS_TO_TICKS(usecs);
3011 regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
3012 regval = (regval & ~0xffff) | (ticks & 0xffff);
3013 /* Handle a few special cases. */
3014 switch (info->offset) {
3016 case E1000_82542_RDTR:
3017 regval |= E1000_RDT_FPDB;
3020 case E1000_82542_TIDV:
3022 adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
3023 /* Don't write 0 into the TIDV register. */
3026 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3029 E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
3035 em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
3036 const char *description, struct em_int_delay_info *info,
3037 int offset, int value)
/*
 * Initialize an em_int_delay_info record (adapter back-pointer,
 * register offset, initial value) and register a read/write integer
 * sysctl node under the adapter's tree that dispatches to
 * em_sysctl_int_delay() with this record as its argument.
 */
3039 info->adapter = adapter;
3040 info->offset = offset;
3041 info->value = value;
3042 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
3043 SYSCTL_CHILDREN(adapter->sysctl_tree),
3044 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
3045 info, 0, em_sysctl_int_delay, "I", description);
3049 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3051 struct adapter *adapter = (void *)arg1;
3055 throttle = em_int_throttle_ceil;
3056 error = sysctl_handle_int(oidp, &throttle, 0, req);
3057 if (error || req->newptr == NULL)
3059 if (throttle < 0 || throttle > 1000000000 / 256)
3063 * Set the interrupt throttling rate in 256ns increments,
3064 * recalculate sysctl value assignment to get exact frequency.
3066 throttle = 1000000000 / 256 / throttle;
3067 em_int_throttle_ceil = 1000000000 / 256 / throttle;
3069 E1000_WRITE_REG(&adapter->hw, ITR, throttle);
3072 em_int_throttle_ceil = 0;
3074 E1000_WRITE_REG(&adapter->hw, ITR, 0);
3077 device_printf(adapter->dev, "Interrupt moderation set to %d/sec\n",
3078 em_int_throttle_ceil);